benchmark-runner 1.0.759__py3-none-any.whl → 1.0.761__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of benchmark-runner might be problematic.

--- a/benchmark_runner/common/oc/oc.py
+++ b/benchmark_runner/common/oc/oc.py
@@ -1,6 +1,5 @@

  import os
- import yaml
  import ast
  import shutil
  import time
@@ -9,12 +8,13 @@ from typeguard import typechecked

  from benchmark_runner.common.logger.logger_time_stamp import logger_time_stamp, logger
  from benchmark_runner.common.oc.oc_exceptions import (PodNotCreateTimeout, PodNotInitializedTimeout, PodNotReadyTimeout, \
- PodNotCompletedTimeout, PodTerminateTimeout, PodNameNotExist, LoginFailed, VMNotCreateTimeout, VMDeleteTimeout, \
+ PodNotCompletedTimeout, PodTerminateTimeout, PodNameNotExist, VMNotCreateTimeout, VMDeleteTimeout, \
  YAMLNotExist, VMNameNotExist, VMNotInitializedTimeout, VMNotReadyTimeout, VMStateTimeout, VMNotCompletedTimeout, \
  ExecFailed, PodFailed, DVStatusTimeout, CSVNotCreateTimeout, UpgradeNotStartTimeout, OperatorInstallationTimeout, \
  OperatorUpgradeTimeout, ODFHealthCheckTimeout, NodeNotReady)
  from benchmark_runner.common.ssh.ssh import SSH
  from benchmark_runner.main.environment_variables import environment_variables
+ from benchmark_runner.common.oc.singleton_oc_login import SingletonOCLogin


  class VMStatus(Enum):
@@ -40,19 +40,18 @@ class OC(SSH):
  self._run_artifacts = self.__environment_variables_dict.get('run_artifacts_path', '')
  self.__elasticsearch_url = self.__environment_variables_dict.get('elasticsearch_url', '')
  self.__kata_csv = self.__environment_variables_dict.get('kata_csv', '')
- self.__cli = self.__environment_variables_dict.get('cli', '')
  self.__worker_disk_prefix = self.__environment_variables_dict.get('worker_disk_prefix', '')
  self.__worker_disk_ids = self.__environment_variables_dict.get('worker_disk_ids', '')
  if self.__worker_disk_ids:
  self.__worker_disk_ids = ast.literal_eval(self.__worker_disk_ids)
+ self._cli = self.__environment_variables_dict.get('cli', '')
  if kubeadmin_password:
- self.__kubeadmin_password = kubeadmin_password
+ self._kubeadmin_password = kubeadmin_password
  else:
- self.__kubeadmin_password = self.__environment_variables_dict.get('kubeadmin_password', '')
- self.__kubeconfig_path = self.__environment_variables_dict.get('kubeconfig_path', '')
- self._is_logged_in = False # Shared across all instances
- # singleton login
- self.__login()
+ self._kubeadmin_password = self.__environment_variables_dict.get('kubeadmin_password', '')
+ self._kubeconfig_path = self.__environment_variables_dict.get('kubeconfig_path', '')
+ # Singleton Login class
+ SingletonOCLogin(self)

  def _ocp_server_version(self, jsonpath: str):
  """
@@ -61,7 +60,7 @@ class OC(SSH):
  """
  for attempt in range(self.RETRIES):
  try:
- version = self.run(f"{self.__cli} get clusterversion version -o jsonpath={{{jsonpath}}}")
+ version = self.run(f"{self._cli} get clusterversion version -o jsonpath={{{jsonpath}}}")
  return version
  except Exception as err:
  logger.info(f"Attempt {attempt + 1}/{self.RETRIES}: Failed to fetch OCP version. Error: {err}")
@@ -94,9 +93,9 @@ class OC(SSH):

  # see: https://access.redhat.com/articles/7031404
  if ocp_channel == "4.16":
- patch_command = f"{self.__cli} -n openshift-config patch cm admin-acks --patch '{{\"data\":{{\"ack-4.15-kube-1.29-api-removals-in-4.16\":\"true\"}}}}' --type=merge"
+ patch_command = f"{self._cli} -n openshift-config patch cm admin-acks --patch '{{\"data\":{{\"ack-4.15-kube-1.29-api-removals-in-4.16\":\"true\"}}}}' --type=merge"
  self.run(patch_command)
- upgrade_command = f"{self.__cli} adm upgrade ; sleep 10; {self.__cli} adm upgrade channel {upgrade_channel}-{ocp_channel}; sleep 10; {self.__cli} adm upgrade --to={upgrade_ocp_version};"
+ upgrade_command = f"{self._cli} adm upgrade ; sleep 10; {self._cli} adm upgrade channel {upgrade_channel}-{ocp_channel}; sleep 10; {self._cli} adm upgrade --to={upgrade_ocp_version};"
  logger.info(upgrade_command)
  self.run(upgrade_command)

@@ -105,7 +104,7 @@ class OC(SSH):
  This method returns True when an upgrade is in progress and False when it is not.
  @return: bool
  """
- status = self.run(f"{self.__cli} get clusterversion version -o jsonpath='{{.status.conditions[?(@.type==\"Progressing\")].status}}'")
+ status = self.run(f"{self._cli} get clusterversion version -o jsonpath='{{.status.conditions[?(@.type==\"Progressing\")].status}}'")
  return status == 'True'

  @logger_time_stamp
@@ -131,14 +130,14 @@ class OC(SSH):
  This method returns upgrade version
  @return:
  """
- return self.run(f"{self.__cli} get clusterversion version -o jsonpath='{{.status.desired.version}}'")
+ return self.run(f"{self._cli} get clusterversion version -o jsonpath='{{.status.desired.version}}'")

  def get_cluster_status(self):
  """
  This method returns the STATUS from the 'oc get clusterversion' command.
  @return: str - The current status of the cluster version.
  """
- return self.run(f"{self.__cli} get clusterversion version -o jsonpath='{{.status.conditions[?(@.type==\"Progressing\")].message}}'")
+ return self.run(f"{self._cli} get clusterversion version -o jsonpath='{{.status.conditions[?(@.type==\"Progressing\")].message}}'")

  def get_operator_version(self, namespace):
  """
@@ -146,7 +145,7 @@ class OC(SSH):
  @param namespace: str - The namespace to search for the operator version.
  @return: major version
  """
- version = self.run(f"{self.__cli} get csv -n {namespace} -o jsonpath='{{.items[0].spec.version}}'")
+ version = self.run(f"{self._cli} get csv -n {namespace} -o jsonpath='{{.items[0].spec.version}}'")
  return '.'.join(version.split('.')[:2])

  def wait_for_operator_installation(self, operator: str, version: str, namespace: str, timeout: int = SHORT_TIMEOUT):
@@ -176,16 +175,16 @@ class OC(SSH):
  @return:
  """
  if action == 'stop':
- self.run(f"{self.__cli} -n openshift-machine-api annotate mhc $({self.__cli} get machinehealthcheck -n openshift-machine-api -o jsonpath='{{.items[0].metadata.name}}') cluster.x-k8s.io/paused=\"\"")
+ self.run(f"{self._cli} -n openshift-machine-api annotate mhc $({self._cli} get machinehealthcheck -n openshift-machine-api -o jsonpath='{{.items[0].metadata.name}}') cluster.x-k8s.io/paused=\"\"")
  elif action == 'resume':
- self.run(f"{self.__cli} -n openshift-machine-api annotate mhc $({self.__cli} get machinehealthcheck -n openshift-machine-api -o jsonpath='{{.items[0].metadata.name}}') cluster.x-k8s.io/paused-")
+ self.run(f"{self._cli} -n openshift-machine-api annotate mhc $({self._cli} get machinehealthcheck -n openshift-machine-api -o jsonpath='{{.items[0].metadata.name}}') cluster.x-k8s.io/paused-")

  def get_cnv_version(self):
  """
  This method returns cnv version
  :return:
  """
- return self.run(f"{self.__cli} get csv -n openshift-cnv -o json | jq -r '.items[] | select(.metadata.name | startswith(\"kubevirt-hyperconverged-operator\")) | .spec.version'")
+ return self.run(f"{self._cli} get csv -n openshift-cnv -o json | jq -r '.items[] | select(.metadata.name | startswith(\"kubevirt-hyperconverged-operator\")) | .spec.version'")

  def get_odf_version(self):
  """
@@ -194,9 +193,9 @@ class OC(SSH):
  """
  # OCP 4.16 and below
  if self.get_ocp_major_version() <= 4 and self.get_ocp_minor_version() <= 16:
- command = f"{self.__cli} get csv -n openshift-storage -o jsonpath='{{.items[0].spec.labels.full_version}}'"
+ command = f"{self._cli} get csv -n openshift-storage -o jsonpath='{{.items[0].spec.labels.full_version}}'"
  else:
- command = f"{self.__cli} get csv -n openshift-storage -o jsonpath='{{range .items[*]}}{{.metadata.name}}{{\"\\n\"}}{{end}}' | grep odf-operator | sed -E 's/odf-operator.v([0-9]+\\.[0-9]+\\.[0-9]+)-rhodf/\\1/'"
+ command = f"{self._cli} get csv -n openshift-storage -o jsonpath='{{range .items[*]}}{{.metadata.name}}{{\"\\n\"}}{{end}}' | grep odf-operator | sed -E 's/odf-operator.v([0-9]+\\.[0-9]+\\.[0-9]+)-rhodf/\\1/'"
  return self.run(command)

  def remove_lso_path(self):
@@ -204,7 +203,7 @@ class OC(SSH):
  The method removes lso path on each node
  @return:
  """
- self.run(fr"""{self.__cli} get nodes -l node-role.kubernetes.io/worker= -o jsonpath="{{range .items[*]}}{{.metadata.name}}{{'\\n'}}{{end}}" | xargs -I{{}} {self.__cli} debug node/{{}} -- chroot /host sh -c "rm -rfv /mnt/local-storage/local-sc/" """)
+ self.run(fr"""{self._cli} get nodes -l node-role.kubernetes.io/worker= -o jsonpath="{{range .items[*]}}{{.metadata.name}}{{'\\n'}}{{end}}" | xargs -I{{}} {self._cli} debug node/{{}} -- chroot /host sh -c "rm -rfv /mnt/local-storage/local-sc/" """)

  def get_worker_disk_ids(self, node: str = None):
  """
@@ -227,7 +226,7 @@ class OC(SSH):
  This method returns list of pv disk ids
  """
  pv_ids = self.run(
- f"{self.__cli} get pv -o jsonpath='{{range .items[*]}}{{.metadata.annotations.storage\\.openshift\\.com/device-id}}{{\"\\n\"}}{{end}}'")
+ f"{self._cli} get pv -o jsonpath='{{range .items[*]}}{{.metadata.annotations.storage\\.openshift\\.com/device-id}}{{\"\\n\"}}{{end}}'")
  return [pv[len(self.__worker_disk_prefix):] for pv in pv_ids.split()]

  def get_free_disk_id(self, node: str = None):
@@ -254,14 +253,14 @@ class OC(SSH):
  :param: cmd
  :return:
  """
- self.run(f"{self.__cli} debug node/{node} --no-tty=true -- chroot /host sh -c '{cmd}'")
+ self.run(f"{self._cli} debug node/{node} --no-tty=true -- chroot /host sh -c '{cmd}'")

  def get_kata_operator_version(self):
  """
  This method returns kata operator version
  :return:
  """
- return self.run(f"{self.__cli} get csv -n openshift-sandboxed-containers-operator $({self.__cli} get csv -n openshift-sandboxed-containers-operator --no-headers | awk '{{ print $1; }}') -o jsonpath='{{.spec.version}}'")
+ return self.run(f"{self._cli} get csv -n openshift-sandboxed-containers-operator $({self._cli} get csv -n openshift-sandboxed-containers-operator --no-headers | awk '{{ print $1; }}') -o jsonpath='{{.spec.version}}'")

  @typechecked
  def get_kata_rpm_version(self, node: str):
@@ -270,21 +269,21 @@ class OC(SSH):
  @param: node
  :return:
  """
- kata_rpm_version = self.run(f"{self.__cli} debug node/{node} -- chroot /host rpm -q --queryformat='%{{VERSION}}-%{{RELEASE}}' kata-containers 2>/dev/null")
+ kata_rpm_version = self.run(f"{self._cli} debug node/{node} -- chroot /host rpm -q --queryformat='%{{VERSION}}-%{{RELEASE}}' kata-containers 2>/dev/null")
  return '.'.join(kata_rpm_version.split('.')[:3])

  def _get_kata_default_channel(self):
  """
  This method retrieves the default channel for Kata
  """
- return self.run(f"{self.__cli} get packagemanifest -n openshift-marketplace sandboxed-containers-operator -o jsonpath='{{.status.defaultChannel}}'")
+ return self.run(f"{self._cli} get packagemanifest -n openshift-marketplace sandboxed-containers-operator -o jsonpath='{{.status.defaultChannel}}'")

  def _get_kata_default_channel_field(self, channel_field: str):
  """
  This method retrieves a field from the packagemanifest for the default Kata channel
  """
  default_channel = f'"{self._get_kata_default_channel()}"'
- command = f"{self.__cli} get packagemanifest -n openshift-marketplace sandboxed-containers-operator -ojson | jq -r '[foreach .status.channels[] as $channel ([[],[]];0;(if ($channel.name == {default_channel}) then $channel.{channel_field} else null end))] | flatten | map (select (. != null))[]'"
+ command = f"{self._cli} get packagemanifest -n openshift-marketplace sandboxed-containers-operator -ojson | jq -r '[foreach .status.channels[] as $channel ([[],[]];0;(if ($channel.name == {default_channel}) then $channel.{channel_field} else null end))] | flatten | map (select (. != null))[]'"
  return self.run(command)

  def _get_kata_csv(self):
@@ -297,7 +296,7 @@ class OC(SSH):
  """
  This method retrieves the catalog source of the sandboxed containers operator for installation"
  """
- return self.run(f"{self.__cli} get packagemanifest -n openshift-marketplace sandboxed-containers-operator -o jsonpath='{{.status.catalogSource}}'")
+ return self.run(f"{self._cli} get packagemanifest -n openshift-marketplace sandboxed-containers-operator -o jsonpath='{{.status.catalogSource}}'")

  def _get_kata_channel(self):
  """
@@ -317,14 +316,14 @@ class OC(SSH):
  @param thread_pool_size:
  @return:
  """
- self.run(fr"""{self.__cli} get nodes -l node-role.kubernetes.io/worker= -o jsonpath="{{range .items[*]}}{{.metadata.name}}{{'\\n'}}{{end}}" | xargs -I{{}} {self.__cli} debug node/{{}} -- chroot /host sh -c "mkdir -p /etc/kata-containers; cp /usr/share/kata-containers/defaults/configuration.toml /etc/kata-containers/; sed -i 's/thread-pool-size=1/thread-pool-size={thread_pool_size}/' /etc/kata-containers/configuration.toml" """)
+ self.run(fr"""{self._cli} get nodes -l node-role.kubernetes.io/worker= -o jsonpath="{{range .items[*]}}{{.metadata.name}}{{'\\n'}}{{end}}" | xargs -I{{}} {self._cli} debug node/{{}} -- chroot /host sh -c "mkdir -p /etc/kata-containers; cp /usr/share/kata-containers/defaults/configuration.toml /etc/kata-containers/; sed -i 's/thread-pool-size=1/thread-pool-size={thread_pool_size}/' /etc/kata-containers/configuration.toml" """)

  def delete_kata_threads_pool(self):
  """
  This method deletes kata thread-pool-size from every worker node
  @return:
  """
- self.run(fr"""{self.__cli} get nodes -l node-role.kubernetes.io/worker= -o jsonpath="{{range .items[*]}}{{.metadata.name}}{{'\\n'}}{{end}}" | xargs -I{{}} {self.__cli} debug node/{{}} -- chroot /host sh -c "rm -f /etc/kata-containers/configuration.toml" """)
+ self.run(fr"""{self._cli} get nodes -l node-role.kubernetes.io/worker= -o jsonpath="{{range .items[*]}}{{.metadata.name}}{{'\\n'}}{{end}}" | xargs -I{{}} {self._cli} debug node/{{}} -- chroot /host sh -c "rm -f /etc/kata-containers/configuration.toml" """)

  @typechecked
  def populate_additional_template_variables(self, env: dict):
@@ -344,7 +343,7 @@ class OC(SSH):
  This method checks if cnv operator is installed
  :return:
  """
- verify_cmd = f"{self.__cli} get csv -n openshift-cnv -o jsonpath='{{.items[0].status.phase}}'"
+ verify_cmd = f"{self._cli} get csv -n openshift-cnv -o jsonpath='{{.items[0].status.phase}}'"
  if 'Succeeded' in self.run(verify_cmd):
  return True
  return False
@@ -354,7 +353,7 @@ class OC(SSH):
  This method checks if odf operator is installed
  :return:
  """
- verify_cmd = f"{self.__cli} get csv -n openshift-storage -o jsonpath='{{.items[0].status.phase}}'"
+ verify_cmd = f"{self._cli} get csv -n openshift-storage -o jsonpath='{{.items[0].status.phase}}'"
  if 'Succeeded' in self.run(verify_cmd):
  return True
  return False
@@ -367,7 +366,7 @@ class OC(SSH):
  :return:
  """
  namespace = f'-n {namespace}' if namespace else ''
- verify_cmd = f"{self.__cli} get dv {namespace} -o jsonpath='{{.items[].status.phase}}'"
+ verify_cmd = f"{self._cli} get dv {namespace} -o jsonpath='{{.items[].status.phase}}'"
  if status in self.run(verify_cmd):
  return True
  return False
@@ -421,7 +420,7 @@ class OC(SSH):
  @raise ODFHealthCheckTimeout: If health check fails within the timeout.
  """
  current_wait_time = 0
- health_check = f"{self.__cli} -n {namespace} rsh {self._get_pod_name(pod_name=pod_name, namespace=namespace)} ceph health"
+ health_check = f"{self._cli} -n {namespace} rsh {self._get_pod_name(pod_name=pod_name, namespace=namespace)} ceph health"

  while timeout <= 0 or current_wait_time <= timeout:
  if 'HEALTH_OK' == self.run(health_check).strip():
@@ -444,7 +443,7 @@ class OC(SSH):
  """
  # apply patch
  self.run(
- f"{self.__cli} patch storagecluster ocs-storagecluster -n {namespace} --type json --patch '[{{ \"op\": \"replace\", \"path\": \"/spec/enableCephTools\", \"value\": true }}]'")
+ f"{self._cli} patch storagecluster ocs-storagecluster -n {namespace} --type json --patch '[{{ \"op\": \"replace\", \"path\": \"/spec/enableCephTools\", \"value\": true }}]'")
  self.wait_for_patch(pod_name='rook-ceph-tools', label='app=rook-ceph-tools', label_uuid=False, namespace=namespace)
  return self.wait_for_odf_healthcheck(pod_name='rook-ceph-tools', namespace=namespace)

@@ -457,7 +456,7 @@ class OC(SSH):
  try:
  # Run the command to get ODF disk count
  disk_count_str = self.run(
- f"{self.__cli} get --no-headers pod -n openshift-storage | grep osd | grep -cv prepare")
+ f"{self._cli} get --no-headers pod -n openshift-storage | grep osd | grep -cv prepare")
  disk_count = int(disk_count_str)
  return disk_count
  except ValueError as e:
@@ -478,7 +477,7 @@ class OC(SSH):
  This method checks if kata operator is installed
  :return:
  """
- verify_cmd = f"{self.__cli} get csv -n openshift-sandboxed-containers-operator -o jsonpath='{{.items[0].status.phase}}'"
+ verify_cmd = f"{self._cli} get csv -n openshift-sandboxed-containers-operator -o jsonpath='{{.items[0].status.phase}}'"
  if 'Succeeded' in self.run(verify_cmd):
  return True
  return False
@@ -488,14 +487,14 @@ class OC(SSH):
  This method returns master nodes
  :return:
  """
- return self.run(fr""" {self.__cli} get nodes -l node-role.kubernetes.io/master= -o jsonpath="{{range .items[*]}}{{.metadata.name}}{{'\n'}}{{end}}" """)
+ return self.run(fr""" {self._cli} get nodes -l node-role.kubernetes.io/master= -o jsonpath="{{range .items[*]}}{{.metadata.name}}{{'\n'}}{{end}}" """)

  def get_worker_nodes(self):
  """
  This method returns worker nodes
  :return:
  """
- return self.run(fr""" {self.__cli} get nodes -l node-role.kubernetes.io/worker= -o jsonpath="{{range .items[*]}}{{.metadata.name}}{{'\n'}}{{end}}" """)
+ return self.run(fr""" {self._cli} get nodes -l node-role.kubernetes.io/worker= -o jsonpath="{{range .items[*]}}{{.metadata.name}}{{'\n'}}{{end}}" """)

  @typechecked
  def wait_for_node_ready(self, node: str = None, wait_time: int = None, timeout: int = int(environment_variables.environment_variables_dict['timeout'])):
@@ -550,26 +549,26 @@ class OC(SSH):
  @return:
  """
  # Get the node name and status for all nodes
- nodes_list = self.run(f"{self.__cli} get nodes --no-headers | awk '{{print $1, $2}}'").splitlines()
+ nodes_list = self.run(f"{self._cli} get nodes --no-headers | awk '{{print $1, $2}}'").splitlines()
  return nodes_list

  def delete_available_released_pv(self):
  """
  This method deletes available or released pv because that avoid launching new pv
  """
- pv_status_list = self.run(fr"{self.__cli} get pv -o jsonpath={{..status.phase}}").split()
+ pv_status_list = self.run(fr"{self._cli} get pv -o jsonpath={{..status.phase}}").split()
  for ind, pv_status in enumerate(pv_status_list):
  if pv_status == 'Available' or pv_status == 'Released':
- available_pv = self.run(fr"{self.__cli} get pv -o jsonpath={{.items[{ind}].metadata.name}}")
+ available_pv = self.run(fr"{self._cli} get pv -o jsonpath={{.items[{ind}].metadata.name}}")
  logger.info(f'Delete {pv_status} pv {available_pv}')
- self.run(fr"{self.__cli} delete localvolume -n openshift-local-storage local-disks --wait=false")
- self.run(fr"{self.__cli} delete pv {available_pv} --wait=false")
+ self.run(fr"{self._cli} delete localvolume -n openshift-local-storage local-disks --wait=false")
+ self.run(fr"{self._cli} delete pv {available_pv} --wait=false")

  def clear_node_caches(self):
  """
  This method clears the node's buffer cache
  """
- return self.run(fr""" {self.__cli} get nodes -l node-role.kubernetes.io/worker= -o jsonpath="{{range .items[*]}}{{.metadata.name}}{{'\n'}}{{end}}" | xargs -I{{}} {self.__cli} debug node/{{}} -- chroot /host sh -c "sync; echo 3 > /proc/sys/vm/drop_caches" """)
+ return self.run(fr""" {self._cli} get nodes -l node-role.kubernetes.io/worker= -o jsonpath="{{range .items[*]}}{{.metadata.name}}{{'\n'}}{{end}}" | xargs -I{{}} {self._cli} debug node/{{}} -- chroot /host sh -c "sync; echo 3 > /proc/sys/vm/drop_caches" """)

  def __get_short_uuid(self, workload: str):
  """
@@ -601,8 +600,8 @@ class OC(SSH):
  @param namespace:
  @return:
  """
- return self.run(f"{self.__cli} create serviceaccount -n {namespace} {namespace}; "
- f"{self.__cli} adm policy add-scc-to-user -n {namespace} privileged -z {namespace}")
+ return self.run(f"{self._cli} create serviceaccount -n {namespace} {namespace}; "
+ f"{self._cli} adm policy add-scc-to-user -n {namespace} privileged -z {namespace}")

  @typechecked
  @logger_time_stamp
@@ -614,7 +613,7 @@ class OC(SSH):
  :return:
  """
  if os.path.isfile(yaml):
- return self.run(f'{self.__cli} create -f {yaml}', is_check=is_check)
+ return self.run(f'{self._cli} create -f {yaml}', is_check=is_check)
  else:
  raise YAMLNotExist(yaml)

@@ -628,7 +627,7 @@ class OC(SSH):
  :return:
  """
  if os.path.isfile(yaml):
- return self.run(f'{self.__cli} apply -f {yaml}', is_check=is_check)
+ return self.run(f'{self._cli} apply -f {yaml}', is_check=is_check)
  else:
  raise YAMLNotExist(yaml)

@@ -641,7 +640,7 @@ class OC(SSH):
  :return:
  """
  if os.path.isfile(yaml):
- return self.run(f'{self.__cli} delete -f {yaml} --wait=false')
+ return self.run(f'{self._cli} delete -f {yaml} --wait=false')
  else:
  raise YAMLNotExist(yaml)

@@ -656,7 +655,7 @@ class OC(SSH):
  """
  try:
  namespace = f'-n {namespace}' if namespace else ''
- return self.run(f'{self.__cli} get {namespace} pods -o name | grep {pod_name}')
+ return self.run(f'{self._cli} get {namespace} pods -o name | grep {pod_name}')
  except Exception as err:
  raise PodNameNotExist(pod_name=pod_name)

@@ -669,7 +668,7 @@ class OC(SSH):
  :return:
  """
  namespace = f'-n {namespace}' if namespace else ''
- result = self.run(f'{self.__cli} get {namespace} pods -o name | grep {pod_name}')
+ result = self.run(f'{self._cli} get {namespace} pods -o name | grep {pod_name}')
  if pod_name in result:
  return True
  else:
@@ -684,7 +683,7 @@ class OC(SSH):
  :return:
  """
  namespace = f'-n {namespace}' if namespace else ''
- result = self.run(f"{self.__cli} get {namespace} pod -l={label_name} -o jsonpath='{{.items}}'")
+ result = self.run(f"{self._cli} get {namespace} pod -l={label_name} -o jsonpath='{{.items}}'")
  if result != '[]':
  return True
  else:
@@ -697,7 +696,7 @@ class OC(SSH):
  :return:
  """
  long_uuid = self.run(
- f"{self.__cli} -n {environment_variables.environment_variables_dict['namespace']} get benchmark/{workload} -o jsonpath={{.status.uuid}}")
+ f"{self._cli} -n {environment_variables.environment_variables_dict['namespace']} get benchmark/{workload} -o jsonpath={{.status.uuid}}")
  return long_uuid

  def get_ocp_major_version(self):
@@ -721,9 +720,9 @@ class OC(SSH):
  """
  # OCP 4.10 and below
  if self.get_ocp_major_version() <= 4 and self.get_ocp_minor_version() <= 10:
- prom_token = self.run(f"{self.__cli} -n openshift-monitoring sa get-token prometheus-k8s")
+ prom_token = self.run(f"{self._cli} -n openshift-monitoring sa get-token prometheus-k8s")
  else:
- prom_token = self.run(f"{self.__cli} sa new-token -n openshift-monitoring prometheus-k8s 2>/dev/null")
+ prom_token = self.run(f"{self._cli} sa new-token -n openshift-monitoring prometheus-k8s 2>/dev/null")
  return prom_token

  def collect_events(self):
@@ -732,50 +731,9 @@ class OC(SSH):
  :return: output_filename
  """
  output_filename = os.path.join(self._run_artifacts, f'events.log')
- self.run(f"{self.__cli} get events -A > '{output_filename}' ")
+ self.run(f"{self._cli} get events -A > '{output_filename}' ")
  return output_filename

- def get_kube_api_server(self):
- try:
- with open(self.__kubeconfig_path, "r") as f:
- config = yaml.safe_load(f)
-
- # Extract the API server URL from the clusters section
- return config["clusters"][0]["cluster"]["server"]
- except FileNotFoundError:
- return "Kubeconfig file not found."
- except KeyError:
- return "Invalid kubeconfig format. Could not find API server."
- except Exception as e:
- return f"Error: {str(e)}"
-
- @logger_time_stamp
- def __login(self):
- """
- Logs in to the cluster with retries, ensuring only a single login per execution.
- """
- if self._is_logged_in:
- logger.info("Already logged in. Skipping login.")
- return True # Avoid redundant login attempts
-
- for attempt in range(self.RETRIES):
- try:
- if self.__kubeadmin_password and self.__kubeadmin_password.strip():
- self.run(
- f'{self.__cli} login {self.get_kube_api_server()} -u kubeadmin -p {self.__kubeadmin_password}',
- is_check=True
- )
- self._is_logged_in = True # Mark as logged in globally
- return True # Success
- except Exception as err:
- logger.warning(f"Login attempt {attempt + 1} failed: {err}")
- if attempt < self.RETRIES - 1:
- time.sleep(self.DELAY)
- else:
- raise LoginFailed("Login failed after multiple attempts")
-
- return self._is_logged_in # Ensure return after loop
-
  @typechecked
  @logger_time_stamp
  def get_pod(self, label: str, database: str = '', namespace: str = environment_variables.environment_variables_dict['namespace']):
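The deleted `__login()` passed an explicit API server URL, resolved by the also-deleted `get_kube_api_server()`, to `oc login`. Its replacement, `SingletonOCLogin._login` (new file below), runs `oc login -u kubeadmin -p <password>` with no server argument, so the target cluster is now taken from the kubeconfig's current context; this also removes oc.py's only use of `yaml`, which is why that import was dropped in the first hunk. For reference, the removed helper boils down to this standalone sketch (assuming a standard kubeconfig layout):

```python
import yaml  # PyYAML


def get_kube_api_server(kubeconfig_path: str) -> str:
    # Return the API server URL of the first cluster entry in a kubeconfig.
    with open(kubeconfig_path, "r") as f:
        config = yaml.safe_load(f)
    return config["clusters"][0]["cluster"]["server"]
```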
@@ -788,11 +746,11 @@ class OC(SSH):
  """
  if database:
  return self.run(
- f"{self.__cli} get pods -n '{database}-db'" + " --no-headers | awk '{ print $1; }' | grep " + database,
+ f"{self._cli} get pods -n '{database}-db'" + " --no-headers | awk '{ print $1; }' | grep " + database,
  is_check=True).rstrip().decode('ascii')
  else:
  namespace = f'-n {namespace}' if namespace else ''
- return self.run(f"{self.__cli} get pods {namespace} --no-headers | awk '{{ print $1; }}' | grep -w '{label}'", is_check=True).rstrip().decode('ascii')
+ return self.run(f"{self._cli} get pods {namespace} --no-headers | awk '{{ print $1; }}' | grep -w '{label}'", is_check=True).rstrip().decode('ascii')

  @typechecked
  @logger_time_stamp
@@ -809,12 +767,12 @@ class OC(SSH):
  else:
  output_filename = os.path.join(self._run_artifacts, pod_name)
  if database:
- self.run(f"{self.__cli} logs -n '{database}-db' {pod_name} > '{output_filename}' ")
+ self.run(f"{self._cli} logs -n '{database}-db' {pod_name} > '{output_filename}' ")
  # manager logs of benchmark-controller-manager
  elif 'benchmark-controller-manager' in pod_name:
- self.run(f"{self.__cli} logs -n {environment_variables.environment_variables_dict['namespace']} {pod_name} manager > '{output_filename}' ")
+ self.run(f"{self._cli} logs -n {environment_variables.environment_variables_dict['namespace']} {pod_name} manager > '{output_filename}' ")
  else:
- self.run(f"{self.__cli} logs -n {environment_variables.environment_variables_dict['namespace']} {pod_name} > '{output_filename}' ")
+ self.run(f"{self._cli} logs -n {environment_variables.environment_variables_dict['namespace']} {pod_name} > '{output_filename}' ")
  return output_filename

  def describe_pod(self, pod_name: str, namespace: str = ''):
@@ -825,7 +783,7 @@ class OC(SSH):
  :return: output_filename
  """
  output_filename = os.path.join(self._run_artifacts, f'describe-{pod_name}')
- self.run(f"{self.__cli} describe pod -n {namespace} {pod_name} > '{output_filename}' ")
+ self.run(f"{self._cli} describe pod -n {namespace} {pod_name} > '{output_filename}' ")
  return output_filename

  @logger_time_stamp
@@ -834,7 +792,7 @@ class OC(SSH):
  This method retrieves information on benchmark-runner pods in oc get pod format
  :return:
  """
- return self.run(f'{self.__cli} get pods', is_check=True)
+ return self.run(f'{self._cli} get pods', is_check=True)

  @typechecked
  @logger_time_stamp
@@ -922,7 +880,7 @@ class OC(SSH):
  :param namespace:
  :return:
  """
- self.run(f'{self.__cli} delete ns {namespace}')
+ self.run(f'{self._cli} delete ns {namespace}')

  @typechecked
  @logger_time_stamp
@@ -943,11 +901,11 @@ class OC(SSH):
  namespace = f'-n {namespace}' if namespace else ''
  if label_uuid:
  result = self.run(
- f"{self.__cli} {namespace} wait --for=condition={status} pod -l {label}-{self.__get_short_uuid(workload=workload)} --timeout={timeout}s",
+ f"{self._cli} {namespace} wait --for=condition={status} pod -l {label}-{self.__get_short_uuid(workload=workload)} --timeout={timeout}s",
  is_check=True)
  else:
  return self.run(
- f"{self.__cli} {namespace} wait --for=condition={status} pod -l {label} --timeout={timeout}s",
+ f"{self._cli} {namespace} wait --for=condition={status} pod -l {label} --timeout={timeout}s",
  is_check=True)
  if 'met' in result.decode("utf-8"):
  return True
@@ -976,7 +934,7 @@ class OC(SSH):

  while timeout <= 0 or current_wait_time <= timeout:
  # Filter out node-healthcheck-operator and self-node-remediation during CSV verification because they exist in all namespaces
- upgrade_versions = self.run(f"{self.__cli} get csv -n {namespace} -o json | jq -r '.items[] | select(.metadata.name | test(\"node-healthcheck-operator|self-node-remediation\") | not) | .spec.version'".splitlines()
+ upgrade_versions = self.run(f"{self._cli} get csv -n {namespace} -o json | jq -r '.items[] | select(.metadata.name | test(\"node-healthcheck-operator|self-node-remediation\") | not) | .spec.version'".splitlines()
  ).splitlines()
  count_upgrade_version = sum(1 for actual_upgrade_version in upgrade_versions if
  '.'.join(actual_upgrade_version.split('.')[0:2]) == upgrade_version)
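One pre-existing oddity survives the rename in the hunk above: `.splitlines()` is applied to the command string inside the `self.run(...)` call, and then `.splitlines()` is applied again to the result. Splitting a one-line f-string just wraps it in a single-element list, which is then handed to `run()`. Assuming `run()` expects a plain command string, the intended form is presumably:

```python
upgrade_versions = self.run(
    f"{self._cli} get csv -n {namespace} -o json | jq -r '.items[] | select(.metadata.name | test(\"node-healthcheck-operator|self-node-remediation\") | not) | .spec.version'"
).splitlines()
```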
@@ -1007,7 +965,7 @@ class OC(SSH):
  current_wait_time = 0

  while timeout <= 0 or current_wait_time <= timeout:
- csv_names = self.run(f"{self.__cli} get csv -n {namespace} -o jsonpath={{$.items[*].metadata.name}}")
+ csv_names = self.run(f"{self._cli} get csv -n {namespace} -o jsonpath={{$.items[*].metadata.name}}")
  if csv_names and len(csv_names.split()) >= csv_num:
  return csv_names

@@ -1037,11 +995,11 @@ class OC(SSH):
  namespace = f'-n {namespace}' if namespace else ''
  if label_uuid:
  result = self.run(
- f"{self.__cli} {namespace} wait --for=condition={status} {run_type} -l {label}-{self.__get_short_uuid(workload=workload)} --timeout={timeout}s",
+ f"{self._cli} {namespace} wait --for=condition={status} {run_type} -l {label}-{self.__get_short_uuid(workload=workload)} --timeout={timeout}s",
  is_check=True)
  else:
  result = self.run(
- f"{self.__cli} {namespace} wait --for=condition={status} {run_type} -l {label} --timeout={timeout}s",
+ f"{self._cli} {namespace} wait --for=condition={status} {run_type} -l {label} --timeout={timeout}s",
  is_check=True)
  if 'met' in result.decode("utf-8"):
  return True
@@ -1064,7 +1022,7 @@ class OC(SSH):
  try:
  namespace = f'-n {namespace}' if namespace else ''
  container = f'-c {container}' if container else ''
- return self.run(f'{self.__cli} exec {namespace} {pod_name} {container} -- {command}')
+ return self.run(f'{self._cli} exec {namespace} {pod_name} {container} -- {command}')
  except Exception as err:
  raise ExecFailed(command, pod_name, err)

@@ -1082,7 +1040,7 @@ class OC(SSH):
  if self.pod_exists(pod_name, namespace):
  try:
  namespace = f'-n {namespace}' if namespace else ''
- self.run(f'{self.__cli} delete pod {namespace} {pod_name} timeout={timeout}')
+ self.run(f'{self._cli} delete pod {namespace} {pod_name} timeout={timeout}')
  except Exception as err:
  raise PodTerminateTimeout(pod_name)

@@ -1102,7 +1060,7 @@ class OC(SSH):
  current_wait_time = 0
  namespace = f'-n {namespace}' if namespace else ''
  while timeout <= 0 or current_wait_time <= timeout:
- answer = self.run(f'{self.__cli} get pod {namespace} {pod_name} --no-headers -ocustom-columns=Status:status.phase 2>/dev/null')
+ answer = self.run(f'{self._cli} get pod {namespace} {pod_name} --no-headers -ocustom-columns=Status:status.phase 2>/dev/null')
  if answer == 'Running':
  return
  elif answer == 'Error':
@@ -1132,15 +1090,15 @@ class OC(SSH):
  while current_wait_time <= timeout:
  if label_uuid and job:
  result = self.run(
- f"{self.__cli} {namespace} wait --for=condition=complete -l {label}-{self.__get_short_uuid(workload=workload)} jobs --timeout={OC.SHORT_TIMEOUT}s")
+ f"{self._cli} {namespace} wait --for=condition=complete -l {label}-{self.__get_short_uuid(workload=workload)} jobs --timeout={OC.SHORT_TIMEOUT}s")
  if 'met' in result:
  return True
  result = self.run(
- f"{self.__cli} {namespace} wait --for=condition=failed -l {label}-{self.__get_short_uuid(workload=workload)} jobs --timeout={OC.SLEEP_TIME}s")
+ f"{self._cli} {namespace} wait --for=condition=failed -l {label}-{self.__get_short_uuid(workload=workload)} jobs --timeout={OC.SLEEP_TIME}s")
  if 'met' in result:
  return False
  if not job:
- result = self.run(f"{self.__cli} get pod -l {label}" + " -n benchmark-runner --no-headers | awk '{ print $3; }'")
+ result = self.run(f"{self._cli} get pod -l {label}" + " -n benchmark-runner --no-headers | awk '{ print $3; }'")
  if 'Completed' in result:
  return True
  # sleep for x seconds
@@ -1157,7 +1115,7 @@ class OC(SSH):
  :return: output_filename
  """
  output_filename = os.path.join(self._run_artifacts, f'describe-{vm_name}')
- self.run(f"{self.__cli} describe vmi -n {namespace} {vm_name} > '{output_filename}' ")
+ self.run(f"{self._cli} describe vmi -n {namespace} {vm_name} > '{output_filename}' ")
  return output_filename

  @typechecked
@@ -1170,7 +1128,7 @@ class OC(SSH):
  """
  try:
  namespace = f'-n {namespace}' if namespace else ''
- return self.run(f'{self.__cli} get {namespace} vmi -o name | grep {vm_name}', is_check=True)
+ return self.run(f'{self._cli} get {namespace} vmi -o name | grep {vm_name}', is_check=True)
  except Exception as err:
  raise VMNameNotExist(vm_name=vm_name)

@@ -1183,7 +1141,7 @@ class OC(SSH):
  :return: list of VM names or an empty list if an error occurs
  """
  namespace_option = f'-n {namespace}' if namespace else ''
- command = f"{self.__cli} get {namespace_option} vm -o jsonpath='{{.items[*].metadata.name}}'"
+ command = f"{self._cli} get {namespace_option} vm -o jsonpath='{{.items[*].metadata.name}}'"
  try:
  vm_names = self.run(command)
  return vm_names.split() if vm_names else []
@@ -1199,7 +1157,7 @@ class OC(SSH):
  :return: True or False
  """
  namespace = f'-n {namespace}' if namespace else ''
- result = self.run(f'{self.__cli} get {namespace} vmi -o name | grep {vm_name}')
+ result = self.run(f'{self._cli} get {namespace} vmi -o name | grep {vm_name}')
  if vm_name in result:
  return True
  else:
@@ -1217,9 +1175,9 @@ class OC(SSH):
  if label:
  namespace = f'-n {namespace}' if namespace else ''
  return self.run(
- cmd=f"{self.__cli} get vmi {namespace} --no-headers | awk '{{ print $1; }}' | grep -w '{label}'", is_check=True).rstrip().decode('ascii')
+ cmd=f"{self._cli} get vmi {namespace} --no-headers | awk '{{ print $1; }}' | grep -w '{label}'", is_check=True).rstrip().decode('ascii')
  else:
- return self.run(f'{self.__cli} get vmi', is_check=True)
+ return self.run(f'{self._cli} get vmi', is_check=True)

  @logger_time_stamp
  def __verify_vm_log_complete(self, vm_name: str, timeout: int = int(environment_variables.environment_variables_dict['timeout'])):
@@ -1248,7 +1206,7 @@ class OC(SSH):
  @return:
  """
  namespace = f'-n {namespace}' if namespace else ''
- return self.run(cmd=f"{self.__cli} get service {vm_name} {namespace} -o jsonpath={{.spec.ports[].nodePort}}")
+ return self.run(cmd=f"{self._cli} get service {vm_name} {namespace} -o jsonpath={{.spec.ports[].nodePort}}")

  @logger_time_stamp
  def get_nodes_addresses(self):
@@ -1256,7 +1214,7 @@ class OC(SSH):
  This method returns dictionary of nodes and corresponding IP addresses, e.g. {node1:ip1, node2:ip2, node3:ip3 }
  :return:
  """
- node_ips = self.run(f"{self.__cli} get node -o jsonpath='{{$.items[*].status.addresses[*].address}}'")
+ node_ips = self.run(f"{self._cli} get node -o jsonpath='{{$.items[*].status.addresses[*].address}}'")
  node_ips_list = node_ips.split()
  return dict([(k, v) for k, v in zip(node_ips_list[1::2], node_ips_list[::2])])

@@ -1275,7 +1233,7 @@ class OC(SSH):
  current_wait_time = 0
  namespace = f'-n {namespace}' if namespace else ''
  while timeout <= 0 or current_wait_time <= timeout:
- check_vm_status = f"{self.__cli} get vm {vm_name} {namespace} -o jsonpath={{.status.printableStatus}}"
+ check_vm_status = f"{self._cli} get vm {vm_name} {namespace} -o jsonpath={{.status.printableStatus}}"
  result = self.run(check_vm_status)
  if result == status.name:
  return True
@@ -1343,7 +1301,7 @@ class OC(SSH):
  :return:
  """
  namespace = f'-n {namespace}' if namespace else ''
- command = f"{self.__cli} get vmi {vm_name} {namespace} -o jsonpath={{.metadata.labels.'kubevirt\\.io/nodeName'}}"
+ command = f"{self._cli} get vmi {vm_name} {namespace} -o jsonpath={{.metadata.labels.'kubevirt\\.io/nodeName'}}"

  try:
  result = self.run(command)
@@ -1419,7 +1377,7 @@ class OC(SSH):
  :return:
  """
  namespace = f'-n {namespace}' if namespace else ''
- self.run(f"{self.__cli} delete vm --all --grace-period 0 {namespace}")
+ self.run(f"{self._cli} delete vm --all --grace-period 0 {namespace}")

  @logger_time_stamp
  def wait_for_vm_completed(self, workload: str = '', vm_name: str = '',
@@ -1436,7 +1394,7 @@ class OC(SSH):
  namespace = f'-n {namespace}' if namespace else ''
  while timeout <= 0 or current_wait_time <= timeout:
  if self.run(
- f"{self.__cli} {namespace} get benchmark {workload} -o jsonpath={{.status.complete}}") == 'true':
+ f"{self._cli} {namespace} get benchmark {workload} -o jsonpath={{.status.complete}}") == 'true':
  return True
  # sleep for x seconds
  time.sleep(OC.SLEEP_TIME)
--- /dev/null
+++ b/benchmark_runner/common/oc/singleton_oc_login.py
@@ -0,0 +1,57 @@
+
+ import time
+ import threading
+
+ from benchmark_runner.common.logger.logger_time_stamp import logger_time_stamp, logger
+ from benchmark_runner.common.oc.oc_exceptions import LoginFailed
+
+
+ class SingletonOCLogin:
+
+ _instance = None
+ _lock = threading.Lock() # Create a class-level lock
+
+ def __new__(cls, oc_instance):
+ with cls._lock: # Acquire the lock for instance creation
+ if cls._instance is None:
+ cls._instance = super(SingletonOCLogin, cls).__new__(cls)
+ cls._instance.__init_instance(oc_instance)
+ return cls._instance
+
+ def __init_instance(self, oc_instance):
+ self.__oc_instance = oc_instance # Store reference to OC instance
+ self._kubeconfig_path = oc_instance._kubeconfig_path # Changed to protected
+ self._kubeadmin_password = oc_instance._kubeadmin_password # Changed to protected
+ self._cli = oc_instance._cli # Changed to protected
+ # Inherit RETRIES and DELAY from OC instance
+ self.RETRIES = oc_instance.RETRIES
+ self.DELAY = oc_instance.DELAY
+
+ # Attempt to log in during initialization
+ try:
+ self._login()
+ except RuntimeError as e:
+ logger.error(f"Initialization failed: {e}")
+ # Handle the exception as needed (e.g., re-raise, log, etc.)
+
+ @logger_time_stamp
+ def _login(self):
+ # Check for empty or whitespace-only password
+ if not self._kubeadmin_password or not self._kubeadmin_password.strip():
+ raise LoginFailed(msg="Empty password")
+
+ """Logs in to the cluster with retries, ensuring only a single login per execution."""
+ for attempt in range(self.RETRIES):
+ try:
+ if self._kubeadmin_password and self._kubeadmin_password.strip():
+ self.__oc_instance.run( # Use the run method from OC instance
+ f'{self._cli} login -u kubeadmin -p {self._kubeadmin_password}',
+ is_check=True
+ )
+ return True # Success
+ except Exception as err:
+ logger.warning(f"Login attempt {attempt + 1} failed: {err}")
+ if attempt < self.RETRIES - 1:
+ time.sleep(self.DELAY)
+ else:
+ raise LoginFailed(msg="Login failed after multiple attempts")
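`SingletonOCLogin` guarantees at most one `oc login` per process: `__new__` takes a class-level lock, builds the instance and performs the login on first construction, and every later `SingletonOCLogin(oc_instance)` call, one per `OC` constructed, returns the cached, already-logged-in instance. A minimal, self-contained sketch of the same pattern (the `Session` name and `_connect` step are illustrative, not from the package):

```python
import threading


class Session:
    _instance = None
    _lock = threading.Lock()

    def __new__(cls):
        with cls._lock:  # serialize instance creation across threads
            if cls._instance is None:
                cls._instance = super().__new__(cls)
                cls._instance._connect()  # runs exactly once per process
        return cls._instance

    def _connect(self):
        print("logging in once")


a = Session()
b = Session()
assert a is b  # later constructions return the cached instance
```

Two details of the released code worth noting: `_login` checks the password twice (once up front, raising `LoginFailed`, and again inside the retry loop), and the `except RuntimeError` in `__init_instance` only swallows that failure if `LoginFailed` happens to derive from `RuntimeError`; the stored `_kubeconfig_path` is never used by the class.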
--- a/benchmark_runner/main/environment_variables.py
+++ b/benchmark_runner/main/environment_variables.py
@@ -50,7 +50,6 @@ class EnvironmentVariables:
  # dynamic parameters - configure for local run
  self._environment_variables_dict['workload'] = EnvironmentVariables.get_env('WORKLOAD', '')
  self._environment_variables_dict['kubeadmin_password'] = EnvironmentVariables.get_env('KUBEADMIN_PASSWORD', '')
- self._environment_variables_dict['kubeconfig_path'] = EnvironmentVariables.get_env('KUBECONFIG_PATH', '')

  # PIN=node selector
  self._environment_variables_dict['pin_node_benchmark_operator'] = EnvironmentVariables.get_env('PIN_NODE_BENCHMARK_OPERATOR', '')
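Note the interaction with the oc.py hunks above: `OC.__init__` still reads `self.__environment_variables_dict.get('kubeconfig_path', '')`, so with the `KUBECONFIG_PATH` entry removed here, `_kubeconfig_path` now silently falls back to an empty string unless the key is populated elsewhere; `dict.get` with a default never raises:

```python
env = {'kubeadmin_password': '1234'}  # 'kubeconfig_path' no longer populated
kubeconfig_path = env.get('kubeconfig_path', '')
assert kubeconfig_path == ''  # empty fallback, no KeyError
```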
--- a/benchmark_runner-1.0.759.dist-info/METADATA
+++ b/benchmark_runner-1.0.761.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: benchmark-runner
- Version: 1.0.759
+ Version: 1.0.761
  Summary: Benchmark Runner Tool
  Home-page: https://github.com/redhat-performance/benchmark-runner
  Author: Red Hat
@@ -113,8 +113,6 @@ The following options may be passed via command line flags or set in the environ

  **mandatory:** KUBEADMIN_PASSWORD=$KUBEADMIN_PASSWORD

- **mandatory:** KUBECONFIG_PATH=$KUBECONFIG_PATH [config path inside the container]
-
  **mandatory:** $KUBECONFIG [ kubeconfig file path]

  **mandatory:** WORKLOAD=$WORKLOAD
@@ -162,11 +160,11 @@ Not mandatory:
  For example:

  ```sh
- podman run --rm -e WORKLOAD="hammerdb_pod_mariadb" -e KUBEADMIN_PASSWORD="1234" -e KUBECONFIG_PATH="/root/.kube/config" -e PIN_NODE_BENCHMARK_OPERATOR="node_name-0" -e PIN_NODE1="node_name-1" -e PIN_NODE2="node_name-2" -e log_level=INFO -v /root/.kube/config:/root/.kube/config --privileged quay.io/benchmark-runner/benchmark-runner:latest
+ podman run --rm -e WORKLOAD="hammerdb_pod_mariadb" -e KUBEADMIN_PASSWORD="1234" -e PIN_NODE_BENCHMARK_OPERATOR="node_name-0" -e PIN_NODE1="node_name-1" -e PIN_NODE2="node_name-2" -e log_level=INFO -v /root/.kube/config:/root/.kube/config --privileged quay.io/benchmark-runner/benchmark-runner:latest
  ```
  or
  ```sh
- docker run --rm -e WORKLOAD="hammerdb_vm_mariadb" -e KUBEADMIN_PASSWORD="1234" -e KUBECONFIG_PATH="/root/.kube/config" -e PIN_NODE_BENCHMARK_OPERATOR="node_name-0" -e PIN_NODE1="node_name-1" -e PIN_NODE2="node_name-2" -e log_level=INFO -v /root/.kube/config:/root/.kube/config --privileged quay.io/benchmark-runner/benchmark-runner:latest
+ docker run --rm -e WORKLOAD="hammerdb_vm_mariadb" -e KUBEADMIN_PASSWORD="1234" -e PIN_NODE_BENCHMARK_OPERATOR="node_name-0" -e PIN_NODE1="node_name-1" -e PIN_NODE2="node_name-2" -e log_level=INFO -v /root/.kube/config:/root/.kube/config --privileged quay.io/benchmark-runner/benchmark-runner:latest
  ```

  SAVE RUN ARTIFACTS LOCAL:
--- a/benchmark_runner-1.0.759.dist-info/RECORD
+++ b/benchmark_runner-1.0.761.dist-info/RECORD
@@ -46,8 +46,9 @@ benchmark_runner/common/logger/init_logger.py,sha256=ERa-gNqrl2pZybj7v3csvmao7Mv
  benchmark_runner/common/logger/logger_exceptions.py,sha256=rivdlRm_jIsKQ53rP_-HX8emdtOmQNO4JuIkg8fnBoc,454
  benchmark_runner/common/logger/logger_time_stamp.py,sha256=2JgugN9LpXF4Ijx0wPRzz3DAGJB8eJpM5g1qPvbWbV8,1479
  benchmark_runner/common/oc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- benchmark_runner/common/oc/oc.py,sha256=PT_t1WRTY9TKYSYaOidTNnjupAJsetSZ29_ranKAUIs,69972
+ benchmark_runner/common/oc/oc.py,sha256=iCdkWLZV6mAB653HsRPbj1k5NZZcTFBFSLqLuoFQ00g,68193
  benchmark_runner/common/oc/oc_exceptions.py,sha256=XfKUzeK3Ors_Y2csQEoGqrlsZlYvq6OXLkFh9s_mQRM,6311
+ benchmark_runner/common/oc/singleton_oc_login.py,sha256=OISe7GxN-povQBk1GYVwkdcuEvIbDQP5QImYbNvhX5Y,2395
  benchmark_runner/common/ocp_resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  benchmark_runner/common/ocp_resources/create_cnv.py,sha256=AXsyR8_g_RIFHz2rkyHzzvvUDIiGdOi9rZWJPhiPzDQ,3511
  benchmark_runner/common/ocp_resources/create_custom.py,sha256=rthm96yDzI-Hke54hLWTs0O3gDtpQPR6n184ehaEevo,1029
@@ -160,7 +161,7 @@ benchmark_runner/krkn_hub/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
  benchmark_runner/krkn_hub/krknhub_exceptions.py,sha256=Hk7Co6zZ0u2RSmBmS4ZAi21ffZaQ2ITTfl6tGLtiAdY,180
  benchmark_runner/krkn_hub/krknhub_workloads.py,sha256=qNRZA-FBQZiT6h013WbP6zBRINh3c6YMnnlksN9fssA,2851
  benchmark_runner/main/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- benchmark_runner/main/environment_variables.py,sha256=FqXk-zKvmunv9vV2td9bYfpeFWaY5s2D38fEnzO-tw0,28189
+ benchmark_runner/main/environment_variables.py,sha256=3bzRBuvSkcqcvleIKrCTEWGwd788mg20_2n8T1_kWwA,28075
  benchmark_runner/main/environment_variables_exceptions.py,sha256=UR0Ith0P0oshsDZdJRlRq8ZUTt0h8jFvUtrnP4m4AIY,437
  benchmark_runner/main/main.py,sha256=A744O550wQh37hhk10H0HlT28LZ_2EOaRlJyWG6Pras,14083
  benchmark_runner/main/temporary_environment_variables.py,sha256=ODSHkfhgvdr_b2e3XyvykW21MVjSdyqimREyMc2klRE,957
@@ -172,8 +173,8 @@ benchmark_runner/workloads/windows_vm.py,sha256=eHK79ueAkSlNC1uamz19o7CO20wzJi-U
  benchmark_runner/workloads/workloads.py,sha256=F9fnk4h715tq7ANSCbDH0jktB8fpr_u3YG61Kdi5_os,1422
  benchmark_runner/workloads/workloads_exceptions.py,sha256=u7VII95iPRF_YhfpGH1U1RmgiIYESMOtbSF1dz7_ToE,1858
  benchmark_runner/workloads/workloads_operations.py,sha256=EEIZjNYGjEkyUafxFKSu4X9jaN_wgYDbMeCi1hZBZ-0,25186
- benchmark_runner-1.0.759.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- benchmark_runner-1.0.759.dist-info/METADATA,sha256=JYJ75v_pKO8zsM2iqB1lEGznwSPJkmahLrfUUcl1Ecc,11662
- benchmark_runner-1.0.759.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- benchmark_runner-1.0.759.dist-info/top_level.txt,sha256=MP7UbTCzu59D53uKCZl5VsQeM_vheyMc7FmryczJQbk,17
- benchmark_runner-1.0.759.dist-info/RECORD,,
+ benchmark_runner-1.0.761.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ benchmark_runner-1.0.761.dist-info/METADATA,sha256=YG_jrJ59E4UVEEGWGXymh8X5Cg5N3LxU6Cy0jIuYQ9Q,11498
+ benchmark_runner-1.0.761.dist-info/WHEEL,sha256=nn6H5-ilmfVryoAQl3ZQ2l8SH5imPWFpm1A5FgEuFV4,91
+ benchmark_runner-1.0.761.dist-info/top_level.txt,sha256=MP7UbTCzu59D53uKCZl5VsQeM_vheyMc7FmryczJQbk,17
+ benchmark_runner-1.0.761.dist-info/RECORD,,
--- a/benchmark_runner-1.0.759.dist-info/WHEEL
+++ b/benchmark_runner-1.0.761.dist-info/WHEEL
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.8.0)
+ Generator: setuptools (75.8.1)
  Root-Is-Purelib: true
  Tag: py3-none-any
