benchmark-runner 1.0.788__py3-none-any.whl → 1.0.790__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of benchmark-runner might be problematic; review the version diff below for details.

@@ -90,6 +90,8 @@ class EnvironmentVariables:
90
90
  self._environment_variables_dict['run_strategy'] = EnvironmentVariables.get_boolean_from_environment('RUN_STRATEGY', False)
91
91
  # Verification only, without running or deleting any resources, default False
92
92
  self._environment_variables_dict['verification_only'] = EnvironmentVariables.get_boolean_from_environment('VERIFICATION_ONLY', False)
93
+ # verify after test
94
+ self._environment_variables_dict['verify_after_test'] = EnvironmentVariables.get_env('VERIFY_AFTER_TEST', '')
93
95
 
94
96
  # default parameter - change only if needed
95
97
  # Parameters below related to 'run_workload()'
@@ -1,7 +1,7 @@
1
1
 
2
2
  import os
3
3
  import time
4
- from multiprocessing import Process
4
+ from multiprocessing import Process, Manager
5
5
 
6
6
  from benchmark_runner.common.logger.logger_time_stamp import logger_time_stamp, logger
7
7
  from benchmark_runner.common.elasticsearch.elasticsearch_exceptions import ElasticSearchDataNotUploaded
@@ -92,7 +92,9 @@ class BootstormVM(WorkloadsOperations):
92
92
  """
93
93
  if vm_node:
94
94
  delta = round((time.time() - self._bootstorm_start_time[vm_name]) * self.MILLISECONDS, 3)
95
- return {'vm_name': vm_name, 'node': vm_node, 'bootstorm_time': delta, 'vm_ssh': int(bool(vm_node)),}
95
+ data = {'vm_name': vm_name, 'node': vm_node, 'bootstorm_time': delta, 'vm_ssh': int(bool(vm_node)),}
96
+ logger.info(data)
97
+ return data
96
98
  return {}
97
99
 
98
100
  def _create_vm_scale(self, vm_num: str):
@@ -162,7 +164,7 @@ class BootstormVM(WorkloadsOperations):
162
164
  try:
163
165
  virtctl_status = self._oc.get_virtctl_vm_status(vm_name)
164
166
 
165
- if virtctl_status == "True":
167
+ if str(virtctl_status).lower() == "true":
166
168
  # Log success and store relevant data
167
169
  logger.info(f"VM {vm_name} verified successfully (virtctl SSH status: { virtctl_status}).")
168
170
  break # Exit loop on success
@@ -173,22 +175,25 @@ class BootstormVM(WorkloadsOperations):
173
175
  logger.info(f"Attempt {attempt + 1}/{retries} failed for Virtctl SSH VM {vm_name}: {e}")
174
176
 
175
177
  # Sleep before retrying
176
- time.sleep(delay)
178
+ if attempt < retries - 1:
179
+ time.sleep(delay)
180
+
177
181
 
178
182
  # Final update to self._data_dict after all attempts
179
183
  vm_node = self._oc.get_vm_node(vm_name) # Get the node again in case it changed
180
184
  self._data_dict = {
181
185
  'vm_name': vm_name,
182
186
  'node': vm_node,
183
- 'virtctl_vm': 1 if virtctl_status == 'True' else 0, # int value for Grafana
187
+ 'virtctl_vm': 1 if str(virtctl_status).lower() == "true" else 0, # int value for Grafana
184
188
  'virtctl_status': virtctl_status,
189
+ 'verify_after_test': self._verify_after_test,
185
190
  'run_artifacts_url': os.path.join(
186
191
  self._run_artifacts_url,
187
192
  f"{self._get_run_artifacts_hierarchy(self._workload_name, True)}-{self._time_stamp_format}.tar.gz"
188
193
  )
189
194
  }
190
195
 
191
- if virtctl_status != "True":
196
+ if str(virtctl_status).lower() != "true":
192
197
  logger.info(
193
198
  f"All attempts failed for VM {vm_name}. Final SSH status: {self._data_dict.get('virtctl_status', 'No status available')}")
194
199
  error_log_path = f"{self._run_artifacts_path}/{vm_name}_error.log"
@@ -196,76 +201,109 @@ class BootstormVM(WorkloadsOperations):
196
201
  # Retrieve the status or use a default message
197
202
  status_message = self._data_dict.get('virtctl_status') or "No status available"
198
203
 
199
- with open(error_log_path, "w") as error_log_file:
200
- error_log_file.write(str(status_message)) # Convert to string just in case
204
+ try:
205
+ with open(error_log_path, "w") as error_log_file:
206
+ error_log_file.write(str(status_message))
207
+ except Exception as write_err:
208
+ logger.error(f"Failed to write error log for {vm_name}: {write_err}")
201
209
 
202
210
  self._finalize_vm()
203
211
  return virtctl_status
204
212
 
205
- def _verify_virtctl_vm(self):
213
+ def _verify_single_vm_wrapper(self, vm_name, return_dict):
206
214
  """
207
- This method verifies the virtctl SSH login for each VM, either during the upgrade or once for each VM.
208
- It prepares the data for ElasticSearch, generates a must-gather in case of an error, and uploads it to Google Drive.
215
+ This method verifies single vm and update vm status in return_dict
216
+ :param vm_name:
217
+ :param return_dict:
218
+ :return:
209
219
  """
210
- try:
211
- vm_names = self._oc._get_all_vm_names()
212
- if not vm_names:
213
- raise MissingVMs("No VM names were retrieved from the cluster.")
214
-
215
- upgrade_done = True
216
- failure = False
217
- failure_vms = [] # List to store failed VM names
218
-
219
- if self._wait_for_upgrade_version:
220
- logger.info(f"wait for ocp upgrade version: {self._wait_for_upgrade_version}")
221
- upgrade_done = self._oc.get_cluster_status() == f'Cluster version is {self._wait_for_upgrade_version}'
222
- current_wait_time = 0
223
-
224
- while (self._timeout <= 0 or current_wait_time <= self._timeout) and not upgrade_done:
225
- for vm_name in vm_names:
226
- virtctl_status = self._verify_single_vm(vm_name)
227
- if virtctl_status != 'True':
228
- failure = True
229
- if vm_name not in failure_vms:
230
- failure_vms.append(vm_name)
220
+ status = self._verify_single_vm(vm_name)
221
+ return_dict[vm_name] = status
222
+
223
+ def _verify_vms_in_parallel(self, vm_names):
224
+ """
225
+ This method verifies vms in parallel
226
+ :param vm_names:
227
+ :return:
228
+ """
229
+ failure_vms = []
230
+ manager = Manager()
231
+ return_dict = manager.dict()
232
+
233
+ # Split vm_names according to self._threads_limit
234
+ for i in range(0, len(vm_names), self._threads_limit):
235
+ bulk = vm_names[i:i + self._threads_limit]
236
+ processes = []
237
+
238
+ for vm_name in bulk:
239
+ p = Process(target=self._verify_single_vm_wrapper, args=(vm_name, return_dict))
240
+ p.start()
241
+ processes.append(p)
242
+
243
+ for p in processes:
244
+ p.join()
245
+
246
+ # After all processes are done, collect failures
247
+ for vm_name, status in return_dict.items():
248
+ if str(status).lower() != 'true':
249
+ failure_vms.append(vm_name)
250
+ return failure_vms
251
+
252
+ def _verify_virtctl_vms(self, delay=10):
253
+ """
254
+ This method verifies the virtctl SSH login for each VM, either during the upgrade or once for each VM.
255
+ It prepares the data for ElasticSearch, generates a must-gather in case of an error, and uploads it to Google Drive.
256
+ :param delay: delay between each iteration
257
+ """
258
+ try:
259
+ vm_names = self._oc._get_all_vm_names()
260
+ if not vm_names:
261
+ raise MissingVMs("No VM names were retrieved from the cluster.")
262
+
263
+ upgrade_done = True
264
+ failure_vms = [] # List to store failed VM names
265
+
266
+ if self._wait_for_upgrade_version:
267
+ logger.info(f"wait for ocp upgrade version: {self._wait_for_upgrade_version}")
231
268
  upgrade_done = self._oc.get_cluster_status() == f'Cluster version is {self._wait_for_upgrade_version}'
269
+ start_time = time.time()
232
270
 
233
- # Sleep 1 sec between each cycle
234
- time.sleep(1)
235
- current_wait_time += 1 # Increment the wait time
236
- else:
237
- # If _wait_for_upgrade_version is empty, verify VM SSH without waiting for upgrade
238
- for vm_name in vm_names:
239
- virtctl_status = self._verify_single_vm(vm_name)
240
- if virtctl_status != 'True':
241
- failure = True
242
- if vm_name not in failure_vms:
243
- failure_vms.append(vm_name)
244
-
245
- if self._wait_for_upgrade_version:
246
- logger.info(f'Cluster is upgraded to: {self._wait_for_upgrade_version}')
247
-
248
- if failure:
249
- self._oc.generate_cnv_must_gather(destination_path=self._run_artifacts_path,
250
- cnv_version=self._cnv_version)
251
- self._oc.generate_odf_must_gather(destination_path=self._run_artifacts_path,
252
- odf_version=self._odf_version)
253
- # Upload artifacts
254
- if self._google_drive_shared_drive_id:
255
- self.upload_run_artifacts_to_google_drive()
256
- elif self._endpoint_url and not self._google_drive_shared_drive_id:
257
- self.upload_run_artifacts_to_s3()
258
- else:
259
- self._save_artifacts_local = True
271
+ while (self._timeout <= 0 or time.time() - start_time <= self._timeout) and not upgrade_done:
272
+ failure_vms = self._verify_vms_in_parallel(vm_names)
273
+ upgrade_done = self._oc.get_cluster_status() == f'Cluster version is {self._wait_for_upgrade_version}'
260
274
 
261
- # Error log with details of failed VM, for catching all vm errors
262
- logger.error(
263
- f"Failed to verify virtctl SSH login for the following VMs: {', '.join(failure_vms)}")
275
+ if upgrade_done:
276
+ break
264
277
 
265
- except Exception as err:
266
- # Save run artifacts logs
267
- self.save_error_logs()
268
- raise err
278
+ # Sleep between each cycle
279
+ time.sleep(delay)
280
+ else:
281
+ failure_vms = self._verify_vms_in_parallel(vm_names)
282
+
283
+ if self._wait_for_upgrade_version:
284
+ logger.info(f'Cluster is upgraded to: {self._wait_for_upgrade_version}')
285
+
286
+ if failure_vms:
287
+ self._oc.generate_cnv_must_gather(destination_path=self._run_artifacts_path,
288
+ cnv_version=self._cnv_version)
289
+ self._oc.generate_odf_must_gather(destination_path=self._run_artifacts_path,
290
+ odf_version=self._odf_version)
291
+ # Upload artifacts
292
+ if self._google_drive_shared_drive_id:
293
+ self.upload_run_artifacts_to_google_drive()
294
+ elif self._endpoint_url and not self._google_drive_shared_drive_id:
295
+ self.upload_run_artifacts_to_s3()
296
+ else:
297
+ self._save_artifacts_local = True
298
+
299
+ # Error log with details of failed VM, for catching all vm errors
300
+ logger.error(
301
+ f"Failed to verify virtctl SSH login for the following VMs: {', '.join(failure_vms)}")
302
+
303
+ except Exception as err:
304
+ # Save run artifacts logs
305
+ self.save_error_logs()
306
+ raise err
269
307
 
270
308
  def _run_vm_scale(self, vm_num: str):
271
309
  """
@@ -349,7 +387,7 @@ class BootstormVM(WorkloadsOperations):
349
387
  def run_vm_workload(self):
350
388
  # verification only w/o running or deleting any resource
351
389
  if self._verification_only:
352
- self._verify_virtctl_vm()
390
+ self._verify_virtctl_vms()
353
391
  else:
354
392
  if not self._scale:
355
393
  self._run_vm()
@@ -103,6 +103,7 @@ class WorkloadsOperations:
103
103
  self._windows_url = self._environment_variables_dict.get('windows_url', '')
104
104
  self._delete_all = self._environment_variables_dict.get('delete_all', '')
105
105
  self._verification_only = self._environment_variables_dict.get('verification_only', '')
106
+ self._verify_after_test = self._environment_variables_dict.get('verify_after_test', '')
106
107
  self._wait_for_upgrade_version = self._environment_variables_dict.get('wait_for_upgrade_version', '')
107
108
  if self._windows_url:
108
109
  file_name = os.path.basename(self._windows_url)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: benchmark-runner
3
- Version: 1.0.788
3
+ Version: 1.0.790
4
4
  Summary: Benchmark Runner Tool
5
5
  Home-page: https://github.com/redhat-performance/benchmark-runner
6
6
  Author: Red Hat
@@ -161,20 +161,20 @@ benchmark_runner/krkn_hub/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
161
161
  benchmark_runner/krkn_hub/krknhub_exceptions.py,sha256=Hk7Co6zZ0u2RSmBmS4ZAi21ffZaQ2ITTfl6tGLtiAdY,180
162
162
  benchmark_runner/krkn_hub/krknhub_workloads.py,sha256=vUEw5y1rkhspxPdRFTHF_jKo1XwVphtkL-oSQQULVyI,3032
163
163
  benchmark_runner/main/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
164
- benchmark_runner/main/environment_variables.py,sha256=4kJx_e6vrI4ThBvIiCEkvmwbSfQit2EuUHiKmVllYe8,28356
164
+ benchmark_runner/main/environment_variables.py,sha256=ebp3VjLHzeprjZbhs8rGOHASfW3nKK-7BR-J7yBOYFg,28502
165
165
  benchmark_runner/main/environment_variables_exceptions.py,sha256=UR0Ith0P0oshsDZdJRlRq8ZUTt0h8jFvUtrnP4m4AIY,437
166
166
  benchmark_runner/main/main.py,sha256=A744O550wQh37hhk10H0HlT28LZ_2EOaRlJyWG6Pras,14083
167
167
  benchmark_runner/main/temporary_environment_variables.py,sha256=ODSHkfhgvdr_b2e3XyvykW21MVjSdyqimREyMc2klRE,957
168
168
  benchmark_runner/workloads/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
169
- benchmark_runner/workloads/bootstorm_vm.py,sha256=04hjGxfDcqpkJcarz-u56GqkrErQMUBC0c0vDQWn0h8,18754
169
+ benchmark_runner/workloads/bootstorm_vm.py,sha256=tWYlfFglHfcr_lOlBFQkOtarNcKerw2mmCBFE6Fn9cY,19954
170
170
  benchmark_runner/workloads/vdbench_pod.py,sha256=OcmxVr5QlbkhAgY37wsZ7xJUjs9HI-qMVDex1HdL2P0,9778
171
171
  benchmark_runner/workloads/vdbench_vm.py,sha256=Yhoz-GbvZwA8q6qGIeSUsYhEIERj8SmJB1yjetwsGow,9449
172
172
  benchmark_runner/workloads/windows_vm.py,sha256=qFVD3qBFMnVpYXnrpam-7H5-0Yzvx6qtaEEZx4T-ex4,2415
173
173
  benchmark_runner/workloads/workloads.py,sha256=F9fnk4h715tq7ANSCbDH0jktB8fpr_u3YG61Kdi5_os,1422
174
174
  benchmark_runner/workloads/workloads_exceptions.py,sha256=u7VII95iPRF_YhfpGH1U1RmgiIYESMOtbSF1dz7_ToE,1858
175
- benchmark_runner/workloads/workloads_operations.py,sha256=DtnlJKvJTlswKS6Xem4eA9rfIyUr1tMdJWz7qThgAPI,25228
176
- benchmark_runner-1.0.788.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
177
- benchmark_runner-1.0.788.dist-info/METADATA,sha256=uV4TawdMrVct8rjfXi8dYGMqtJDmo0To10vGE_L_qS0,11520
178
- benchmark_runner-1.0.788.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
179
- benchmark_runner-1.0.788.dist-info/top_level.txt,sha256=MP7UbTCzu59D53uKCZl5VsQeM_vheyMc7FmryczJQbk,17
180
- benchmark_runner-1.0.788.dist-info/RECORD,,
175
+ benchmark_runner/workloads/workloads_operations.py,sha256=nHwy61d-CyyW03HsgPJZ73SKuq2rY0aLaRL6R4V0eLc,25324
176
+ benchmark_runner-1.0.790.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
177
+ benchmark_runner-1.0.790.dist-info/METADATA,sha256=dFVoDehhLB_vlGvKGDk70JReNy1re5Nb24aDHBOgY1c,11520
178
+ benchmark_runner-1.0.790.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
179
+ benchmark_runner-1.0.790.dist-info/top_level.txt,sha256=MP7UbTCzu59D53uKCZl5VsQeM_vheyMc7FmryczJQbk,17
180
+ benchmark_runner-1.0.790.dist-info/RECORD,,