benchmark-runner 1.0.806__py3-none-any.whl → 1.0.808__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of benchmark-runner might be problematic.
- benchmark_runner/common/google_drive/google_drive_exceptions.py +13 -0
- benchmark_runner/common/google_drive/google_drive_operations.py +38 -10
- benchmark_runner/common/oc/oc.py +25 -3
- benchmark_runner/workloads/bootstorm_vm.py +64 -65
- benchmark_runner/workloads/workloads_operations.py +13 -0
- {benchmark_runner-1.0.806.dist-info → benchmark_runner-1.0.808.dist-info}/METADATA +1 -1
- {benchmark_runner-1.0.806.dist-info → benchmark_runner-1.0.808.dist-info}/RECORD +10 -9
- {benchmark_runner-1.0.806.dist-info → benchmark_runner-1.0.808.dist-info}/WHEEL +1 -1
- {benchmark_runner-1.0.806.dist-info → benchmark_runner-1.0.808.dist-info}/licenses/LICENSE +0 -0
- {benchmark_runner-1.0.806.dist-info → benchmark_runner-1.0.808.dist-info}/top_level.txt +0 -0
benchmark_runner/common/google_drive/google_drive_exceptions.py ADDED
@@ -0,0 +1,13 @@
+class GoogleDrive(Exception):
+    """ Base class for all Google Drive error classes.
+    All exceptions raised by the google drive library should inherit from this class. """
+    pass
+
+
+class FolderNotCreated(GoogleDrive):
+    """Raised when a folder cannot be created in Google Drive after retries."""
+    def __init__(self, folder_path, message=None):
+        if message is None:
+            message = f"Failed to create folder: {folder_path}"
+        super().__init__(message)
+        self.folder_path = folder_path
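Note: the new hierarchy gives callers a single base class to catch for any Drive failure. A minimal usage sketch (it assumes only that the package above is installed; the path argument is illustrative):

    from benchmark_runner.common.google_drive.google_drive_exceptions import FolderNotCreated

    try:
        raise FolderNotCreated('run-artifacts/2024')  # simulate a failed folder creation
    except FolderNotCreated as err:
        # err.folder_path carries the offending path; str(err) is the default message
        print(f'giving up on {err.folder_path}: {err}')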
benchmark_runner/common/google_drive/google_drive_operations.py CHANGED
@@ -9,7 +9,7 @@ from googleapiclient.http import MediaFileUpload
 from google.auth.transport.requests import Request
 from google_auth_oauthlib.flow import InstalledAppFlow
 from benchmark_runner.common.logger.logger_time_stamp import logger  # Added logger import
-
+from benchmark_runner.common.google_drive.google_drive_exceptions import FolderNotCreated
 # Define the scope
 SCOPES = ['https://www.googleapis.com/auth/drive']
 
@@ -241,16 +241,44 @@ class GoogleDriveOperations:
             # Folder exists, return the folder URL
             folder_url = f"{self._google_drive_path}/{folder_id}"
             return folder_url
-
-
-
+
+    def create_drive_folder_url(self, folder_path, parent_folder_id, retries=3, delay=5):
+        """
+        Create folder if it doesn't exist. Retries in case of failure.
+        :param folder_path: The full folder path to retrieve or create.
+        :param parent_folder_id: The starting parent folder ID.
+        :param retries: Number of retry attempts on failure.
+        :param delay: Initial delay between retries in seconds.
+        :return: The Google Drive URL for the folder, or None on failure.
+        """
+        attempt = 1
+        while attempt <= retries:
+            folder_id = self.get_folder_id_by_path(folder_path, parent_folder_id)
             if folder_id:
-
-
-
-
-
-
+                logger.debug(f"Folder already exists: {folder_path}")
+                break
+
+            logger.warning(f"Folder not found. Attempting to create it (Attempt {attempt}/{retries}).")
+            try:
+                folder_id = self.create_folder_at_path(folder_path, parent_folder_id)
+                if folder_id:
+                    logger.info(f"Successfully created folder: {folder_path}")
+                    break
+            except Exception as e:
+                logger.error(f"Failed to create folder on attempt {attempt} due to: {e}")
+                raise FolderNotCreated(folder_path)
+
+            if attempt < retries:
+                wait_time = delay * (2 ** (attempt - 1))
+                logger.warning(f"Retrying in {wait_time} seconds...")
+                time.sleep(wait_time)
+
+            attempt += 1
+
+        if not folder_id:
+            error_msg = f"Unable to create or find the folder path after {retries} attempts: {folder_path}"
+            logger.error(error_msg)
+            raise FolderNotCreated(folder_path)
 
     def list_files_in_folder(self, folder_id, level=0):
         """
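Note: between attempts the method backs off exponentially, wait_time = delay * 2 ** (attempt - 1), and sleeps only while attempt < retries. With the defaults (retries=3, delay=5) that is 5 s after the first failure and 10 s after the second. A standalone sketch of the schedule:

    delay, retries = 5, 3
    print([delay * (2 ** (attempt - 1)) for attempt in range(1, retries)])  # -> [5, 10]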
benchmark_runner/common/oc/oc.py CHANGED
@@ -1229,6 +1229,10 @@ class OC(SSH):
         node_ips_list = node_ips.split()
         return dict([(k, v) for k, v in zip(node_ips_list[1::2], node_ips_list[::2])])
 
+    def get_vm_status(self, vm_name, namespace: str = environment_variables.environment_variables_dict['namespace']):
+        namespace = f'-n {namespace}' if namespace else ''
+        return self.run(f"{self._cli} get vm {vm_name} {namespace}")
+
     @logger_time_stamp
     def wait_for_vm_status(self, vm_name: str = '', status: VMStatus = VMStatus.Stopped,
                            namespace: str = environment_variables.environment_variables_dict['namespace'],
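Note: get_vm_status is a thin wrapper over the cluster CLI whose raw output is later written into the per-VM error log. A standalone sketch of the command it builds, assuming self._cli resolves to the oc binary (the VM name and namespace are illustrative):

    vm_name, namespace = 'bootstorm-vm-1', 'benchmark-runner'  # assumed values
    ns_flag = f'-n {namespace}' if namespace else ''           # flag is dropped when namespace is empty
    print(f'oc get vm {vm_name} {ns_flag}')                    # -> oc get vm bootstorm-vm-1 -n benchmark-runner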
@@ -1586,8 +1590,26 @@
         :param output_dir:
         :return:
         """
-        self.run(f"oc get vmi {vm_name} -n {namespace} -o yaml > '{output_dir}/{vm_name}.yaml'")
+        self.run(f"oc get vmi '{vm_name}' -n '{namespace}' -o yaml > '{output_dir}/{vm_name}.yaml'")
         pod_name = self.run(
-            f
+            f"oc get pod -n '{namespace}' -o jsonpath=\"{{.items[?(@.metadata.generateName=='virt-launcher-{vm_name}-')].metadata.name}}\""
         )
-        self.run(f"oc get pod {pod_name} -n {namespace} -o yaml > '{output_dir}/{pod_name}.yaml'")
+        self.run(f"oc get pod '{pod_name}' -n '{namespace}' -o yaml > '{output_dir}/{pod_name}.yaml'")
+
+    def save_describe_yml(self, vm_name, vm_access, output_dir='/tmp', namespace: str = environment_variables.environment_variables_dict['namespace']):
+        """
+        This method saves the vm and pod describe output into yaml files per namespace
+        :param vm_name:
+        :param vm_access: True if the vm is accessible
+        :param namespace:
+        :param output_dir:
+        :return:
+        """
+        if vm_access:
+            self.run(f"oc describe vmi '{vm_name}' -n '{namespace}' > '{output_dir}/describe_vmi_{vm_name}.yaml'")
+            pod_name = self.run(
+                f'oc get pod -n {namespace} -o jsonpath="{{.items[?(@.metadata.generateName==\'virt-launcher-{vm_name}-\')].metadata.name}}"'
+            )
+            self.run(f"oc describe pod '{pod_name}' -n '{namespace}' > '{output_dir}/describe_pod_{pod_name}.yaml'")
+        else:
+            self.run(f"oc describe vm '{vm_name}' -n '{namespace}' > '{output_dir}/describe_vm_{vm_name}.yaml'")
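Note: both methods resolve the launcher pod with a jsonpath filter on metadata.generateName, since KubeVirt creates launcher pods as virt-launcher-<vm>- plus a random suffix, which makes the prefix the stable key. A standalone sketch of the command string they build (values are illustrative):

    vm_name, namespace = 'bootstorm-vm-1', 'benchmark-runner'  # assumed values
    cmd = (
        f"oc get pod -n '{namespace}' -o jsonpath="
        f"\"{{.items[?(@.metadata.generateName=='virt-launcher-{vm_name}-')].metadata.name}}\""
    )
    print(cmd)  # the jsonpath expression returns only the matching pod's name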
benchmark_runner/workloads/bootstorm_vm.py CHANGED
@@ -69,7 +69,7 @@ class BootstormVM(WorkloadsOperations):
     @logger_time_stamp
     def _wait_vm_access(self, vm_name: str):
         """
-        This method
+        This method waits for VM access and returns the VM node on success, or False if it fails
         @return:
         """
         if self._oc.get_vm_node(vm_name=vm_name):
@@ -137,7 +137,7 @@ class BootstormVM(WorkloadsOperations):
         total_run_time = self._get_bootstorm_vm_total_run_time()
         self._data_dict.update({'total_run_time': total_run_time})
         # Google drive run_artifacts_url folder path
-        if self._google_drive_path
+        if self._google_drive_path:
             self._data_dict.update({'run_artifacts_url': self.get_run_artifacts_google_drive()})
         if self._es_host:
             # upload several run results
@@ -165,9 +165,9 @@ class BootstormVM(WorkloadsOperations):
             yaml=os.path.join(f'{self._run_artifacts_path}', f'{self._name}.yaml'),
             vm_name=self._vm_name)
 
-    def
+    def _verify_single_vm_access(self, vm_name, retries=5, delay=10):
         """
-        This method verifies
+        This method verifies single vm access using a retry mechanism
         :param vm_name: The name of the VM to verify.
         :param retries: Number of retry attempts.
         :param delay: Time to wait (in seconds) between retries.
@@ -192,7 +192,6 @@ class BootstormVM(WorkloadsOperations):
             if attempt < retries - 1:
                 time.sleep(delay)
 
-
         # Final update to self._data_dict after all attempts
         vm_node = self._oc.get_vm_node(vm_name)  # Get the node again in case it changed
         self._data_dict = {
@@ -217,27 +216,29 @@ class BootstormVM(WorkloadsOperations):
 
         try:
             with open(error_log_path, "w") as error_log_file:
-                error_log_file.write(
+                error_log_file.write(self._oc.get_vm_status(vm_name=vm_name) + "\n\n")
+                error_log_file.write(str(status_message) + "\n")
         except Exception as write_err:
             logger.error(f"Failed to write error log for {vm_name}: {write_err}")
 
         self._finalize_vm()
         return access_status
 
-    def
+    def _verify_single_vm_access_wrapper(self, vm_name, return_dict):
         """
-        This method verifies single
+        This method verifies access to a single VM, saves its YAML files, and updates the VM status in return_dict
         :param vm_name:
         :param return_dict:
         :return:
         """
         self._oc.save_to_yaml(vm_name, output_dir=self._run_artifacts_path)
-
-
+        vm_access = self._verify_single_vm_access(vm_name)
+        self._oc.save_describe_yml(vm_name, str(vm_access).lower() == 'true', output_dir=self._run_artifacts_path)
+        return_dict[vm_name] = vm_access
 
-    def
+    def _verify_vms_access_in_parallel(self, vm_names):
         """
-        This method verifies
+        This method verifies VM access in parallel
         :param vm_names:
         :return:
         """
@@ -251,7 +252,7 @@ class BootstormVM(WorkloadsOperations):
         processes = []
 
         for vm_name in bulk:
-            p = Process(target=self.
+            p = Process(target=self._verify_single_vm_access_wrapper, args=(vm_name, return_dict))
             p.start()
             processes.append(p)
 
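Note: the fan-out relies on a dict that child processes can write into; the diff uses return_dict without showing its construction, so this standalone sketch assumes the usual multiprocessing.Manager pattern:

    from multiprocessing import Manager, Process

    def verify(vm_name, return_dict):
        return_dict[vm_name] = vm_name.endswith('1')  # stand-in for the real SSH access check

    if __name__ == '__main__':
        with Manager() as manager:
            return_dict = manager.dict()
            processes = [Process(target=verify, args=(name, return_dict)) for name in ('vm-1', 'vm-2')]
            for p in processes:
                p.start()
            for p in processes:
                p.join()
            failure_vms = [name for name, ok in return_dict.items() if not ok]
            print(failure_vms)  # -> ['vm-2']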
@@ -265,64 +266,62 @@ class BootstormVM(WorkloadsOperations):
         return failure_vms
 
     def _verify_vms_access(self, delay=10):
-
-
-
-
-
-
-
-
-
+        """
+        This method verifies access for each VM
+        It prepares the data for ElasticSearch, generates a must-gather in case of an error, and uploads it to Google Drive.
+        :param delay: delay between each iteration
+        """
+        try:
+            vm_names = self._oc._get_all_vm_names()
+            if not vm_names:
+                raise MissingVMs()
+
+            upgrade_done = True
+            failure_vms = []  # List to store failed VM names
 
-
-
+            if self._wait_for_upgrade_version:
+                logger.info(f"wait for ocp upgrade version: {self._wait_for_upgrade_version}")
+                upgrade_done = self._oc.get_cluster_status() == f'Cluster version is {self._wait_for_upgrade_version}'
+                start_time = time.time()
 
-
-
+                while (self._timeout <= 0 or time.time() - start_time <= self._timeout) and not upgrade_done:
+                    failure_vms = self._verify_vms_access_in_parallel(vm_names)
                     upgrade_done = self._oc.get_cluster_status() == f'Cluster version is {self._wait_for_upgrade_version}'
-            start_time = time.time()
 
-
-
-            upgrade_done = self._oc.get_cluster_status() == f'Cluster version is {self._wait_for_upgrade_version}'
+                    if upgrade_done:
+                        break
 
-
-
+                    # Sleep between each cycle
+                    time.sleep(delay)
+            else:
+                failure_vms = self._verify_vms_access_in_parallel(vm_names)
+
+            if self._wait_for_upgrade_version:
+                logger.info(f'Cluster is upgraded to: {self._wait_for_upgrade_version}')
+
+            if failure_vms:
+                self._oc.generate_cnv_must_gather(destination_path=self._run_artifacts_path, cnv_version=self._cnv_version)
+                self._oc.generate_odf_must_gather(destination_path=self._run_artifacts_path, odf_version=self._odf_version)
+                # Error log with details of failed VM, for catching all vm errors
+                logger.error(f"Failed to verify virtctl SSH login for the following VMs: {', '.join(failure_vms)}")
+                # Upload artifacts in validation
+                if self._google_drive_shared_drive_id:
+                    self.upload_run_artifacts_to_google_drive()
+                elif self._endpoint_url:
+                    self.upload_run_artifacts_to_s3()
+                else:
+                    self._save_artifacts_local = True
+                if self._es_host:
+                    self._data_dict.update({'run_artifacts_url': self.get_run_artifacts_google_drive(), 'failure_vms': failure_vms, 'verification_failure': True})
+                    # upload several run results
+                    self._upload_to_elasticsearch(index=self._es_index, kind=self._kind, status=self._status,result=self._data_dict)
+                    # verify that data upload to elastic search according to unique uuid
+                    self._verify_elasticsearch_data_uploaded(index=self._es_index, uuid=self._uuid)
 
-
-
-
-
-
-            if self._wait_for_upgrade_version:
-                logger.info(f'Cluster is upgraded to: {self._wait_for_upgrade_version}')
-
-            if failure_vms:
-                self._oc.generate_cnv_must_gather(destination_path=self._run_artifacts_path,
-                                                  cnv_version=self._cnv_version)
-                self._oc.generate_odf_must_gather(destination_path=self._run_artifacts_path,
-                                                  odf_version=self._odf_version)
-                # Error log with details of failed VM, for catching all vm errors
-                logger.error(f"Failed to verify virtctl SSH login for the following VMs: {', '.join(failure_vms)}")
-                # Upload artifacts
-                if self._google_drive_shared_drive_id:
-                    self.upload_run_artifacts_to_google_drive()
-                elif self._endpoint_url:
-                    self.upload_run_artifacts_to_s3()
-                else:
-                    self._save_artifacts_local = True
-                if self._es_host:
-                    self._data_dict.update({'run_artifacts_url': self.get_run_artifacts_google_drive(), 'failure_vms': failure_vms, 'verification_failure': True})
-                    # upload several run results
-                    self._upload_to_elasticsearch(index=self._es_index, kind=self._kind, status=self._status,result=self._data_dict)
-                    # verify that data upload to elastic search according to unique uuid
-                    self._verify_elasticsearch_data_uploaded(index=self._es_index, uuid=self._uuid)
-
-        except Exception as err:
-            # Save run artifacts logs
-            self.save_error_logs()
-            raise err
+        except Exception as err:
+            # Save run artifacts logs
+            self.save_error_logs()
+            raise err
 
     def _run_vm_scale(self, vm_num: str):
         """
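Note: the upgrade wait is a timeout-bounded polling loop; a non-positive self._timeout means poll indefinitely, otherwise the loop stops once the elapsed time exceeds the budget. A standalone sketch of the same loop shape (the probe and constants are illustrative stand-ins for the cluster-version check):

    import time

    results = [False, False, True]  # canned probe outcomes

    def probe():
        return results.pop(0) if results else True

    timeout, delay = 30, 1          # seconds; timeout <= 0 would mean no limit
    done = False
    start_time = time.time()
    while (timeout <= 0 or time.time() - start_time <= timeout) and not done:
        done = probe()
        if done:
            break
        time.sleep(delay)           # sleep between each cycle
    print('upgrade_done:', done)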
benchmark_runner/workloads/workloads_operations.py CHANGED
@@ -373,6 +373,15 @@ class WorkloadsOperations:
         run_artifacts_hierarchy = self._get_run_artifacts_hierarchy(workload_name=workload)
         return self._google_drive_operation.get_drive_folder_url(folder_path=run_artifacts_hierarchy, parent_folder_id=self._google_drive_shared_drive_id)
 
+    def create_run_artifacts_google_drive(self):
+        """
+        This method creates the google drive run artifacts folder path
+        :return:
+        """
+        workload = self._workload.replace('_', '-')
+        run_artifacts_hierarchy = self._get_run_artifacts_hierarchy(workload_name=workload)
+        self._google_drive_operation.create_drive_folder_url(folder_path=run_artifacts_hierarchy, parent_folder_id=self._google_drive_shared_drive_id)
+
     @logger_time_stamp
     def upload_run_artifacts_to_google_drive(self):
         """
@@ -524,6 +533,8 @@ class WorkloadsOperations:
         self._template.generate_yamls(scale=str(self._scale), scale_nodes=self._scale_node_list, redis=self._redis, thread_limit=self._threads_limit)
         if self._enable_prometheus_snapshot:
             self.start_prometheus()
+        if self._google_drive_path:
+            self.create_run_artifacts_google_drive()
 
     def finalize_workload(self):
         """
@@ -535,6 +546,8 @@ class WorkloadsOperations:
             self.end_prometheus()
         if self._endpoint_url:
             self.upload_run_artifacts_to_s3()
+        elif self._google_drive_path:
+            self.upload_run_artifacts_to_google_drive()
         if not self._save_artifacts_local:
             self.delete_local_artifacts()
         if self._delete_all:
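Note: taken together, the two hunks above give the workload a fixed upload precedence: S3 when an endpoint URL is set, Google Drive when only a drive path is configured, otherwise artifacts stay local. A condensed, hypothetical restatement of that branch order:

    def choose_upload_target(endpoint_url, google_drive_path):
        # mirrors the finalize_workload branch order shown above
        if endpoint_url:
            return 's3'
        if google_drive_path:
            return 'google-drive'
        return 'local'

    print(choose_upload_target(None, '/drive/benchmark'))  # -> google-drive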
{benchmark_runner-1.0.806.dist-info → benchmark_runner-1.0.808.dist-info}/RECORD CHANGED
@@ -37,7 +37,8 @@ benchmark_runner/common/elasticsearch/elasticsearch_operations.py,sha256=fXckd9u
 benchmark_runner/common/github/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 benchmark_runner/common/github/github_operations.py,sha256=wp4zH0_4Qzb_c-GtQlP3VnN374H0aa_icGPk0pwkAZc,1111
 benchmark_runner/common/google_drive/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-benchmark_runner/common/google_drive/
+benchmark_runner/common/google_drive/google_drive_exceptions.py,sha256=7he1sGD-vVgwsO8DjCYbCiKkyuQAOPY4KiuRL3p1rjI,524
+benchmark_runner/common/google_drive/google_drive_operations.py,sha256=dGf2_aDSU1XoXSEHk5kAJK7ZWWwbd_QATmXV0nulTms,17553
 benchmark_runner/common/grafana/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 benchmark_runner/common/grafana/grafana_operations.py,sha256=Emt-p44OSe_XgVp6c94EVcDEaniyYtUDQMWCGIP0GR8,4877
 benchmark_runner/common/grafana/update_grafana_latest_value_mappings.py,sha256=aBKXx1spB8O2kjVsOl5heLl5lKcU07Hx8oHJLasJpSQ,5523
@@ -46,7 +47,7 @@ benchmark_runner/common/logger/init_logger.py,sha256=ERa-gNqrl2pZybj7v3csvmao7Mv
 benchmark_runner/common/logger/logger_exceptions.py,sha256=rivdlRm_jIsKQ53rP_-HX8emdtOmQNO4JuIkg8fnBoc,454
 benchmark_runner/common/logger/logger_time_stamp.py,sha256=2JgugN9LpXF4Ijx0wPRzz3DAGJB8eJpM5g1qPvbWbV8,1479
 benchmark_runner/common/oc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-benchmark_runner/common/oc/oc.py,sha256=
+benchmark_runner/common/oc/oc.py,sha256=D6w0SVdqm--3PeKqtY39-tht7-0gMNFMMy1TZPGP9KQ,71724
 benchmark_runner/common/oc/oc_exceptions.py,sha256=XfKUzeK3Ors_Y2csQEoGqrlsZlYvq6OXLkFh9s_mQRM,6311
 benchmark_runner/common/oc/singleton_oc_login.py,sha256=OISe7GxN-povQBk1GYVwkdcuEvIbDQP5QImYbNvhX5Y,2395
 benchmark_runner/common/ocp_resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -166,15 +167,15 @@ benchmark_runner/main/environment_variables_exceptions.py,sha256=UR0Ith0P0oshsDZ
 benchmark_runner/main/main.py,sha256=A744O550wQh37hhk10H0HlT28LZ_2EOaRlJyWG6Pras,14083
 benchmark_runner/main/temporary_environment_variables.py,sha256=ODSHkfhgvdr_b2e3XyvykW21MVjSdyqimREyMc2klRE,957
 benchmark_runner/workloads/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-benchmark_runner/workloads/bootstorm_vm.py,sha256=
+benchmark_runner/workloads/bootstorm_vm.py,sha256=CevrutxfAmQ7NeKd2WexLIgXrAT6I70sEgg27TAX02g,20978
 benchmark_runner/workloads/vdbench_pod.py,sha256=feu3lvNumfBCD-An6__xS5Kt9nA50A_-0FmqTXuU9iw,10011
 benchmark_runner/workloads/vdbench_vm.py,sha256=4rRbE-jAbmNrhP-k8F_OREkJ59VfQ7wLrfRQPwDneJg,9786
 benchmark_runner/workloads/windows_vm.py,sha256=qFVD3qBFMnVpYXnrpam-7H5-0Yzvx6qtaEEZx4T-ex4,2415
 benchmark_runner/workloads/workloads.py,sha256=F9fnk4h715tq7ANSCbDH0jktB8fpr_u3YG61Kdi5_os,1422
 benchmark_runner/workloads/workloads_exceptions.py,sha256=u7VII95iPRF_YhfpGH1U1RmgiIYESMOtbSF1dz7_ToE,1858
-benchmark_runner/workloads/workloads_operations.py,sha256=
-benchmark_runner-1.0.
-benchmark_runner-1.0.
-benchmark_runner-1.0.
-benchmark_runner-1.0.
-benchmark_runner-1.0.
+benchmark_runner/workloads/workloads_operations.py,sha256=DUwvmswAgOXEbrVrV8OTRhrX4K18jnCwedHo9-W5z2s,27068
+benchmark_runner-1.0.808.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+benchmark_runner-1.0.808.dist-info/METADATA,sha256=0_z9A5zbbkDeRue5V5p01RQS1cHgmrLTY-UaNHfw0zI,11520
+benchmark_runner-1.0.808.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+benchmark_runner-1.0.808.dist-info/top_level.txt,sha256=MP7UbTCzu59D53uKCZl5VsQeM_vheyMc7FmryczJQbk,17
+benchmark_runner-1.0.808.dist-info/RECORD,,
{benchmark_runner-1.0.806.dist-info → benchmark_runner-1.0.808.dist-info}/licenses/LICENSE: file without changes
{benchmark_runner-1.0.806.dist-info → benchmark_runner-1.0.808.dist-info}/top_level.txt: file without changes