benchmark-runner 1.0.789-py3-none-any.whl → 1.0.791-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of benchmark-runner might be problematic.
- benchmark_runner/benchmark_operator/benchmark_operator_workloads_operations.py +2 -1
- benchmark_runner/benchmark_operator/hammerdb_pod.py +8 -6
- benchmark_runner/benchmark_operator/hammerdb_vm.py +8 -6
- benchmark_runner/benchmark_operator/stressng_pod.py +8 -6
- benchmark_runner/benchmark_operator/stressng_vm.py +8 -6
- benchmark_runner/benchmark_operator/uperf_pod.py +8 -6
- benchmark_runner/benchmark_operator/uperf_vm.py +8 -6
- benchmark_runner/workloads/bootstorm_vm.py +11 -7
- benchmark_runner/workloads/vdbench_pod.py +16 -12
- benchmark_runner/workloads/vdbench_vm.py +16 -11
- benchmark_runner/workloads/workloads_operations.py +1 -0
- {benchmark_runner-1.0.789.dist-info → benchmark_runner-1.0.791.dist-info}/METADATA +1 -1
- {benchmark_runner-1.0.789.dist-info → benchmark_runner-1.0.791.dist-info}/RECORD +16 -16
- {benchmark_runner-1.0.789.dist-info → benchmark_runner-1.0.791.dist-info}/WHEEL +0 -0
- {benchmark_runner-1.0.789.dist-info → benchmark_runner-1.0.791.dist-info}/licenses/LICENSE +0 -0
- {benchmark_runner-1.0.789.dist-info → benchmark_runner-1.0.791.dist-info}/top_level.txt +0 -0
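Every Python change in this release follows the same pattern: Prometheus metric collection is gated behind `self._enable_prometheus_snapshot`, and the parsed metrics are stored in `self._prometheus_result` (initialized to an empty dict in the base classes) so later Elasticsearch updates can always reference it. A minimal sketch of that call sequence, assuming only the `PrometheusMetricsOperation` method names that appear in the hunks below; the `ExampleWorkload` wrapper itself is hypothetical:

```python
# Illustrative sketch only: mirrors the gating pattern added in 1.0.791.
# The class and run_workload() wrapper are hypothetical stand-ins; the
# prometheus_metrics_operation method names match the diff below.
class ExampleWorkload:
    def __init__(self, enable_prometheus_snapshot, prometheus_metrics_operation):
        self._enable_prometheus_snapshot = enable_prometheus_snapshot
        self._prometheus_metrics_operation = prometheus_metrics_operation
        self._prometheus_result = {}  # always defined, even when snapshots are off

    def run_workload(self):
        if self._enable_prometheus_snapshot:
            self._prometheus_metrics_operation.init_prometheus()
        # ... run the actual benchmark here (omitted) ...
        if self._enable_prometheus_snapshot:
            self._prometheus_metrics_operation.finalize_prometheus()
            metric_results = self._prometheus_metrics_operation.run_prometheus_queries()
            self._prometheus_result = self._prometheus_metrics_operation.parse_prometheus_metrics(data=metric_results)
        return self._prometheus_result
```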
benchmark_runner/benchmark_operator/benchmark_operator_workloads_operations.py

@@ -70,10 +70,11 @@ class BenchmarkOperatorWorkloadsOperations:
         # get oc instance
         self._oc = self.get_oc(kubeadmin_password=self._kubeadmin_password)
         self._virtctl = Virtctl()
+        self._prometheus_result = {}
         # PrometheusSnapshot
         if self._enable_prometheus_snapshot:
             self._snapshot = PrometheusSnapshot(oc=self._oc, artifacts_path=self._run_artifacts_path, verbose=True)
-
+        self._prometheus_metrics_operation = PrometheusMetricsOperation()
         # Extract lso id for LSO workload
         if '_lso' in self._environment_variables_dict.get('workload'):
             self._oc.delete_available_released_pv()
benchmark_runner/benchmark_operator/hammerdb_pod.py

@@ -34,7 +34,8 @@ class HammerdbPod(BenchmarkOperatorWorkloadsOperations):
         :return:
         """
         try:
-            self.
+            if self._enable_prometheus_snapshot:
+                self._prometheus_metrics_operation.init_prometheus()
             self.__name = f"{self._workload.split('_')[0]}_{self._workload.split('_')[1]}"
             self.__database = self._workload.split('_')[2]
             if 'kata' in self._workload:
@@ -65,10 +66,11 @@ class HammerdbPod(BenchmarkOperatorWorkloadsOperations):
             self._oc.wait_for_ready(label='app=hammerdb_workload', workload=self.__workload_name)
             self.__status = self._oc.wait_for_pod_completed(label='app=hammerdb_workload', workload=self.__workload_name)
             self.__status = 'complete' if self.__status else 'failed'
-
-
-
-
+            if self._enable_prometheus_snapshot:
+                # prometheus queries
+                self._prometheus_metrics_operation.finalize_prometheus()
+                metric_results = self._prometheus_metrics_operation.run_prometheus_queries()
+                self._prometheus_result = self._prometheus_metrics_operation.parse_prometheus_metrics(data=metric_results)
             # system metrics
             if environment_variables.environment_variables_dict['system_metrics']:
                 self.system_metrics_collector(workload=self.__workload_name, es_fetch_min_time=self.__es_fetch_min_time)
@@ -79,7 +81,7 @@ class HammerdbPod(BenchmarkOperatorWorkloadsOperations):
             ids = self._verify_elasticsearch_data_uploaded(index=self.__es_index, uuid=self._oc.get_long_uuid(workload=self.__workload_name), es_fetch_min_time=self.__es_fetch_min_time)
             # update metadata
             for id in ids:
-                self._update_elasticsearch_index(index=self.__es_index, id=id, kind=self.__kind, database=self.__database, status=self.__status, run_artifacts_url=run_artifacts_url, prometheus_result=
+                self._update_elasticsearch_index(index=self.__es_index, id=id, kind=self.__kind, database=self.__database, status=self.__status, run_artifacts_url=run_artifacts_url, prometheus_result=self._prometheus_result)
             # delete hammerdb
             self._oc.delete_pod_sync(
                 yaml=os.path.join(f'{self._run_artifacts_path}', f'{self.__name}_{self.__database}.yaml'),
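The hammerdb_pod hunks above (and the matching hunks in the other benchmark-operator workloads below) also change how the parsed metrics reach Elasticsearch: the stored `self._prometheus_result` is now passed explicitly to `_update_elasticsearch_index`. A hedged sketch of that metadata-update step, with `update_index` standing in for the real helper and the function wrapper being illustrative only:

```python
# Hypothetical stand-alone version of the metadata-update loop shown above.
# `update_index` is a stand-in for the workload's _update_elasticsearch_index helper.
def update_metadata(ids, es_index, kind, database, status, run_artifacts_url,
                    prometheus_result, update_index):
    for doc_id in ids:
        # prometheus_result is {} when Prometheus snapshots are disabled
        update_index(index=es_index, id=doc_id, kind=kind, database=database,
                     status=status, run_artifacts_url=run_artifacts_url,
                     prometheus_result=prometheus_result)
```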
benchmark_runner/benchmark_operator/hammerdb_vm.py

@@ -33,7 +33,8 @@ class HammerdbVM(BenchmarkOperatorWorkloadsOperations):
         :return:
         """
         try:
-            self.
+            if self._enable_prometheus_snapshot:
+                self._prometheus_metrics_operation.init_prometheus()
             self.__name = f"{self._workload.split('_')[0]}_{self._workload.split('_')[1]}"
             self.__database = self._workload.split('_')[2]
             if self._run_type == 'test_ci':
@@ -51,10 +52,11 @@ class HammerdbVM(BenchmarkOperatorWorkloadsOperations):
             vm_name = self._create_vm_log(labels=[self.__workload_name])
             self.__status = self._oc.wait_for_vm_completed(workload=self.__workload_name, vm_name=vm_name)
             self.__status = 'complete' if self.__status else 'failed'
-
-
-
-
+            if self._enable_prometheus_snapshot:
+                # prometheus queries
+                self._prometheus_metrics_operation.finalize_prometheus()
+                metric_results = self._prometheus_metrics_operation.run_prometheus_queries()
+                self._prometheus_result = self._prometheus_metrics_operation.parse_prometheus_metrics(data=metric_results)
             # system metrics
             if environment_variables.environment_variables_dict['system_metrics']:
                 self.system_metrics_collector(workload=self.__workload_name, es_fetch_min_time=self.__es_fetch_min_time)
@@ -65,7 +67,7 @@ class HammerdbVM(BenchmarkOperatorWorkloadsOperations):
             ids = self._verify_elasticsearch_data_uploaded(index=self.__es_index, uuid=self._oc.get_long_uuid(workload=self.__workload_name), es_fetch_min_time=self.__es_fetch_min_time)
             # update metadata
             for id in ids:
-                self._update_elasticsearch_index(index=self.__es_index, id=id, kind=self._environment_variables_dict.get('kind', ''), status=self.__status, run_artifacts_url=run_artifacts_url, database=self.__database, prometheus_result=
+                self._update_elasticsearch_index(index=self.__es_index, id=id, kind=self._environment_variables_dict.get('kind', ''), status=self.__status, run_artifacts_url=run_artifacts_url, database=self.__database, prometheus_result=self._prometheus_result)
             self._oc.delete_vm_sync(
                 yaml=os.path.join(f'{self._run_artifacts_path}', f'{self.__name}_{self.__database}.yaml'),
                 vm_name=f'{self.__workload_name}-workload')
benchmark_runner/benchmark_operator/stressng_pod.py

@@ -30,7 +30,8 @@ class StressngPod(BenchmarkOperatorWorkloadsOperations):
         :return:
         """
         try:
-            self.
+            if self._enable_prometheus_snapshot:
+                self._prometheus_metrics_operation.init_prometheus()
             if 'kata' in self._workload:
                 self.__kind = 'kata'
                 self.__name = self._workload.replace('kata', 'pod')
@@ -48,10 +49,11 @@ class StressngPod(BenchmarkOperatorWorkloadsOperations):
             self._oc.wait_for_ready(label='app=stressng_workload', workload=self.__workload_name)
             self.__status = self._oc.wait_for_pod_completed(label='app=stressng_workload', workload=self.__workload_name)
             self.__status = 'complete' if self.__status else 'failed'
-
-
-
-
+            if self._enable_prometheus_snapshot:
+                # prometheus queries
+                self._prometheus_metrics_operation.finalize_prometheus()
+                metric_results = self._prometheus_metrics_operation.run_prometheus_queries()
+                self._prometheus_result = self._prometheus_metrics_operation.parse_prometheus_metrics(data=metric_results)
             # system metrics
             if environment_variables.environment_variables_dict['system_metrics']:
                 self.system_metrics_collector(workload=self.__workload_name)
@@ -62,7 +64,7 @@ class StressngPod(BenchmarkOperatorWorkloadsOperations):
             ids = self._verify_elasticsearch_data_uploaded(index=self.__es_index, uuid=self._oc.get_long_uuid(workload=self.__workload_name))
             # update metadata
             for id in ids:
-                self._update_elasticsearch_index(index=self.__es_index, id=id, kind=self.__kind, status=self.__status, run_artifacts_url=run_artifacts_url, prometheus_result=
+                self._update_elasticsearch_index(index=self.__es_index, id=id, kind=self.__kind, status=self.__status, run_artifacts_url=run_artifacts_url, prometheus_result=self._prometheus_result)
             self._oc.delete_pod_sync(
                 yaml=os.path.join(f'{self._run_artifacts_path}', f'{self.__name}.yaml'),
                 pod_name=f'{self.__workload_name}-workload')
benchmark_runner/benchmark_operator/stressng_vm.py

@@ -29,7 +29,8 @@ class StressngVM(BenchmarkOperatorWorkloadsOperations):
         :return:
         """
         try:
-            self.
+            if self._enable_prometheus_snapshot:
+                self._prometheus_metrics_operation.init_prometheus()
             self.__name = self._workload
             if self._run_type == 'test_ci':
                 self.__es_index = 'stressng-test-ci-results'
@@ -44,10 +45,11 @@ class StressngVM(BenchmarkOperatorWorkloadsOperations):
             vm_name = self._create_vm_log(labels=[self.__workload_name])
             self.__status = self._oc.wait_for_vm_completed(workload=self.__workload_name, vm_name=vm_name)
             self.__status = 'complete' if self.__status else 'failed'
-
-
-
-
+            if self._enable_prometheus_snapshot:
+                # prometheus queries
+                self._prometheus_metrics_operation.finalize_prometheus()
+                metric_results = self._prometheus_metrics_operation.run_prometheus_queries()
+                self._prometheus_result = self._prometheus_metrics_operation.parse_prometheus_metrics(data=metric_results)
             # system metrics
             if environment_variables.environment_variables_dict['system_metrics']:
                 self.system_metrics_collector(workload=self.__workload_name)
@@ -58,7 +60,7 @@ class StressngVM(BenchmarkOperatorWorkloadsOperations):
             ids = self._verify_elasticsearch_data_uploaded(index=self.__es_index, uuid=self._oc.get_long_uuid(workload=self.__workload_name))
             # update metadata
             for id in ids:
-                self._update_elasticsearch_index(index=self.__es_index, id=id, kind=self._environment_variables_dict.get('kind', ''), status=self.__status, run_artifacts_url=run_artifacts_url, prometheus_result=
+                self._update_elasticsearch_index(index=self.__es_index, id=id, kind=self._environment_variables_dict.get('kind', ''), status=self.__status, run_artifacts_url=run_artifacts_url, prometheus_result=self._prometheus_result)
             self._oc.delete_vm_sync(
                 yaml=os.path.join(f'{self._run_artifacts_path}', f'{self.__name}.yaml'),
                 vm_name=f'{self.__workload_name}-workload')
benchmark_runner/benchmark_operator/uperf_pod.py

@@ -30,7 +30,8 @@ class UperfPod(BenchmarkOperatorWorkloadsOperations):
         :return:
         """
         try:
-            self.
+            if self._enable_prometheus_snapshot:
+                self._prometheus_metrics_operation.init_prometheus()
             if 'kata' in self._workload:
                 self.__kind = 'kata'
                 self.__name = self._workload.replace('kata', 'pod')
@@ -61,10 +62,11 @@ class UperfPod(BenchmarkOperatorWorkloadsOperations):
             self._oc.wait_for_ready(label='app=uperf-bench-client', workload=self.__workload_name)
             self.__status = self._oc.wait_for_pod_completed(label='app=uperf-bench-client', workload=self.__workload_name)
             self.__status = 'complete' if self.__status else 'failed'
-
-
-
-
+            if self._enable_prometheus_snapshot:
+                # prometheus queries
+                self._prometheus_metrics_operation.finalize_prometheus()
+                metric_results = self._prometheus_metrics_operation.run_prometheus_queries()
+                self._prometheus_result = self._prometheus_metrics_operation.parse_prometheus_metrics(data=metric_results)
             # system metrics
             if environment_variables.environment_variables_dict['system_metrics']:
                 self.system_metrics_collector(workload=self.__workload_name)
@@ -75,7 +77,7 @@ class UperfPod(BenchmarkOperatorWorkloadsOperations):
             ids = self._verify_elasticsearch_data_uploaded(index=self.__es_index, uuid=self._oc.get_long_uuid(workload=self.__workload_name))
             # update metadata
             for id in ids:
-                self._update_elasticsearch_index(index=self.__es_index, id=id, kind=self.__kind, status=self.__status, run_artifacts_url=run_artifacts_url, prometheus_result=
+                self._update_elasticsearch_index(index=self.__es_index, id=id, kind=self.__kind, status=self.__status, run_artifacts_url=run_artifacts_url, prometheus_result=self._prometheus_result)
             self._oc.delete_pod_sync(
                 yaml=os.path.join(f'{self._run_artifacts_path}', f'{self.__name}.yaml'),
                 pod_name=f'uperf-client')
benchmark_runner/benchmark_operator/uperf_vm.py

@@ -29,7 +29,8 @@ class UperfVM(BenchmarkOperatorWorkloadsOperations):
         :return:
         """
         try:
-            self.
+            if self._enable_prometheus_snapshot:
+                self._prometheus_metrics_operation.init_prometheus()
             self.__name = self._workload
             if self._run_type == 'test_ci':
                 self.__es_index = 'uperf-test-ci-results'
@@ -50,10 +51,11 @@ class UperfVM(BenchmarkOperatorWorkloadsOperations):
             vm_name = self._create_vm_log(labels=['uperf-server', 'uperf-client'])
             self.__status = self._oc.wait_for_vm_completed(workload=self.__workload_name, vm_name=vm_name)
             self.__status = 'complete' if self.__status else 'failed'
-
-
-
-
+            if self._enable_prometheus_snapshot:
+                # prometheus queries
+                self._prometheus_metrics_operation.finalize_prometheus()
+                metric_results = self._prometheus_metrics_operation.run_prometheus_queries()
+                self._prometheus_result = self._prometheus_metrics_operation.parse_prometheus_metrics(data=metric_results)
             # system metrics
             if environment_variables.environment_variables_dict['system_metrics']:
                 self.system_metrics_collector(workload=self.__workload_name)
@@ -64,7 +66,7 @@ class UperfVM(BenchmarkOperatorWorkloadsOperations):
             ids = self._verify_elasticsearch_data_uploaded(index=self.__es_index, uuid=self._oc.get_long_uuid(workload=self.__workload_name), timeout=10)
             # update metadata
             for id in ids:
-                self._update_elasticsearch_index(index=self.__es_index, id=id, kind=self._environment_variables_dict.get('kind', ''), status=self.__status, run_artifacts_url=run_artifacts_url, prometheus_result=
+                self._update_elasticsearch_index(index=self.__es_index, id=id, kind=self._environment_variables_dict.get('kind', ''), status=self.__status, run_artifacts_url=run_artifacts_url, prometheus_result=self._prometheus_result)
             self._oc.delete_vm_sync(yaml=os.path.join(f'{self._run_artifacts_path}', f'{self.__name}.yaml'),
                                     vm_name='uperf-server')
         except ElasticSearchDataNotUploaded as err:
benchmark_runner/workloads/bootstorm_vm.py

@@ -92,7 +92,9 @@ class BootstormVM(WorkloadsOperations):
         """
         if vm_node:
             delta = round((time.time() - self._bootstorm_start_time[vm_name]) * self.MILLISECONDS, 3)
-
+            data = {'vm_name': vm_name, 'node': vm_node, 'bootstorm_time': delta, 'vm_ssh': int(bool(vm_node)),}
+            logger.info(data)
+            return data
         return {}

     def _create_vm_scale(self, vm_num: str):
@@ -113,13 +115,14 @@ class BootstormVM(WorkloadsOperations):
         self._status = 'complete' if self._data_dict else 'failed'
         # update total vm run time
         if not self._verification_only:
-
-
-
-
+            if self._enable_prometheus_snapshot:
+                # prometheus queries
+                self._prometheus_metrics_operation.finalize_prometheus()
+                metric_results = self._prometheus_metrics_operation.run_prometheus_queries()
+                self._prometheus_result = self._prometheus_metrics_operation.parse_prometheus_metrics(data=metric_results)
+                self._data_dict.update(self._prometheus_result)
             total_run_time = self._get_bootstorm_vm_total_run_time()
             self._data_dict.update({'total_run_time': total_run_time})
-            self._data_dict.update(prometheus_result)
             # Google drive run_artifacts_url folder path
             if self._google_drive_path and self.get_run_artifacts_google_drive():
                 self._data_dict.update({'run_artifacts_url': self.get_run_artifacts_google_drive()})
@@ -372,7 +375,8 @@ class BootstormVM(WorkloadsOperations):
         """
         Initialize prometheus start time, vm name, kind and create benchmark-runner namespace for bootstorm vms
         """
-        self.
+        if self._enable_prometheus_snapshot:
+            self._prometheus_metrics_operation.init_prometheus()
         self._name = self._workload
         self._workload_name = self._workload.replace('_', '-')
         self._vm_name = f'{self._workload_name}-{self._trunc_uuid}'
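In bootstorm_vm.py the per-VM measurement now builds an explicit record (vm_name, node, bootstorm_time, vm_ssh), logs it, and returns it, and the aggregated `_data_dict` absorbs `self._prometheus_result` inside the snapshot gate. A small sketch of that per-VM record as a free function; the function wrapper, argument names, and the assumption that `MILLISECONDS` is 1000 are illustrative, not taken from the package:

```python
import logging
import time

logger = logging.getLogger(__name__)
MILLISECONDS = 1000  # assumed value; the diff only shows self.MILLISECONDS being used

# Hypothetical free-function version of the record built in the first hunk above.
def build_bootstorm_record(vm_name, vm_node, start_time):
    if vm_node:
        delta = round((time.time() - start_time) * MILLISECONDS, 3)
        data = {'vm_name': vm_name, 'node': vm_node, 'bootstorm_time': delta,
                'vm_ssh': int(bool(vm_node))}
        logger.info(data)
        return data
    return {}
```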
benchmark_runner/workloads/vdbench_pod.py

@@ -6,7 +6,6 @@ from multiprocessing import Process
 from benchmark_runner.common.logger.logger_time_stamp import logger_time_stamp, logger
 from benchmark_runner.common.elasticsearch.elasticsearch_exceptions import ElasticSearchDataNotUploaded
 from benchmark_runner.workloads.workloads_operations import WorkloadsOperations
-from benchmark_runner.common.prometheus.prometheus_metrics_operations import PrometheusMetricsOperation
 
 
 class VdbenchPod(WorkloadsOperations):
@@ -58,16 +57,18 @@ class VdbenchPod(WorkloadsOperations):
             self._oc.wait_for_ready(label=f'app=vdbench-{self._trunc_uuid}-{pod_num}', label_uuid=False)
             self.__status = self._oc.wait_for_pod_completed(label=f'app=vdbench-{self._trunc_uuid}-{pod_num}', label_uuid=False, job=False)
             self.__status = 'complete' if self.__status else 'failed'
-
-
-
-
+            if self._enable_prometheus_snapshot:
+                # prometheus queries
+                self._prometheus_metrics_operation.finalize_prometheus()
+                metric_results = self._prometheus_metrics_operation.run_prometheus_queries()
+                self._prometheus_result = self._prometheus_metrics_operation.parse_prometheus_metrics(data=metric_results)
             # save run artifacts logs
             result_list = self._create_pod_run_artifacts(pod_name=f'{self.__pod_name}-{pod_num}', log_type='.csv')
             if self._es_host:
                 # upload several run results
                 for result in result_list:
-
+                    if self._enable_prometheus_snapshot:
+                        result.update(self._prometheus_result)
                     self._upload_to_elasticsearch(index=self.__es_index, kind=self.__kind, status=self.__status, result=result)
                 # verify that data upload to elastic search according to unique uuid
                 self._verify_elasticsearch_data_uploaded(index=self.__es_index, uuid=self._uuid)
@@ -94,7 +95,8 @@ class VdbenchPod(WorkloadsOperations):
         :return:
         """
         try:
-            self.
+            if self._enable_prometheus_snapshot:
+                self._prometheus_metrics_operation.init_prometheus()
             if 'kata' in self._workload:
                 self.__kind = 'kata'
                 self.__name = self._workload.replace('kata', 'pod')
@@ -119,16 +121,18 @@ class VdbenchPod(WorkloadsOperations):
             self._oc.wait_for_ready(label=f'app=vdbench-{self._trunc_uuid}', label_uuid=False)
             self.__status = self._oc.wait_for_pod_completed(label=f'app=vdbench-{self._trunc_uuid}', label_uuid=False, job=False)
             self.__status = 'complete' if self.__status else 'failed'
-
-
-
-
+            if self._enable_prometheus_snapshot:
+                # prometheus queries
+                self._prometheus_metrics_operation.finalize_prometheus()
+                metric_results = self._prometheus_metrics_operation.run_prometheus_queries()
+                self._prometheus_result = self._prometheus_metrics_operation.parse_prometheus_metrics(data=metric_results)
             # save run artifacts logs
             result_list = self._create_pod_run_artifacts(pod_name=self.__pod_name, log_type='.csv')
             if self._es_host:
                 # upload several run results
                 for result in result_list:
-
+                    if self._enable_prometheus_snapshot:
+                        result.update(self._prometheus_result)
                     self._upload_to_elasticsearch(index=self.__es_index, kind=self.__kind, status=self.__status, result=result)
                 # verify that data upload to elastic search according to unique uuid
                 self._verify_elasticsearch_data_uploaded(index=self.__es_index, uuid=self._uuid)
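The vdbench upload loops above (pod and VM alike) now merge the stored metrics into each parsed run result before it is sent to Elasticsearch. A minimal sketch of that loop, with `upload` standing in for `_upload_to_elasticsearch` and the function wrapper being illustrative only:

```python
# Illustrative sketch of the vdbench upload loop after this change; `upload` is a
# stand-in for the workload's _upload_to_elasticsearch helper.
def upload_results(result_list, es_index, kind, status, prometheus_result,
                   enable_prometheus_snapshot, upload):
    for result in result_list:
        if enable_prometheus_snapshot:
            # merge the parsed Prometheus metrics into this run's result document
            result.update(prometheus_result)
        upload(index=es_index, kind=kind, status=status, result=result)
```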
benchmark_runner/workloads/vdbench_vm.py

@@ -62,16 +62,18 @@ class VdbenchVM(WorkloadsOperations):
             self.__vm_name = self._create_vm_log(labels=[f'{self.__workload_name}-{self._trunc_uuid}-{vm_num}'])
             self.__status = self._oc.wait_for_vm_log_completed(vm_name=self.__vm_name, end_stamp=self.END_STAMP)
             self.__status = 'complete' if self.__status else 'failed'
-
-
-
-
+            if self._enable_prometheus_snapshot:
+                # prometheus queries
+                self._prometheus_metrics_operation.finalize_prometheus()
+                metric_results = self._prometheus_metrics_operation.run_prometheus_queries()
+                self._prometheus_result = self._prometheus_metrics_operation.parse_prometheus_metrics(data=metric_results)
             # save run artifacts logs
             result_list = self._create_vm_run_artifacts(vm_name=f'{self.__workload_name}-{self._trunc_uuid}-{vm_num}', start_stamp=self.START_STAMP, end_stamp=self.END_STAMP, log_type='.csv')
             if self._es_host:
                 # upload several run results
                 for result in result_list:
-
+                    if self._enable_prometheus_snapshot:
+                        result.update(self._prometheus_result)
                     self._upload_to_elasticsearch(index=self.__es_index, kind=self.__kind, status=self.__status, result=result)
                 # verify that data upload to elastic search according to unique uuid
                 self._verify_elasticsearch_data_uploaded(index=self.__es_index, uuid=self._uuid)
@@ -101,7 +103,8 @@ class VdbenchVM(WorkloadsOperations):
         :return:
         """
         try:
-            self.
+            if self._enable_prometheus_snapshot:
+                self._prometheus_metrics_operation.init_prometheus()
             self.__name = self._workload
             if self._run_type == 'test_ci':
                 self.__es_index = 'vdbench-test-ci-results'
@@ -122,14 +125,16 @@ class VdbenchVM(WorkloadsOperations):
             self.__status = 'complete' if self.__status else 'failed'
             # save run artifacts logs
             result_list = self._create_vm_run_artifacts(vm_name=self.__vm_name, start_stamp=self.START_STAMP, end_stamp=self.END_STAMP, log_type='.csv')
-
-
-
-
+            if self._enable_prometheus_snapshot:
+                # prometheus queries
+                self._prometheus_metrics_operation.finalize_prometheus()
+                metric_results = self._prometheus_metrics_operation.run_prometheus_queries()
+                self._prometheus_result = self._prometheus_metrics_operation.parse_prometheus_metrics(data=metric_results)
             if self._es_host:
                 # upload several run results
                 for result in result_list:
-
+                    if self._enable_prometheus_snapshot:
+                        result.update(self._prometheus_result)
                     self._upload_to_elasticsearch(index=self.__es_index, kind=self.__kind, status=self.__status, result=result)
                 # verify that data upload to elastic search according to unique uuid
                 self._verify_elasticsearch_data_uploaded(index=self.__es_index, uuid=self._uuid)
benchmark_runner/workloads/workloads_operations.py

@@ -96,6 +96,7 @@ class WorkloadsOperations:
         self._virtctl = Virtctl()
 
         # Prometheus Snapshot
+        self._prometheus_result = {}
         if self._enable_prometheus_snapshot:
             self._snapshot = PrometheusSnapshot(oc=self._oc, artifacts_path=self._run_artifacts_path, verbose=True)
             self._prometheus_snap_interval = self._environment_variables_dict.get('prometheus_snap_interval', '')
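The one-line addition above is what lets the workload classes reference `self._prometheus_result` unconditionally: the empty dict is a safe default whenever Prometheus snapshots are disabled. A minimal illustration of the difference; the class names and values are illustrative, not from the package:

```python
# Illustrative only: shows why the unconditional default matters.
class WithoutDefault:
    def __init__(self, enabled):
        if enabled:
            self._prometheus_result = {'metric': 1}

    def payload(self):
        # raises AttributeError when enabled is False
        return {'prometheus_result': self._prometheus_result}


class WithDefault:
    def __init__(self, enabled):
        self._prometheus_result = {}  # mirrors the added line in workloads_operations.py
        if enabled:
            self._prometheus_result = {'metric': 1}

    def payload(self):
        return {'prometheus_result': self._prometheus_result}  # always safe
```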
{benchmark_runner-1.0.789.dist-info → benchmark_runner-1.0.791.dist-info}/RECORD

@@ -2,13 +2,13 @@ benchmark_runner/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
 benchmark_runner/benchmark_operator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 benchmark_runner/benchmark_operator/benchmark_operator_exceptions.py,sha256=5BCKcvLBMLFW4-XgZ7uXrja7uOE0UaplxAsdkKTpmek,1668
 benchmark_runner/benchmark_operator/benchmark_operator_workloads.py,sha256=dL09ni6x4yaNFHs5djEebe4KE7M4n_rupWH7ngHpJwc,1425
-benchmark_runner/benchmark_operator/benchmark_operator_workloads_operations.py,sha256=
-benchmark_runner/benchmark_operator/hammerdb_pod.py,sha256=
-benchmark_runner/benchmark_operator/hammerdb_vm.py,sha256=
-benchmark_runner/benchmark_operator/stressng_pod.py,sha256=
-benchmark_runner/benchmark_operator/stressng_vm.py,sha256=
-benchmark_runner/benchmark_operator/uperf_pod.py,sha256=
-benchmark_runner/benchmark_operator/uperf_vm.py,sha256=
+benchmark_runner/benchmark_operator/benchmark_operator_workloads_operations.py,sha256=WLubpoEtKU6G1DJ45jRW4stQGLOx_K2jq66Fk7DY9zg,25062
+benchmark_runner/benchmark_operator/hammerdb_pod.py,sha256=3wrjXW0XZwhWKamk_Yw82ibo5RmTUva4rpmcVogXqF8,8257
+benchmark_runner/benchmark_operator/hammerdb_vm.py,sha256=0FhgEvfgbKwUyLEK3be9LYoishOkM4VZqhcTp6zIUr8,5915
+benchmark_runner/benchmark_operator/stressng_pod.py,sha256=4K8PGSv-OCfyBMelJjscVtsCtuvhEx1AuO0biewGF5Q,5398
+benchmark_runner/benchmark_operator/stressng_vm.py,sha256=WtnHxus-LCLZvAwg4NDc_ur2qQFPQMFPtplbj3cKThQ,5207
+benchmark_runner/benchmark_operator/uperf_pod.py,sha256=6_4fiZ8ZjNzgzc8qyVoXGL8kp4qRksI0dujE0NmSsCQ,6130
+benchmark_runner/benchmark_operator/uperf_vm.py,sha256=jiF-Fj_QoF86MOuOO4sboztTg_lIXhw7R0CF_cZLYHc,5446
 benchmark_runner/clusterbuster/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 benchmark_runner/clusterbuster/clusterbuster_exceptions.py,sha256=KwtlsmnCDtbYX8mNoo1T4Djszz_kgc5aup-FUA_Y1SY,754
 benchmark_runner/clusterbuster/clusterbuster_workloads.py,sha256=Ep_jhk9T4NnOKDbWrxjHEaGNKnFFnMDnpm_ZcaOuJvk,6736
@@ -166,15 +166,15 @@ benchmark_runner/main/environment_variables_exceptions.py,sha256=UR0Ith0P0oshsDZ
 benchmark_runner/main/main.py,sha256=A744O550wQh37hhk10H0HlT28LZ_2EOaRlJyWG6Pras,14083
 benchmark_runner/main/temporary_environment_variables.py,sha256=ODSHkfhgvdr_b2e3XyvykW21MVjSdyqimREyMc2klRE,957
 benchmark_runner/workloads/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-benchmark_runner/workloads/bootstorm_vm.py,sha256=
-benchmark_runner/workloads/vdbench_pod.py,sha256=
-benchmark_runner/workloads/vdbench_vm.py,sha256=
+benchmark_runner/workloads/bootstorm_vm.py,sha256=ykpr__vdOPssp1E6Z3kGWuJbPhgLFMlWR7-ZdYy9ISY,20084
+benchmark_runner/workloads/vdbench_pod.py,sha256=feu3lvNumfBCD-An6__xS5Kt9nA50A_-0FmqTXuU9iw,10011
+benchmark_runner/workloads/vdbench_vm.py,sha256=4rRbE-jAbmNrhP-k8F_OREkJ59VfQ7wLrfRQPwDneJg,9786
 benchmark_runner/workloads/windows_vm.py,sha256=qFVD3qBFMnVpYXnrpam-7H5-0Yzvx6qtaEEZx4T-ex4,2415
 benchmark_runner/workloads/workloads.py,sha256=F9fnk4h715tq7ANSCbDH0jktB8fpr_u3YG61Kdi5_os,1422
 benchmark_runner/workloads/workloads_exceptions.py,sha256=u7VII95iPRF_YhfpGH1U1RmgiIYESMOtbSF1dz7_ToE,1858
-benchmark_runner/workloads/workloads_operations.py,sha256=
-benchmark_runner-1.0.
-benchmark_runner-1.0.
-benchmark_runner-1.0.
-benchmark_runner-1.0.
-benchmark_runner-1.0.
+benchmark_runner/workloads/workloads_operations.py,sha256=788ZqnMsNHcU2jK8SXy5bJFrkDBJrHogzFWZeWQr8FQ,25361
+benchmark_runner-1.0.791.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+benchmark_runner-1.0.791.dist-info/METADATA,sha256=gXZ4irZCjjDt56XR8c9PcfeEEH33YU9kYDQuAKqxq2o,11520
+benchmark_runner-1.0.791.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+benchmark_runner-1.0.791.dist-info/top_level.txt,sha256=MP7UbTCzu59D53uKCZl5VsQeM_vheyMc7FmryczJQbk,17
+benchmark_runner-1.0.791.dist-info/RECORD,,
WHEEL, licenses/LICENSE, top_level.txt: files without changes.