argus-alm 0.15.2__py3-none-any.whl → 0.15.3__py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- argus/_version.py +2 -2
- argus/client/generic_result.py +6 -1
- {argus_alm-0.15.2.dist-info → argus_alm-0.15.3.dist-info}/METADATA +1 -1
- argus_alm-0.15.3.dist-info/RECORD +22 -0
- argus/backend/.gitkeep +0 -0
- argus/backend/__init__.py +0 -0
- argus/backend/cli.py +0 -57
- argus/backend/controller/__init__.py +0 -0
- argus/backend/controller/admin.py +0 -20
- argus/backend/controller/admin_api.py +0 -355
- argus/backend/controller/api.py +0 -589
- argus/backend/controller/auth.py +0 -67
- argus/backend/controller/client_api.py +0 -109
- argus/backend/controller/main.py +0 -316
- argus/backend/controller/notification_api.py +0 -72
- argus/backend/controller/notifications.py +0 -13
- argus/backend/controller/planner_api.py +0 -194
- argus/backend/controller/team.py +0 -129
- argus/backend/controller/team_ui.py +0 -19
- argus/backend/controller/testrun_api.py +0 -513
- argus/backend/controller/view_api.py +0 -188
- argus/backend/controller/views_widgets/__init__.py +0 -0
- argus/backend/controller/views_widgets/graphed_stats.py +0 -54
- argus/backend/controller/views_widgets/graphs.py +0 -68
- argus/backend/controller/views_widgets/highlights.py +0 -135
- argus/backend/controller/views_widgets/nemesis_stats.py +0 -26
- argus/backend/controller/views_widgets/summary.py +0 -43
- argus/backend/db.py +0 -98
- argus/backend/error_handlers.py +0 -41
- argus/backend/events/event_processors.py +0 -34
- argus/backend/models/__init__.py +0 -0
- argus/backend/models/argus_ai.py +0 -24
- argus/backend/models/github_issue.py +0 -60
- argus/backend/models/plan.py +0 -24
- argus/backend/models/result.py +0 -187
- argus/backend/models/runtime_store.py +0 -58
- argus/backend/models/view_widgets.py +0 -25
- argus/backend/models/web.py +0 -403
- argus/backend/plugins/__init__.py +0 -0
- argus/backend/plugins/core.py +0 -248
- argus/backend/plugins/driver_matrix_tests/controller.py +0 -66
- argus/backend/plugins/driver_matrix_tests/model.py +0 -429
- argus/backend/plugins/driver_matrix_tests/plugin.py +0 -21
- argus/backend/plugins/driver_matrix_tests/raw_types.py +0 -62
- argus/backend/plugins/driver_matrix_tests/service.py +0 -61
- argus/backend/plugins/driver_matrix_tests/udt.py +0 -42
- argus/backend/plugins/generic/model.py +0 -86
- argus/backend/plugins/generic/plugin.py +0 -15
- argus/backend/plugins/generic/types.py +0 -14
- argus/backend/plugins/loader.py +0 -39
- argus/backend/plugins/sct/controller.py +0 -224
- argus/backend/plugins/sct/plugin.py +0 -37
- argus/backend/plugins/sct/resource_setup.py +0 -177
- argus/backend/plugins/sct/service.py +0 -682
- argus/backend/plugins/sct/testrun.py +0 -288
- argus/backend/plugins/sct/udt.py +0 -100
- argus/backend/plugins/sirenada/model.py +0 -118
- argus/backend/plugins/sirenada/plugin.py +0 -16
- argus/backend/service/admin.py +0 -26
- argus/backend/service/argus_service.py +0 -696
- argus/backend/service/build_system_monitor.py +0 -185
- argus/backend/service/client_service.py +0 -127
- argus/backend/service/event_service.py +0 -18
- argus/backend/service/github_service.py +0 -233
- argus/backend/service/jenkins_service.py +0 -269
- argus/backend/service/notification_manager.py +0 -159
- argus/backend/service/planner_service.py +0 -608
- argus/backend/service/release_manager.py +0 -229
- argus/backend/service/results_service.py +0 -690
- argus/backend/service/stats.py +0 -610
- argus/backend/service/team_manager_service.py +0 -82
- argus/backend/service/test_lookup.py +0 -172
- argus/backend/service/testrun.py +0 -489
- argus/backend/service/user.py +0 -308
- argus/backend/service/views.py +0 -219
- argus/backend/service/views_widgets/__init__.py +0 -0
- argus/backend/service/views_widgets/graphed_stats.py +0 -180
- argus/backend/service/views_widgets/highlights.py +0 -374
- argus/backend/service/views_widgets/nemesis_stats.py +0 -34
- argus/backend/template_filters.py +0 -27
- argus/backend/tests/__init__.py +0 -0
- argus/backend/tests/client_service/__init__.py +0 -0
- argus/backend/tests/client_service/test_submit_results.py +0 -79
- argus/backend/tests/conftest.py +0 -180
- argus/backend/tests/results_service/__init__.py +0 -0
- argus/backend/tests/results_service/test_best_results.py +0 -178
- argus/backend/tests/results_service/test_cell.py +0 -65
- argus/backend/tests/results_service/test_chartjs_additional_functions.py +0 -259
- argus/backend/tests/results_service/test_create_chartjs.py +0 -220
- argus/backend/tests/results_service/test_result_metadata.py +0 -100
- argus/backend/tests/results_service/test_results_service.py +0 -203
- argus/backend/tests/results_service/test_validation_rules.py +0 -213
- argus/backend/tests/view_widgets/__init__.py +0 -0
- argus/backend/tests/view_widgets/test_highlights_api.py +0 -532
- argus/backend/util/common.py +0 -65
- argus/backend/util/config.py +0 -38
- argus/backend/util/encoders.py +0 -56
- argus/backend/util/logsetup.py +0 -80
- argus/backend/util/module_loaders.py +0 -30
- argus/backend/util/send_email.py +0 -91
- argus/client/tests/__init__.py +0 -0
- argus/client/tests/conftest.py +0 -19
- argus/client/tests/test_package.py +0 -45
- argus/client/tests/test_results.py +0 -224
- argus_alm-0.15.2.dist-info/RECORD +0 -122
- {argus_alm-0.15.2.dist-info → argus_alm-0.15.3.dist-info}/WHEEL +0 -0
- {argus_alm-0.15.2.dist-info → argus_alm-0.15.3.dist-info}/entry_points.txt +0 -0
- {argus_alm-0.15.2.dist-info → argus_alm-0.15.3.dist-info}/licenses/LICENSE +0 -0
- {argus_alm-0.15.2.dist-info → argus_alm-0.15.3.dist-info}/top_level.txt +0 -0
argus/backend/service/results_service.py
@@ -1,690 +0,0 @@
-import copy
-import logging
-import math
-import operator
-from collections import defaultdict
-from datetime import datetime, timezone
-from functools import partial, cache
-from typing import List, Dict, Any
-from uuid import UUID, uuid4
-
-from dataclasses import dataclass
-from argus.backend.db import ScyllaCluster
-from argus.backend.models.result import ArgusGenericResultMetadata, ArgusGenericResultData, ArgusBestResultData, ColumnMetadata, ArgusGraphView
-from argus.backend.plugins.sct.udt import PackageVersion
-from argus.backend.service.testrun import TestRunService
-
-LOGGER = logging.getLogger(__name__)
-
-type RunId = str
-type ReleasesMap = dict[str, list[RunId]]
-
-
-@dataclass
-class BestResult:
-    key: str
-    value: float
-    result_date: datetime
-    run_id: str
-
-
-@dataclass
-class Cell:
-    column: str
-    row: str
-    status: str
-    value: Any | None = None
-    value_text: str | None = None
-
-    def update_cell_status_based_on_rules(self, table_metadata: ArgusGenericResultMetadata, best_results: dict[str, List[BestResult]],
-                                          ) -> None:
-        column_validation_rules = table_metadata.validation_rules.get(self.column)
-        rules = column_validation_rules[-1] if column_validation_rules else {}
-        higher_is_better = next(
-            (col.higher_is_better for col in table_metadata.columns_meta if col.name == self.column), None)
-        if not rules or self.status != "UNSET" or higher_is_better is None:
-            return
-        is_better = partial(operator.gt, self.value) if higher_is_better else partial(operator.lt, self.value)
-        key = f"{self.column}:{self.row}"
-        limits = []
-        if rules.fixed_limit is not None:
-            limits.append(rules.fixed_limit)
-
-        if best_result := best_results.get(key):
-            best_value = best_result[-1].value
-            if (best_pct := rules.best_pct) is not None:
-                multiplier = 1 - best_pct / 100 if higher_is_better else 1 + best_pct / 100
-                limits.append(best_value * multiplier)
-            if (best_abs := rules.best_abs) is not None:
-                limits.append(best_value - best_abs if higher_is_better else best_value + best_abs)
-        if all(is_better(limit) for limit in limits):
-            self.status = "PASS"
-        else:
-            self.status = "ERROR"
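Note: the removed validation logic above reduces to simple arithmetic — a cell passes only if its value beats every active limit, where limits come from a fixed threshold, a percentage margin off the best result, or an absolute margin off the best result. A minimal standalone sketch of that comparison (the function name and sample numbers are illustrative, not part of the package):

    import operator
    from functools import partial

    def compute_limits(higher_is_better: bool, best_value: float,
                       fixed_limit: float | None = None,
                       best_pct: float | None = None,
                       best_abs: float | None = None) -> list[float]:
        """Collect every active limit, mirroring the three rule kinds above."""
        limits = []
        if fixed_limit is not None:
            limits.append(fixed_limit)
        if best_pct is not None:
            # e.g. best_pct=10 tolerates a 10% regression from the best result
            multiplier = 1 - best_pct / 100 if higher_is_better else 1 + best_pct / 100
            limits.append(best_value * multiplier)
        if best_abs is not None:
            limits.append(best_value - best_abs if higher_is_better else best_value + best_abs)
        return limits

    # Throughput-style metric (higher is better): best was 100 ops/s,
    # a 10% margin puts the error threshold at 90.
    limits = compute_limits(True, best_value=100.0, best_pct=10.0)
    is_better = partial(operator.gt, 93.5)  # candidate cell value is 93.5
    print("PASS" if all(is_better(limit) for limit in limits) else "ERROR")  # PASS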
-
-
-@dataclass
-class RunsDetails:
-    ignored: list[RunId]
-    packages: dict[RunId, list[PackageVersion]]
-
-
-default_options = {
-    "scales": {
-        "y": {
-            "beginAtZero": True,
-            "title": {
-                "display": True,
-                "text": ''
-            }
-        },
-        "x": {
-            "type": "time",
-            "time": {
-                "unit": "day",
-                "displayFormats": {
-                    "day": "yyyy-MM-dd",
-                },
-            },
-            "title": {
-                "display": True,
-                "text": 'SUT Date'
-            }
-        },
-    },
-    "elements": {
-        "line": {
-            "tension": .1,
-        }
-    },
-    "plugins": {
-        "legend": {
-            "position": 'top',
-        },
-        "title": {
-            "display": True,
-            "text": ''
-        }
-    }
-}
-
-colors = [
-    'rgba(220, 53, 69, 1.0)',  # Soft Red
-    'rgba(40, 167, 69, 1.0)',  # Soft Green
-    'rgba(0, 123, 255, 1.0)',  # Soft Blue
-    'rgba(23, 162, 184, 1.0)',  # Soft Cyan
-    'rgba(255, 193, 7, 1.0)',  # Soft Yellow
-    'rgba(255, 133, 27, 1.0)',  # Soft Orange
-    'rgba(102, 16, 242, 1.0)',  # Soft Purple
-    'rgba(111, 207, 151, 1.0)',  # Soft Lime
-    'rgba(255, 182, 193, 1.0)',  # Soft Pink
-    'rgba(32, 201, 151, 1.0)',  # Soft Teal
-    'rgba(134, 83, 78, 1.0)',  # Soft Brown
-    'rgba(0, 84, 153, 1.0)',  # Soft Navy
-    'rgba(128, 128, 0, 1.0)',  # Soft Olive
-    'rgba(255, 159, 80, 1.0)'  # Soft Coral
-]
-shapes = ["circle", "triangle", "rect", "star", "dash", "crossRot", "line"]
-dash_patterns = [
-    [0, 0],  # Solid line
-    [10, 5],  # Long dash
-    [5, 10],  # Long gap
-    [15, 5, 5, 5],  # Alternating long and short dashes
-    [5, 5, 1, 5],  # Mixed small dash and gap
-    [10, 10, 5, 5],  # Alternating medium and small dashes
-    [20, 5],  # Very long dash
-    [10, 5, 2, 5],  # Long, medium, and small dashes
-    [5, 5],  # Standard dashed
-]
-
-
-def get_sorted_data_for_column_and_row(data: List[ArgusGenericResultData], column: str, row: str,
-                                       runs_details: RunsDetails, main_package: str) -> List[Dict[str, Any]]:
-    points = sorted([{"x": entry.sut_timestamp.strftime('%Y-%m-%dT%H:%M:%SZ'),
-                      "y": entry.value,
-                      "id": entry.run_id,
-                      }
-                     for entry in data if entry.column == column and entry.row == row],
-                    key=lambda point: point["x"])
-    if not points:
-        return points
-    packages = runs_details.packages
-    prev_versions = {pkg.name: pkg.version + (f" ({pkg.date})" if pkg.date else "")
-                     for pkg in packages.get(points[0]["id"], [])}
-    points[0]['changes'] = [f"{main_package}: {prev_versions.pop(main_package, None)}"]
-    points[0]['dep_change'] = False
-    for point in points[1:]:
-        changes = []
-        mark_dependency_change = False
-        current_versions = {pkg.name: pkg.version + (f" ({pkg.date})" if pkg.date else "")
-                            for pkg in packages.get(point["id"], [])}
-        main_package_version = current_versions.pop(main_package, None)
-        for pkg_name in current_versions.keys() | prev_versions.keys():
-            curr_ver = current_versions.get(pkg_name)
-            prev_ver = prev_versions.get(pkg_name)
-            if curr_ver != prev_ver:
-                changes.append({'name': pkg_name, 'prev_version': prev_ver, 'curr_version': curr_ver})
-                if pkg_name != main_package:
-                    mark_dependency_change = True
-        point['changes'] = [f"{main_package}: {main_package_version}"] + [
-            f"{change['name']}: {change['prev_version']} -> {change['curr_version']}" for change in changes]
-        point['dep_change'] = mark_dependency_change
-        prev_versions = current_versions
-    return points
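Note: the per-point annotation above is a dict diff over package versions — keys from either run are compared, any mismatch is recorded, and a change in a non-main package flags the point as a dependency change. A minimal sketch of that comparison (package names and versions are made up for illustration):

    # Hypothetical version maps for two consecutive runs.
    prev_versions = {"scylla-server": "6.0.1", "cassandra-driver": "3.28"}
    current_versions = {"scylla-server": "6.0.2", "cassandra-driver": "3.29"}
    main_package = "scylla-server"

    changes = []
    dep_change = False
    for pkg_name in current_versions.keys() | prev_versions.keys():
        prev_ver, curr_ver = prev_versions.get(pkg_name), current_versions.get(pkg_name)
        if curr_ver != prev_ver:
            changes.append(f"{pkg_name}: {prev_ver} -> {curr_ver}")
            if pkg_name != main_package:
                dep_change = True  # a dependency, not the SUT itself, moved

    print(sorted(changes))  # ['cassandra-driver: 3.28 -> 3.29', 'scylla-server: 6.0.1 -> 6.0.2']
    print(dep_change)       # True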
-
-
-def get_min_max_y(datasets: List[Dict[str, Any]]) -> (float, float):
-    """0.5 - 1.5 of min/max of 50% results"""
-    y = [entry['y'] for dataset in datasets for entry in dataset['data']]
-    if not y:
-        return 0, 0
-    sorted_y = sorted(y)
-    lower_percentile_index = int(0.25 * len(sorted_y))
-    upper_percentile_index = int(0.75 * len(sorted_y)) - 1
-    y_min = sorted_y[lower_percentile_index]
-    y_max = sorted_y[upper_percentile_index]
-    return math.floor(0.5 * y_min), math.ceil(1.5 * y_max)
-
-
-def coerce_values_to_axis_boundaries(datasets: List[Dict[str, Any]], min_y: float, max_y: float) -> List[Dict[str, Any]]:
-    """Round values to min/max and provide original value for tooltip"""
-    for dataset in datasets:
-        for entry in dataset['data']:
-            val = entry['y']
-            if val > max_y:
-                entry['y'] = max_y
-                entry['ori'] = val
-            elif val < min_y:
-                entry['y'] = min_y
-                entry['ori'] = val
-    return datasets
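Note: together these two helpers keep outliers from flattening a chart — the y-axis spans 0.5x to 1.5x of the interquartile values, and anything outside is clamped to the boundary while the true value is kept under an `ori` key for the tooltip. A small self-contained run of the same logic (sample data only):

    import math

    values = [10, 11, 12, 13, 500]  # one outlier
    sorted_y = sorted(values)
    q1 = sorted_y[int(0.25 * len(sorted_y))]      # 11
    q3 = sorted_y[int(0.75 * len(sorted_y)) - 1]  # 12
    min_y, max_y = math.floor(0.5 * q1), math.ceil(1.5 * q3)
    print(min_y, max_y)  # 5 18

    entry = {"y": 500}
    if entry["y"] > max_y:
        entry["ori"] = entry["y"]  # original value kept for the tooltip
        entry["y"] = max_y
    print(entry)  # {'y': 18, 'ori': 500}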
-
-
-def calculate_limits(points: List[dict], best_results: List, validation_rules_list: List, higher_is_better: bool) -> List[dict]:
-    """Calculate limits for points based on best results and validation rules"""
-    for point in points:
-        point_date = datetime.strptime(point["x"], '%Y-%m-%dT%H:%M:%SZ')
-        validation_rule = next(
-            (rule for rule in reversed(validation_rules_list) if rule.valid_from <= point_date),
-            validation_rules_list[0]
-        )
-        best_result = next(
-            (result for result in reversed(best_results) if result.result_date <= point_date),
-            best_results[0]
-        )
-        limit_values = []
-        if validation_rule.fixed_limit is not None:
-            limit_values.append(validation_rule.fixed_limit)
-        best_value = best_result.value
-        if validation_rule.best_pct is not None:
-            multiplier = 1 - validation_rule.best_pct / 100 if higher_is_better else 1 + validation_rule.best_pct / 100
-            limit_values.append(best_value * multiplier)
-        if validation_rule.best_abs is not None:
-            limit_values.append(
-                best_value - validation_rule.best_abs if higher_is_better else best_value + validation_rule.best_abs)
-        if limit_values:
-            limit_value = max(limit_values) if higher_is_better else min(limit_values)
-            point['limit'] = limit_value
-
-    return points
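Note: the `next(... reversed(...) ...)` idiom above picks, for each point, the newest rule or best result whose date does not postdate the point, falling back to the oldest entry. A standalone sketch of that lookup (the `Record` type is a hypothetical stand-in for the rule/best-result objects):

    from dataclasses import dataclass
    from datetime import datetime

    @dataclass
    class Record:
        valid_from: datetime
        value: float

    # Chronologically ordered history of limits.
    history = [
        Record(datetime(2024, 1, 1), 100.0),
        Record(datetime(2024, 6, 1), 120.0),
    ]

    point_date = datetime(2024, 3, 15)
    # Newest record not newer than the point; oldest record as fallback.
    active = next((r for r in reversed(history) if r.valid_from <= point_date), history[0])
    print(active.value)  # 100.0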
-
-
-def create_datasets_for_column(table: ArgusGenericResultMetadata, data: list[ArgusGenericResultData],
-                               best_results: dict[str, List[BestResult]], releases_map: ReleasesMap, column: ColumnMetadata,
-                               runs_details: RunsDetails, main_package: str) -> List[Dict]:
-    """
-    Create datasets (series) for a specific column, splitting by version and showing limit lines.
-    """
-    datasets = []
-    is_fixed_limit_drawn = False
-
-    for idx, row in enumerate(table.rows_meta):
-        line_color = colors[idx % len(colors)]
-        line_dash = dash_patterns[idx % len(dash_patterns)]
-        points = get_sorted_data_for_column_and_row(data, column.name, row, runs_details, main_package)
-
-        datasets.extend(create_release_datasets(points, row, releases_map, line_dash))
-
-        limit_dataset = create_limit_dataset(points, column, row, best_results, table, line_color, is_fixed_limit_drawn)
-        if limit_dataset:
-            datasets.append(limit_dataset)
-            is_fixed_limit_drawn = True
-
-    return datasets
-
-
-def create_release_datasets(points: list[Dict], row: str, releases_map: ReleasesMap, line_dash: list[int]) -> List[Dict]:
-    """
-    Create datasets separately for each release.
-    """
-    release_datasets = []
-
-    for v_idx, (release, run_ids) in enumerate(releases_map.items()):
-        release_points = [point for point in points if point["id"] in run_ids]
-
-        if release_points:
-            release_datasets.append({
-                "label": f"{release} - {row}",
-                "borderColor": colors[v_idx % len(colors)],
-                "borderWidth": 2,
-                "pointRadius": 3,
-                "showLine": True,
-                "data": release_points,
-                "borderDash": line_dash
-            })
-
-    return release_datasets
-
-
-def create_limit_dataset(points: list[Dict], column: ColumnMetadata, row: str, best_results: dict[str, List[BestResult]],
-                         table: ArgusGenericResultMetadata, line_color: str, is_fixed_limit_drawn: bool) -> Dict | None:
-    """
-    Create a dataset for limit lines if applicable.
-    """
-    key = f"{column.name}:{row}"
-    higher_is_better = column.higher_is_better
-
-    if higher_is_better is None:
-        return None
-
-    best_result_list = best_results.get(key, [])
-    validation_rules_list = table.validation_rules.get(column.name, [])
-
-    if validation_rules_list and best_result_list:
-        points = calculate_limits(points, best_result_list, validation_rules_list, higher_is_better)
-    limit_points = [{"x": point["x"], "y": point["limit"]} for point in points if 'limit' in point]
-
-    if limit_points and not is_fixed_limit_drawn:
-        return {
-            "label": "error threshold",
-            "borderColor": line_color,
-            "borderWidth": 2,
-            "borderDash": [5, 5],
-            "fill": False,
-            "data": limit_points,
-            "showLine": True,
-            "pointRadius": 0,
-            "pointHitRadius": 0,
-        }
-
-    return None
-
-
-def create_chart_options(table: ArgusGenericResultMetadata, column: ColumnMetadata, min_y: float, max_y: float) -> Dict:
-    """
-    Create options for Chart.js, including title and y-axis configuration.
-    """
-    options = copy.deepcopy(default_options)
-    options["plugins"]["title"]["text"] = f"{table.name} - {column.name}"
-    options["plugins"]["subtitle"] = {"text": table.description, "display": True} if table.description else {"text": ""}
-    options["scales"]["y"]["title"]["text"] = f"[{column.unit}]" if column.unit else ""
-    options["scales"]["y"]["min"] = min_y
-    options["scales"]["y"]["max"] = max_y
-    return options
-
-
-def calculate_graph_ticks(graphs: List[Dict]) -> dict[str, str]:
-    min_x, max_x = None, None
-
-    for graph in graphs:
-        for dataset in graph["data"]["datasets"]:
-            if not dataset["data"]:
-                continue
-            first_x = dataset["data"][0]["x"]
-            last_x = dataset["data"][-1]["x"]
-            if min_x is None or first_x < min_x:
-                min_x = first_x
-            if max_x is None or last_x > max_x:
-                max_x = last_x
-    if not max_x or not min_x:
-        return {}  # no data
-    return {"min": min_x[:10], "max": max_x[:10]}
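Note: because every dataset is already sorted by its ISO-8601 `x` value, plain string comparison is enough to find the global date range, and slicing to ten characters trims the timestamps to `YYYY-MM-DD` ticks. A compact equivalent on sample data:

    datasets = [
        {"data": [{"x": "2024-01-05T00:00:00Z"}, {"x": "2024-02-01T00:00:00Z"}]},
        {"data": []},  # empty series are skipped
        {"data": [{"x": "2024-01-02T12:00:00Z"}, {"x": "2024-01-20T00:00:00Z"}]},
    ]

    min_x = min(d["data"][0]["x"] for d in datasets if d["data"])
    max_x = max(d["data"][-1]["x"] for d in datasets if d["data"])
    print({"min": min_x[:10], "max": max_x[:10]})  # {'min': '2024-01-02', 'max': '2024-02-01'}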
-
-
-def _identify_most_changed_package(packages_list: list[PackageVersion]) -> str:
-    version_date_changes: dict[str, set[tuple[str, str]]] = defaultdict(set)
-
-    # avoid counting irrelevant packages when detecting automatically
-    packages_list = [pkg for pkg in packages_list if pkg.name in (
-        'scylla-server-upgraded', 'scylla-server', 'scylla-manager-server')]
-    for package_version in packages_list:
-        version_date_changes[package_version.name].add((package_version.version, package_version.date))
-
-    return max(version_date_changes, key=lambda k: len(version_date_changes[k]))
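Note: this heuristic counts distinct (version, date) pairs per candidate package and declares the one with the most churn to be the system under test. A standalone sketch, with tuples standing in for `PackageVersion` rows:

    from collections import defaultdict

    # Hypothetical (name, version, date) triples observed across runs.
    observed = [
        ("scylla-server", "6.0.1", "2024-01-01"),
        ("scylla-server", "6.0.2", "2024-02-01"),
        ("scylla-server", "6.0.3", "2024-03-01"),
        ("scylla-manager-server", "3.2.0", "2024-01-01"),
    ]

    changes: dict[str, set[tuple[str, str]]] = defaultdict(set)
    for name, version, date in observed:
        changes[name].add((version, date))

    # The package with the most distinct (version, date) pairs wins.
    print(max(changes, key=lambda k: len(changes[k])))  # scylla-server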
-
-
-def _split_results_by_release(packages: dict[str, list[PackageVersion]], main_package: str) -> ReleasesMap:
-    releases_map = defaultdict(list)
-    for run_id, package_versions in packages.items():
-        for package in package_versions:
-            if package.name == main_package:
-                if "dev" in package.version:
-                    major_version = 'dev'
-                else:
-                    major_version = '.'.join(package.version.split('.')[:2])
-                releases_map[major_version].append(run_id)
-    return releases_map
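Note: runs are bucketed by the main package's `major.minor` prefix, with anything containing `dev` grouped under a single `dev` series. A quick illustration of the version-to-bucket mapping (version strings invented for the example):

    def release_bucket(version: str) -> str:
        # "dev" builds share one bucket; everything else keys on major.minor.
        return "dev" if "dev" in version else ".".join(version.split(".")[:2])

    for v in ["6.0.1", "6.0.3", "6.1.0", "6.2.0~dev-0.20240901"]:
        print(v, "->", release_bucket(v))
    # 6.0.1 -> 6.0
    # 6.0.3 -> 6.0
    # 6.1.0 -> 6.1
    # 6.2.0~dev-0.20240901 -> dev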
-
-
-def create_chartjs(table: ArgusGenericResultMetadata, data: list[ArgusGenericResultData], best_results: dict[str, List[BestResult]],
-                   releases_map: ReleasesMap, runs_details: RunsDetails, main_package: str) -> List[Dict]:
-    """
-    Create a Chart.js-compatible graph for each column in the table.
-    """
-    graphs = []
-    columns = [column for column in table.columns_meta
-               if column.type != "TEXT" and column.visible is not False]
-
-    for column in columns:
-        datasets = create_datasets_for_column(table, data, best_results, releases_map,
-                                              column, runs_details, main_package)
-
-        if datasets:
-            min_y, max_y = get_min_max_y(datasets)
-            datasets = coerce_values_to_axis_boundaries(datasets, min_y, max_y)
-            options = create_chart_options(table, column, min_y, max_y)
-            graphs.append({"options": options, "data": {"datasets": datasets}})
-
-    return graphs
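Note: each element the function emits is a plain dict mirroring a Chart.js config, so the frontend can hand it to the charting library directly. Schematically, one graph looks roughly like the following (all values illustrative, not taken from the package):

    graph = {
        "options": {
            "plugins": {"title": {"display": True, "text": "Latency results - p99 latency"}},
            "scales": {"y": {"min": 5, "max": 18, "title": {"display": True, "text": "[ms]"}}},
        },
        "data": {
            "datasets": [
                {"label": "6.0 - write", "showLine": True,
                 "data": [{"x": "2024-01-02T12:00:00Z", "y": 12.3, "id": "run-uuid"}]},
                {"label": "error threshold", "borderDash": [5, 5],
                 "data": [{"x": "2024-01-02T12:00:00Z", "y": 15.0}]},
            ]
        },
    }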
-
-
-class ResultsService:
-
-    def __init__(self):
-        self.cluster = ScyllaCluster.get()
-
-    def _remove_duplicate_packages(self, packages: List[PackageVersion]) -> List[PackageVersion]:
-        """Removes scylla packages that are considered duplicates:
-        scylla-server-upgraded, scylla-server-upgrade-target, scylla-server, scylla-server-target
-        (first found is kept)"""
-        packages_to_remove = ["scylla-server-upgraded",
-                              "scylla-server-upgrade-target", "scylla-server", "scylla-server-target"]
-        for package in packages_to_remove[:]:
-            if any(package == p.name for p in packages):
-                packages_to_remove.remove(package)
-                break
-        packages = [p for p in packages if p.name not in packages_to_remove]
-        return packages
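Note: the loop walks the candidate names in priority order, keeps the first one actually present, and filters out the rest, so only one scylla-server variant survives per run. A self-contained sketch with a namedtuple standing in for `PackageVersion`:

    from collections import namedtuple

    Pkg = namedtuple("Pkg", "name version")
    packages = [Pkg("scylla-server", "6.0.1"), Pkg("scylla-server-upgraded", "6.0.2")]

    candidates = ["scylla-server-upgraded", "scylla-server-upgrade-target",
                  "scylla-server", "scylla-server-target"]
    to_remove = candidates[:]
    for name in candidates:
        if any(name == p.name for p in packages):
            to_remove.remove(name)  # keep the highest-priority variant present
            break

    print([p.name for p in packages if p.name not in to_remove])
    # ['scylla-server-upgraded']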
-
-    @cache
-    def _get_runs_details(self, test_id: UUID) -> RunsDetails:
-        plugin_query = self.cluster.prepare("SELECT id, plugin_name FROM argus_test_v2 WHERE id = ?")
-        plugin_name = self.cluster.session.execute(plugin_query, parameters=(test_id,)).one()['plugin_name']
-        plugin = TestRunService().get_plugin(plugin_name)
-        runs_details_query = self.cluster.prepare(
-            f"SELECT id, investigation_status, packages FROM {plugin.model.table_name()} WHERE test_id = ?")
-        rows = self.cluster.session.execute(runs_details_query, parameters=(test_id,)).all()
-        ignored_runs = [row["id"] for row in rows if row["investigation_status"].lower() == "ignored"]
-        packages = {row["id"]: self._remove_duplicate_packages(
-            row["packages"]) for row in rows if row["packages"] and row["id"] not in ignored_runs}
-        return RunsDetails(ignored=ignored_runs, packages=packages)
-
-    def _get_tables_metadata(self, test_id: UUID) -> list[ArgusGenericResultMetadata]:
-        query_fields = ["name", "description", "columns_meta", "rows_meta", "validation_rules", "sut_package_name"]
-        raw_query = (f"SELECT {','.join(query_fields)}"
-                     f" FROM generic_result_metadata_v1 WHERE test_id = ?")
-        query = self.cluster.prepare(raw_query)
-        tables_meta = self.cluster.session.execute(query=query, parameters=(test_id,))
-        return [ArgusGenericResultMetadata(**table) for table in tables_meta]
-
-    def _get_tables_data(self, test_id: UUID, table_name: str, ignored_runs: list[RunId],
-                         start_date: datetime | None = None, end_date: datetime | None = None) -> list[ArgusGenericResultData]:
-        query_fields = ["run_id", "column", "row", "value", "status", "sut_timestamp"]
-        raw_query = (f"SELECT {','.join(query_fields)}"
-                     f" FROM generic_result_data_v1 WHERE test_id = ? AND name = ?")
-
-        parameters = [test_id, table_name]
-
-        if start_date:
-            raw_query += " AND sut_timestamp >= ?"
-            parameters.append(start_date)
-        if end_date:
-            raw_query += " AND sut_timestamp <= ?"
-            parameters.append(end_date)
-
-        if start_date or end_date:
-            raw_query += " ALLOW FILTERING"
-        query = self.cluster.prepare(raw_query)
-        data = self.cluster.session.execute(query=query, parameters=tuple(parameters))
-        return [ArgusGenericResultData(**cell) for cell in data if cell["run_id"] not in ignored_runs]
-
-    def get_table_metadata(self, test_id: UUID, table_name: str) -> ArgusGenericResultMetadata:
-        raw_query = "SELECT * FROM generic_result_metadata_v1 WHERE test_id = ? AND name = ?"
-        query = self.cluster.prepare(raw_query)
-        table_meta = self.cluster.session.execute(query=query, parameters=(test_id, table_name))
-        return [ArgusGenericResultMetadata(**table) for table in table_meta][0] if table_meta else None
-
-    def get_run_results(self, test_id: UUID, run_id: UUID, key_metrics: list[str] | None = None) -> list:
-        query_fields = ["column", "row", "value", "value_text", "status"]
-        raw_query = (f"SELECT {','.join(query_fields)}, WRITETIME(status) as ordering "
-                     f"FROM generic_result_data_v1 WHERE test_id = ? AND run_id = ? AND name = ?")
-        query = self.cluster.prepare(raw_query)
-        tables_meta = self._get_tables_metadata(test_id=test_id)
-        table_entries = []
-        for table in tables_meta:
-            cells = self.cluster.session.execute(query=query, parameters=(test_id, run_id, table.name))
-            cells = [dict(cell.items()) for cell in cells]
-            if key_metrics:
-                cells = [cell for cell in cells if cell['column'] in key_metrics]
-            if not cells:
-                continue
-
-            table_name = table.name
-            table_description = table.description
-            column_types_map = {col_meta.name: col_meta.type for col_meta in table.columns_meta}
-            column_names = [col_meta.name for col_meta in table.columns_meta if col_meta.visible is not False]
-
-            table_data = {
-                'description': table_description,
-                'table_data': {},
-                'columns': [],
-                'rows': [],
-                'table_status': 'PASS',
-            }
-
-            present_columns = {cell['column'] for cell in cells}
-            present_rows = {cell['row'] for cell in cells}
-
-            # Filter columns based on presence in cells and visibility
-            table_data['columns'] = [
-                col_meta for col_meta in table.columns_meta
-                if col_meta.name in present_columns and col_meta.name in column_names
-            ]
-            table_data['rows'] = [
-                row for row in table.rows_meta if row in present_rows
-            ]
-
-            for row in table_data['rows']:
-                table_data['table_data'][row] = {}
-
-            for cell in cells:
-                column = cell['column']
-                row = cell['row']
-                value = cell.get('value') if cell.get('value') is not None else cell.get('value_text')
-                status = cell['status']
-
-                if column in column_names and row in table_data['rows']:
-                    table_data['table_data'][row][column] = {
-                        'value': value,
-                        'status': status,
-                        'type': column_types_map.get(column)
-                    }
-
-                    if status not in ["UNSET", "PASS"] and table_data['table_status'] != "ERROR":
-                        table_data['table_status'] = status
-
-            table_entries.append({
-                'table_name': table_name,
-                'table_data': table_data,
-                'ordering': cells[0]['ordering']
-            })
-
-        table_entries.sort(key=lambda x: x['ordering'])
-
-        return [{entry['table_name']: entry['table_data']} for entry in table_entries]
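Note: one detail worth calling out in the assembly above — `table_status` starts at PASS and is overwritten by the first non-PASS, non-UNSET cell status, but never downgraded once it reaches ERROR. The rollup in isolation, on made-up statuses:

    statuses = ["PASS", "UNSET", "WARNING", "ERROR", "WARNING"]

    table_status = "PASS"
    for status in statuses:
        if status not in ["UNSET", "PASS"] and table_status != "ERROR":
            table_status = status  # sticky once it reaches ERROR

    print(table_status)  # ERROR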
-
-    def get_test_graphs(self, test_id: UUID, start_date: datetime | None = None, end_date: datetime | None = None, table_names: list[str] | None = None):
-        runs_details = self._get_runs_details(test_id)
-        tables_meta = self._get_tables_metadata(test_id=test_id)
-
-        if table_names:
-            tables_meta = [table for table in tables_meta if table.name in table_names]
-
-        graphs = []
-        releases_filters = set()
-        for table in tables_meta:
-            data = self._get_tables_data(test_id=test_id, table_name=table.name, ignored_runs=runs_details.ignored,
-                                         start_date=start_date, end_date=end_date)
-            if not data:
-                continue
-            best_results = self.get_best_results(test_id=test_id, name=table.name)
-            main_package = tables_meta[0].sut_package_name
-            if not main_package:
-                main_package = _identify_most_changed_package(
-                    [pkg for sublist in runs_details.packages.values() for pkg in sublist])
-            releases_map = _split_results_by_release(runs_details.packages, main_package=main_package)
-            graphs.extend(
-                create_chartjs(table, data, best_results, releases_map=releases_map, runs_details=runs_details, main_package=main_package))
-            releases_filters.update(releases_map.keys())
-        ticks = calculate_graph_ticks(graphs)
-        return graphs, ticks, list(releases_filters)
-
-    def is_results_exist(self, test_id: UUID):
-        """Verify whether any results exist for the given test id."""
-        return bool(ArgusGenericResultMetadata.objects(test_id=test_id).only(["name"]).limit(1))
-
-    def get_best_results(self, test_id: UUID, name: str) -> dict[str, List[BestResult]]:
-        runs_details = self._get_runs_details(test_id)
-        query_fields = ["key", "value", "result_date", "run_id"]
-        raw_query = (f"SELECT {','.join(query_fields)}"
-                     f" FROM generic_result_best_v2 WHERE test_id = ? and name = ?")
-        query = self.cluster.prepare(raw_query)
-        best_results = [BestResult(**best) for best in self.cluster.session.execute(query=query, parameters=(test_id, name))
-                        if best["run_id"] not in runs_details.ignored]
-        best_results_map = defaultdict(list)
-        for best in sorted(best_results, key=lambda x: x.result_date):
-            best_results_map.setdefault(best.key, []).append(best)
-        return best_results_map
-
-    def update_best_results(self, test_id: UUID, table_name: str, cells: list[Cell],
-                            table_metadata: ArgusGenericResultMetadata, run_id: str) -> dict[str, List[BestResult]]:
-        """Update best results for the given test_id and table_name based on cell values, if any value is better than the current best"""
-        higher_is_better_map = {meta["name"]: meta.higher_is_better for meta in table_metadata.columns_meta}
-        best_results = self.get_best_results(test_id=test_id, name=table_name)
-        for cell in cells:
-            if cell.value is None:
-                # textual value, skip
-                continue
-            key = f"{cell.column}:{cell.row}"
-            if higher_is_better_map[cell.column] is None:
-                # skip updating the best value when higher_is_better is not set (not enabled by user)
-                continue
-            current_best = best_results.get(key)[-1] if key in best_results else None
-            is_better = partial(operator.gt, cell.value) if higher_is_better_map[cell.column] \
-                else partial(operator.lt, cell.value)
-            if current_best is None or is_better(current_best.value):
-                result_date = datetime.now(timezone.utc)
-                best_results[key].append(BestResult(key=key, value=cell.value, result_date=result_date, run_id=run_id))
-                ArgusBestResultData(test_id=test_id, name=table_name, key=key, value=cell.value, result_date=result_date,
-                                    run_id=run_id).save()
-        return best_results
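Note: the `partial(operator.gt, value)` trick above binds the candidate as the left operand, so `is_better(current_best)` reads as "candidate beats current best" regardless of metric direction. A two-case illustration:

    import operator
    from functools import partial

    # higher_is_better=True: candidate 105 vs current best 100
    is_better = partial(operator.gt, 105)  # is_better(x) == (105 > x)
    print(is_better(100))  # True -> record a new best

    # higher_is_better=False (e.g. latency): candidate 9.5 vs current best 10
    is_better = partial(operator.lt, 9.5)  # is_better(x) == (9.5 < x)
    print(is_better(10))   # True -> record a new best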
-
-    def _exclude_disabled_tests(self, test_ids: list[UUID]) -> list[UUID]:
-        is_enabled_query = self.cluster.prepare("SELECT id, enabled FROM argus_test_v2 WHERE id = ?")
-        return [test_id for test_id in test_ids if self.cluster.session.execute(is_enabled_query, parameters=(test_id,)).one()['enabled']]
-
-    def get_tests_by_version(self, sut_package_name: str, test_ids: list[UUID]) -> dict:
-        """
-        Get the latest run details for each test method, excluding ignored runs.
-        Returns:
-            {
-                'versions': {version: {test_id: {test_method: {'run_id': run_id, 'status': status}}}},
-                'test_info': {test_id: {'name': test_name, 'build_id': build_id}}
-            }
-        Currently works only with the scylla-cluster-tests plugin (due to the test_method field requirement)
-        """
-        plugin = TestRunService().get_plugin("scylla-cluster-tests")
-        result = defaultdict(lambda: defaultdict(dict))
-        test_info = {}
-        test_ids = self._exclude_disabled_tests(test_ids)
-        for test_id in test_ids:
-            runs_details_query = self.cluster.prepare(
-                f"""
-                SELECT id, status, investigation_status, test_name, build_id, packages, test_method, started_by
-                FROM {plugin.model.table_name()}
-                WHERE test_id = ? LIMIT 10
-                """
-            )
-            rows = self.cluster.session.execute(runs_details_query, parameters=(test_id,)).all()
-            for row in rows:
-                if row["investigation_status"].lower() == "ignored":
-                    continue
-                packages = row['packages']
-                test_method = row['test_method']
-                if not test_method:
-                    continue
-                for sut_name in [f"{sut_package_name}-upgraded",
-                                 f"{sut_package_name}-upgrade-target",
-                                 sut_package_name,
-                                 f"{sut_package_name}-target"
-                                 ]:
-                    sut_version = next(
-                        (f"{pkg.version}-{pkg.date}-{pkg.revision_id}" for pkg in packages if pkg.name == sut_name), None)
-                    if sut_version:
-                        break
-                if sut_version is None:
-                    continue
-                method_name = test_method.rsplit('.', 1)[-1]
-
-                if method_name not in result[sut_version][str(test_id)]:
-                    result[sut_version][str(test_id)][method_name] = {
-                        'run_id': str(row['id']),
-                        'status': row['status'],
-                        'started_by': row['started_by']
-                    }
-
-                if str(test_id) not in test_info:
-                    test_info[str(test_id)] = {
-                        'name': row['test_name'],
-                        'build_id': row['build_id']
-                    }
-
-        return {
-            'versions': {version: dict(tests) for version, tests in result.items()},
-            'test_info': test_info
-        }
-
-    def create_argus_graph_view(self, test_id: UUID, name: str, description: str) -> ArgusGraphView:
-        view_id = uuid4()
-        graph_view = ArgusGraphView(test_id=test_id, id=view_id)
-        graph_view.name = name
-        graph_view.description = description
-        graph_view.save()
-        return graph_view
-
-    def update_argus_graph_view(self, test_id: UUID, view_id: UUID, name: str, description: str,
-                                graphs: dict[str, str]) -> ArgusGraphView:
-        try:
-            graph_view = ArgusGraphView.get(test_id=test_id, id=view_id)
-        except ArgusGraphView.DoesNotExist:
-            raise ValueError(f"GraphView with id {view_id} does not exist for test {test_id}")
-
-        existing_keys = set(graph_view.graphs.keys())
-        new_keys = set(graphs.keys())
-        keys_to_remove = existing_keys - new_keys
-
-        for key in keys_to_remove:
-            ArgusGraphView.objects(test_id=test_id, id=view_id).update(graphs={key: None})
-
-        if graphs:
-            ArgusGraphView.objects(test_id=test_id, id=view_id).update(graphs=graphs)
-
-        ArgusGraphView.objects(test_id=test_id, id=view_id).update(
-            name=name,
-            description=description
-        )
-
-        return ArgusGraphView.get(test_id=test_id, id=view_id)
-
-    def get_argus_graph_views(self, test_id: UUID) -> list[ArgusGraphView]:
-        return list(ArgusGraphView.objects(test_id=test_id))