argus-alm 0.14.2__py3-none-any.whl → 0.15.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. argus/_version.py +21 -0
  2. argus/backend/.gitkeep +0 -0
  3. argus/backend/__init__.py +0 -0
  4. argus/backend/cli.py +57 -0
  5. argus/backend/controller/__init__.py +0 -0
  6. argus/backend/controller/admin.py +20 -0
  7. argus/backend/controller/admin_api.py +355 -0
  8. argus/backend/controller/api.py +589 -0
  9. argus/backend/controller/auth.py +67 -0
  10. argus/backend/controller/client_api.py +109 -0
  11. argus/backend/controller/main.py +316 -0
  12. argus/backend/controller/notification_api.py +72 -0
  13. argus/backend/controller/notifications.py +13 -0
  14. argus/backend/controller/planner_api.py +194 -0
  15. argus/backend/controller/team.py +129 -0
  16. argus/backend/controller/team_ui.py +19 -0
  17. argus/backend/controller/testrun_api.py +513 -0
  18. argus/backend/controller/view_api.py +188 -0
  19. argus/backend/controller/views_widgets/__init__.py +0 -0
  20. argus/backend/controller/views_widgets/graphed_stats.py +54 -0
  21. argus/backend/controller/views_widgets/graphs.py +68 -0
  22. argus/backend/controller/views_widgets/highlights.py +135 -0
  23. argus/backend/controller/views_widgets/nemesis_stats.py +26 -0
  24. argus/backend/controller/views_widgets/summary.py +43 -0
  25. argus/backend/db.py +98 -0
  26. argus/backend/error_handlers.py +41 -0
  27. argus/backend/events/event_processors.py +34 -0
  28. argus/backend/models/__init__.py +0 -0
  29. argus/backend/models/argus_ai.py +24 -0
  30. argus/backend/models/github_issue.py +60 -0
  31. argus/backend/models/plan.py +24 -0
  32. argus/backend/models/result.py +187 -0
  33. argus/backend/models/runtime_store.py +58 -0
  34. argus/backend/models/view_widgets.py +25 -0
  35. argus/backend/models/web.py +403 -0
  36. argus/backend/plugins/__init__.py +0 -0
  37. argus/backend/plugins/core.py +248 -0
  38. argus/backend/plugins/driver_matrix_tests/controller.py +66 -0
  39. argus/backend/plugins/driver_matrix_tests/model.py +429 -0
  40. argus/backend/plugins/driver_matrix_tests/plugin.py +21 -0
  41. argus/backend/plugins/driver_matrix_tests/raw_types.py +62 -0
  42. argus/backend/plugins/driver_matrix_tests/service.py +61 -0
  43. argus/backend/plugins/driver_matrix_tests/udt.py +42 -0
  44. argus/backend/plugins/generic/model.py +86 -0
  45. argus/backend/plugins/generic/plugin.py +15 -0
  46. argus/backend/plugins/generic/types.py +14 -0
  47. argus/backend/plugins/loader.py +39 -0
  48. argus/backend/plugins/sct/controller.py +224 -0
  49. argus/backend/plugins/sct/plugin.py +37 -0
  50. argus/backend/plugins/sct/resource_setup.py +177 -0
  51. argus/backend/plugins/sct/service.py +682 -0
  52. argus/backend/plugins/sct/testrun.py +288 -0
  53. argus/backend/plugins/sct/udt.py +100 -0
  54. argus/backend/plugins/sirenada/model.py +118 -0
  55. argus/backend/plugins/sirenada/plugin.py +16 -0
  56. argus/backend/service/admin.py +26 -0
  57. argus/backend/service/argus_service.py +696 -0
  58. argus/backend/service/build_system_monitor.py +185 -0
  59. argus/backend/service/client_service.py +127 -0
  60. argus/backend/service/event_service.py +18 -0
  61. argus/backend/service/github_service.py +233 -0
  62. argus/backend/service/jenkins_service.py +269 -0
  63. argus/backend/service/notification_manager.py +159 -0
  64. argus/backend/service/planner_service.py +608 -0
  65. argus/backend/service/release_manager.py +229 -0
  66. argus/backend/service/results_service.py +690 -0
  67. argus/backend/service/stats.py +610 -0
  68. argus/backend/service/team_manager_service.py +82 -0
  69. argus/backend/service/test_lookup.py +172 -0
  70. argus/backend/service/testrun.py +489 -0
  71. argus/backend/service/user.py +308 -0
  72. argus/backend/service/views.py +219 -0
  73. argus/backend/service/views_widgets/__init__.py +0 -0
  74. argus/backend/service/views_widgets/graphed_stats.py +180 -0
  75. argus/backend/service/views_widgets/highlights.py +374 -0
  76. argus/backend/service/views_widgets/nemesis_stats.py +34 -0
  77. argus/backend/template_filters.py +27 -0
  78. argus/backend/tests/__init__.py +0 -0
  79. argus/backend/tests/client_service/__init__.py +0 -0
  80. argus/backend/tests/client_service/test_submit_results.py +79 -0
  81. argus/backend/tests/conftest.py +180 -0
  82. argus/backend/tests/results_service/__init__.py +0 -0
  83. argus/backend/tests/results_service/test_best_results.py +178 -0
  84. argus/backend/tests/results_service/test_cell.py +65 -0
  85. argus/backend/tests/results_service/test_chartjs_additional_functions.py +259 -0
  86. argus/backend/tests/results_service/test_create_chartjs.py +220 -0
  87. argus/backend/tests/results_service/test_result_metadata.py +100 -0
  88. argus/backend/tests/results_service/test_results_service.py +203 -0
  89. argus/backend/tests/results_service/test_validation_rules.py +213 -0
  90. argus/backend/tests/view_widgets/__init__.py +0 -0
  91. argus/backend/tests/view_widgets/test_highlights_api.py +532 -0
  92. argus/backend/util/common.py +65 -0
  93. argus/backend/util/config.py +38 -0
  94. argus/backend/util/encoders.py +56 -0
  95. argus/backend/util/logsetup.py +80 -0
  96. argus/backend/util/module_loaders.py +30 -0
  97. argus/backend/util/send_email.py +91 -0
  98. argus/client/base.py +1 -3
  99. argus/client/driver_matrix_tests/cli.py +17 -8
  100. argus/client/generic/cli.py +4 -2
  101. argus/client/generic/client.py +1 -0
  102. argus/client/generic_result.py +48 -9
  103. argus/client/sct/client.py +1 -3
  104. argus/client/sirenada/client.py +4 -1
  105. argus/client/tests/__init__.py +0 -0
  106. argus/client/tests/conftest.py +19 -0
  107. argus/client/tests/test_package.py +45 -0
  108. argus/client/tests/test_results.py +224 -0
  109. argus/common/sct_types.py +3 -0
  110. argus/common/sirenada_types.py +1 -1
  111. {argus_alm-0.14.2.dist-info → argus_alm-0.15.2.dist-info}/METADATA +43 -19
  112. argus_alm-0.15.2.dist-info/RECORD +122 -0
  113. {argus_alm-0.14.2.dist-info → argus_alm-0.15.2.dist-info}/WHEEL +2 -1
  114. argus_alm-0.15.2.dist-info/entry_points.txt +3 -0
  115. argus_alm-0.15.2.dist-info/top_level.txt +1 -0
  116. argus_alm-0.14.2.dist-info/RECORD +0 -20
  117. argus_alm-0.14.2.dist-info/entry_points.txt +0 -4
  118. {argus_alm-0.14.2.dist-info → argus_alm-0.15.2.dist-info/licenses}/LICENSE +0 -0
@@ -0,0 +1,203 @@
1
+ from datetime import datetime, timedelta
2
+ from uuid import uuid4
3
+
4
+ import pytest
5
+ from argus.backend.models.result import ArgusGenericResultMetadata, ArgusGenericResultData, ColumnMetadata, ArgusGraphView
6
+ from argus.backend.plugins.sct.testrun import SCTTestRun
7
+ from argus.backend.plugins.sct.udt import PackageVersion
8
+ from argus.backend.service.results_service import ResultsService
9
+
10
+
11
@pytest.fixture
def setup_data(argus_db):
    """Seed one result table ('Test Table', single FLOAT column/row) with three
    data points aged 10, 5 and 1 days.

    Returns:
        (test_id, table_metadata, saved_data_rows) — the saved
        ``ArgusGenericResultData`` rows in oldest-first order.
    """
    test_id = uuid4()
    table = ArgusGenericResultMetadata(
        test_id=test_id,
        name='Test Table',
        columns_meta=[
            ColumnMetadata(name='col1', unit='ms', type='FLOAT', higher_is_better=False)
        ],
        rows_meta=['row1'],
        validation_rules={}
    )
    # The three points differ only in age and value; each gets its own run_id.
    data = [
        ArgusGenericResultData(
            test_id=test_id,
            name=table.name,
            run_id=uuid4(),
            column='col1',
            row='row1',
            sut_timestamp=datetime.today() - timedelta(days=days_ago),
            value=value,
            status='UNSET'
        ).save()
        for days_ago, value in ((10, 100.0), (5, 150.0), (1, 200.0))
    ]
    return test_id, table, data
56
+
57
+
58
def test_results_service_should_return_results_within_date_range(setup_data):
    """Only the 5-day-old point falls inside a 7..2-days-ago window."""
    test_id, table, _ = setup_data
    now = datetime.today()

    rows = ResultsService()._get_tables_data(
        test_id=test_id,
        table_name=table.name,
        ignored_runs=[],
        start_date=now - timedelta(days=7),
        end_date=now - timedelta(days=2),
    )

    assert len(rows) == 1
    assert rows[0].value == 150.0
75
+
76
+
77
def test_results_service_should_return_no_results_outside_date_range(setup_data):
    """A 20..15-days-ago window predates every stored point, so nothing matches."""
    test_id, table, _ = setup_data
    now = datetime.today()

    rows = ResultsService()._get_tables_data(
        test_id=test_id,
        table_name=table.name,
        ignored_runs=[],
        start_date=now - timedelta(days=20),
        end_date=now - timedelta(days=15),
    )

    assert len(rows) == 0
93
+
94
+
95
def test_results_service_should_return_all_results_with_no_date_range(setup_data):
    """With no date bounds, all three seeded points come back."""
    test_id, table, _ = setup_data
    service = ResultsService()

    every_row = service._get_tables_data(
        test_id=test_id,
        table_name=table.name,
        ignored_runs=[],
    )

    assert len(every_row) == 3
106
+
107
+
108
def test_get_tests_by_version_groups_runs_correctly(argus_db):
    """Runs are grouped per SUT package version; runs with
    investigation_status='ignored' are excluded from the grouping.
    """
    test_id1 = uuid4()
    test_id2 = uuid4()
    run_id1 = uuid4()
    run_id2 = uuid4()
    run_id3 = uuid4()
    run_id4 = uuid4()
    pkg_v4_0 = PackageVersion(name='scylla', version='4.0', date='2021-01-01', revision_id='', build_id='')
    pkg_v4_1 = PackageVersion(name='scylla', version='4.1', date='2021-02-01', revision_id='', build_id='')

    def save_run(run_id, test_id, test_method, investigation_status, package):
        # One-stop constructor for the otherwise identical SCTTestRun rows.
        SCTTestRun(
            id=run_id,
            build_id='build_id1',
            test_id=test_id,
            test_method=test_method,
            investigation_status=investigation_status,
            packages=[package],
        ).save()

    save_run(run_id1, test_id1, 'test_method1', '', pkg_v4_0)
    save_run(run_id2, test_id1, 'test_method2', 'ignored', pkg_v4_0)  # must be dropped
    save_run(run_id3, test_id2, 'test_method1', '', pkg_v4_0)
    save_run(run_id4, test_id2, 'test_method1', '', pkg_v4_1)

    sut_package_name = 'scylla'
    test_ids = [test_id1, test_id2]
    service = ResultsService()
    # Bypass the disabled-tests lookup; it is not what this test exercises.
    service._exclude_disabled_tests = lambda x: x
    result = service.get_tests_by_version(sut_package_name, test_ids)

    # run_id2 is absent: its investigation_status is 'ignored'.
    expected_result = {
        'test_info': {
            str(test_id1): {'build_id': 'build_id1', 'name': None},
            str(test_id2): {'build_id': 'build_id1', 'name': None},
        },
        'versions': {
            '4.0-2021-01-01-': {
                str(test_id1): {'test_method1': {'run_id': str(run_id1),
                                                 'started_by': None,
                                                 'status': 'created'}},
                str(test_id2): {'test_method1': {'run_id': str(run_id3),
                                                 'started_by': None,
                                                 'status': 'created'}},
            },
            '4.1-2021-02-01-': {
                str(test_id2): {'test_method1': {'run_id': str(run_id4),
                                                 'started_by': None,
                                                 'status': 'created'}},
            },
        },
    }
    assert result == expected_result
173
+
174
+
175
def test_create_update_argus_graph_view_should_create() -> None:
    """A freshly created graph view persists its name/description with no graphs."""
    test_id = uuid4()
    svc = ResultsService()
    svc.create_argus_graph_view(test_id, "MyView", "MyDescription")
    view = svc.get_argus_graph_views(test_id)[0]
    assert view is not None
    assert view.name == "MyView"
    assert view.description == "MyDescription"
    assert view.graphs == {}
184
+
185
+
186
def test_create_update_argus_graph_view_should_update() -> None:
    """Updating a view replaces its name, description and graphs payload."""
    test_id = uuid4()
    svc = ResultsService()
    created = svc.create_argus_graph_view(test_id, "OldName", "OldDesc")
    svc.update_argus_graph_view(test_id, created.id, "NewName", "NewDesc", {"graph2": "new_data"})
    refreshed = svc.get_argus_graph_views(test_id)[0]
    assert (refreshed.name, refreshed.description) == ("NewName", "NewDesc")
    assert refreshed.graphs == {"graph2": "new_data"}
195
+
196
+
197
def test_get_argus_graph_views_should_return_list() -> None:
    """Every view created under a test id is returned by the lookup."""
    test_id = uuid4()
    svc = ResultsService()
    for view_name, view_desc in (("View1", "Desc1"), ("View2", "Desc2")):
        svc.create_argus_graph_view(test_id, view_name, view_desc)
    assert len(svc.get_argus_graph_views(test_id)) == 2
@@ -0,0 +1,213 @@
1
+ import logging
2
+ from dataclasses import asdict, dataclass
3
+ from typing import Any
4
+ from uuid import UUID
5
+
6
+ import pytest
7
+
8
+ from argus.backend.error_handlers import DataValidationError
9
+ from argus.backend.tests.conftest import get_fake_test_run, fake_test
10
+ from argus.client.generic_result import ColumnMetadata, ResultType, ValidationRule, Status, StaticGenericResultTable
11
+
12
+ LOGGER = logging.getLogger(__name__)
13
+
14
+
15
class SampleTable(StaticGenericResultTable):
    """Static result table covering every column flavour the validation tests need:

    - "h_is_better": tracked FLOAT, higher is better, best-absolute rule
    - "l_is_better": tracked INTEGER, lower is better, best-percentage rule
    - "duration col name": tracked DURATION with a fixed upper limit
    - "non tracked col name": FLOAT with no validation rule attached
    - "text col name": TEXT column, never validated
    """
    class Meta:
        name = "Test Table Name"
        description = "Test Table Description"
        Columns = [ColumnMetadata(name="h_is_better", unit="ms", type=ResultType.FLOAT, higher_is_better=True),
                   ColumnMetadata(name="l_is_better", unit="ms", type=ResultType.INTEGER, higher_is_better=False),
                   ColumnMetadata(name="duration col name", unit="s", type=ResultType.DURATION, higher_is_better=False),
                   ColumnMetadata(name="non tracked col name", unit="", type=ResultType.FLOAT),
                   ColumnMetadata(name="text col name", unit="", type=ResultType.TEXT),
                   ]
        # NOTE(review): exact thresholds semantics (relative to best result /
        # fixed limit) are defined server-side; the values here are chosen so the
        # test's second submission violates each rule — confirm against
        # argus.backend results validation if editing.
        ValidationRules = {"h_is_better": ValidationRule(best_abs=4),
                           "l_is_better": ValidationRule(best_pct=50),
                           "duration col name": ValidationRule(fixed_limit=590)
                           }
29
+
30
+
31
@dataclass
class SampleCell:
    """One cell submission: a (column, row) coordinate plus its value and status."""
    # Column name; must match one of SampleTable's ColumnMetadata entries.
    column: str
    # Row key within the table.
    row: str
    # Cell payload; numeric for tracked columns, str for the TEXT column.
    value: Any
    # Status sent by the client; the tests show the server replaces it with
    # PASS/ERROR for validated columns after submission.
    status: Status = Status.UNSET
37
+
38
+
39
def results_to_dict(results, table_name='Test Table Name'):
    """Pivot a table's row-major ``table_data`` into column-major form.

    Args:
        results: mapping of table name -> {'table_data': {row: {column: cell}}}.
        table_name: which table to pivot (defaults to the name used by
            ``SampleTable`` for backward compatibility).

    Returns:
        {column: {row: {'value': ..., 'status': ...}}} — only the 'value' and
        'status' fields of each cell are kept.
    """
    actual_cells = {}
    table_data = results[table_name]['table_data']

    for row_key, row_data in table_data.items():
        for col_name, col_data in row_data.items():
            # setdefault replaces the manual key-presence check.
            actual_cells.setdefault(col_name, {})[row_key] = {
                'value': col_data['value'],
                'status': col_data['status'],
            }
    return actual_cells
52
+
53
+
54
def test_can_track_validation_rules_changes(fake_test, client_service, results_service, release, group):
    """End-to-end scenario for server-side result validation across submissions.

    Five phases, each submitting a fresh run for the same test (state — the
    tracked "best results" — accrues server-side between phases):

    1. sut_timestamp=123: first submission; tracked columns validate to PASS,
       untracked/TEXT columns stay UNSET.
    2. sut_timestamp=124: values violating each rule -> DataValidationError is
       raised and the violating cells are stored with status ERROR.
    3. sut_timestamp=125: better values establish new best results (PASS).
    4. sut_timestamp=126: values that would have passed under the old bests now
       fail against the new ones (ERROR).
    5. sut_timestamp=122: a subclass with relaxed/changed rules is submitted;
       the new rules apply to subsequent validation (everything passes).
    """
    run_type, run = get_fake_test_run(test=fake_test)
    results = SampleTable()
    results.sut_timestamp = 123
    sample_data = [
        SampleCell(column="h_is_better", row="row", value=10),
        SampleCell(column="l_is_better", row="row", value=100),
        SampleCell(column="duration col name", row="row", value=10),
        SampleCell(column="non tracked col name", row="row", value=10),
        SampleCell(column="text col name", row="row", value="a value"),
    ]
    for cell in sample_data:
        results.add_result(column=cell.column, row=cell.row, value=cell.value, status=cell.status)
    client_service.submit_run(run_type, asdict(run))
    client_service.submit_results(run_type, run.run_id, results.as_dict())
    run_results = results_service.get_run_results(fake_test.id, UUID(run.run_id))
    actual_cells = results_to_dict(run_results[0])
    # Phase 1: tracked columns validate to PASS; untracked/TEXT stay UNSET.
    for cell in sample_data:
        if cell.column == "text col name" or cell.column == "non tracked col name":
            assert actual_cells[cell.column][cell.row]['value'] == cell.value
            assert actual_cells[cell.column][cell.row]['status'] == "UNSET"
        else:
            assert actual_cells[cell.column][cell.row]['value'] == cell.value
            assert actual_cells[cell.column][cell.row]['status'] == "PASS"

    # Phase 2: rule-violating values -> submission raises, cells stored as ERROR.
    run_type, run = get_fake_test_run(test=fake_test)
    results = SampleTable()
    results.sut_timestamp = 124
    sample_data = [
        SampleCell(column="h_is_better", row="row", value=6),
        SampleCell(column="l_is_better", row="row", value=150),
        SampleCell(column="duration col name", row="row", value=600),
        SampleCell(column="non tracked col name", row="row", value=12),
        SampleCell(column="text col name", row="row", value="a value"),
    ]
    for cell in sample_data:
        results.add_result(column=cell.column, row=cell.row, value=cell.value, status=cell.status)
    client_service.submit_run(run_type, asdict(run))
    with pytest.raises(DataValidationError):
        client_service.submit_results(run_type, run.run_id, results.as_dict())
    run_results = results_service.get_run_results(fake_test.id, UUID(run.run_id))
    actual_cells = results_to_dict(run_results[0])
    for cell in sample_data:
        if cell.column == "text col name" or cell.column == "non tracked col name":
            assert actual_cells[cell.column][cell.row][
                'value'] == cell.value, f"Expected {cell.value} but got {actual_cells[cell.column][cell.row]['value']}"
            assert actual_cells[cell.column][cell.row][
                'status'] == "UNSET", f"Expected ERROR for {cell.column} but got {actual_cells[cell.column][cell.row]['status']}"
        else:
            assert actual_cells[cell.column][cell.row][
                'value'] == cell.value, f"Expected {cell.value} but got {actual_cells[cell.column][cell.row]['value']}"
            assert actual_cells[cell.column][cell.row][
                'status'] == "ERROR", f"Expected ERROR for {cell.column} but got {actual_cells[cell.column][cell.row]['status']}"

    # Phase 3: new best result appears (better values in every tracked column).
    run_type, run = get_fake_test_run(test=fake_test)
    results = SampleTable()
    results.sut_timestamp = 125
    sample_data = [
        SampleCell(column="h_is_better", row="row", value=100),
        SampleCell(column="l_is_better", row="row", value=50),
        SampleCell(column="duration col name", row="row", value=10),
        SampleCell(column="non tracked col name", row="row", value=12),
        SampleCell(column="text col name", row="row", value="a value"),
    ]
    for cell in sample_data:
        results.add_result(column=cell.column, row=cell.row, value=cell.value, status=cell.status)
    client_service.submit_run(run_type, asdict(run))
    client_service.submit_results(run_type, run.run_id, results.as_dict())
    run_results = results_service.get_run_results(fake_test.id, UUID(run.run_id))
    actual_cells = results_to_dict(run_results[0])
    for cell in sample_data:
        if cell.column == "text col name" or cell.column == "non tracked col name":
            assert actual_cells[cell.column][cell.row][
                'value'] == cell.value, f"Expected {cell.value} but got {actual_cells[cell.column][cell.row]['value']}"
            assert actual_cells[cell.column][cell.row][
                'status'] == "UNSET", f"Expected PASS for {cell.column} but got {actual_cells[cell.column][cell.row]['status']}"
        else:
            assert actual_cells[cell.column][cell.row][
                'value'] == cell.value, f"Expected {cell.value} but got {actual_cells[cell.column][cell.row]['value']}"
            assert actual_cells[cell.column][cell.row][
                'status'] == "PASS", f"Expected PASS for {cell.column} but got {actual_cells[cell.column][cell.row]['status']}"

    # Phase 4: validation now runs against the new best results from phase 3.
    run_type, run = get_fake_test_run(test=fake_test)
    results = SampleTable()
    results.sut_timestamp = 126
    sample_data = [
        SampleCell(column="h_is_better", row="row", value=95),
        SampleCell(column="l_is_better", row="row", value=75),
        SampleCell(column="duration col name", row="row", value=590),
        SampleCell(column="non tracked col name", row="row", value=12),
        SampleCell(column="text col name", row="row", value="a value"),
    ]
    for cell in sample_data:
        results.add_result(column=cell.column, row=cell.row, value=cell.value, status=cell.status)
    client_service.submit_run(run_type, asdict(run))
    with pytest.raises(DataValidationError):
        client_service.submit_results(run_type, run.run_id, results.as_dict())
    run_results = results_service.get_run_results(fake_test.id, UUID(run.run_id))
    actual_cells = results_to_dict(run_results[0])
    for cell in sample_data:
        if cell.column == "text col name" or cell.column == "non tracked col name":
            assert actual_cells[cell.column][cell.row][
                'value'] == cell.value, f"Expected {cell.value} but got {actual_cells[cell.column][cell.row]['value']}"
            assert actual_cells[cell.column][cell.row][
                'status'] == "UNSET", f"Expected ERROR for {cell.column} but got {actual_cells[cell.column][cell.row]['status']}"
        else:
            assert actual_cells[cell.column][cell.row][
                'value'] == cell.value, f"Expected {cell.value} but got {actual_cells[cell.column][cell.row]['value']}"
            assert actual_cells[cell.column][cell.row][
                'status'] == "ERROR", f"Expected ERROR for {cell.column} but got {actual_cells[cell.column][cell.row]['status']}"

    # Phase 5: applying new validation rules should be taken into account for
    # next results; the new rules are less strict — the "duration col name"
    # rule is removed and "non tracked col name" gets a new fixed limit.
    class NewRulesSampleTable(SampleTable):
        class Meta:
            name = "Test Table Name"
            description = "Test Table Description"
            Columns = [ColumnMetadata(name="h_is_better", unit="ms", type=ResultType.FLOAT, higher_is_better=True),
                       ColumnMetadata(name="l_is_better", unit="ms", type=ResultType.INTEGER, higher_is_better=False),
                       ColumnMetadata(name="duration col name", unit="s",
                                      type=ResultType.DURATION, higher_is_better=False),
                       ColumnMetadata(name="non tracked col name", unit="",
                                      type=ResultType.FLOAT, higher_is_better=True),
                       ColumnMetadata(name="text col name", unit="", type=ResultType.TEXT),
                       ]
            ValidationRules = {"h_is_better": ValidationRule(best_abs=100),
                               "l_is_better": ValidationRule(best_pct=90),
                               "non tracked col name": ValidationRule(fixed_limit=100)  # new rule, removed old one too
                               }
    run_type, run = get_fake_test_run(test=fake_test)
    results = NewRulesSampleTable()
    results.sut_timestamp = 122
    sample_data = [
        SampleCell(column="h_is_better", row="row", value=95),
        SampleCell(column="l_is_better", row="row", value=75),
        SampleCell(column="duration col name", row="row", value=691),
        SampleCell(column="non tracked col name", row="row", value=101),
        SampleCell(column="text col name", row="row", value="a value"),
    ]
    for cell in sample_data:
        results.add_result(column=cell.column, row=cell.row, value=cell.value, status=cell.status)
    client_service.submit_run(run_type, asdict(run))
    client_service.submit_results(run_type, run.run_id, results.as_dict())
    run_results = results_service.get_run_results(fake_test.id, UUID(run.run_id))
    actual_cells = results_to_dict(run_results[0])
    for cell in sample_data:
        if cell.column == "text col name":
            assert actual_cells[cell.column][cell.row][
                'value'] == cell.value, f"Expected {cell.value} but got {actual_cells[cell.column][cell.row]['value']}"
            assert actual_cells[cell.column][cell.row][
                'status'] == "UNSET", f"Expected UNSET for {cell.column} but got {actual_cells[cell.column][cell.row]['status']}"
        else:
            assert actual_cells[cell.column][cell.row][
                'value'] == cell.value, f"Expected {cell.value} but got {actual_cells[cell.column][cell.row]['value']}"
            assert actual_cells[cell.column][cell.row][
                'status'] == "PASS", f"Expected PASS for {cell.column} but got {actual_cells[cell.column][cell.row]['status']}"
File without changes