argus-alm 0.14.2__py3-none-any.whl → 0.15.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- argus/_version.py +21 -0
- argus/backend/.gitkeep +0 -0
- argus/backend/__init__.py +0 -0
- argus/backend/cli.py +57 -0
- argus/backend/controller/__init__.py +0 -0
- argus/backend/controller/admin.py +20 -0
- argus/backend/controller/admin_api.py +355 -0
- argus/backend/controller/api.py +589 -0
- argus/backend/controller/auth.py +67 -0
- argus/backend/controller/client_api.py +109 -0
- argus/backend/controller/main.py +316 -0
- argus/backend/controller/notification_api.py +72 -0
- argus/backend/controller/notifications.py +13 -0
- argus/backend/controller/planner_api.py +194 -0
- argus/backend/controller/team.py +129 -0
- argus/backend/controller/team_ui.py +19 -0
- argus/backend/controller/testrun_api.py +513 -0
- argus/backend/controller/view_api.py +188 -0
- argus/backend/controller/views_widgets/__init__.py +0 -0
- argus/backend/controller/views_widgets/graphed_stats.py +54 -0
- argus/backend/controller/views_widgets/graphs.py +68 -0
- argus/backend/controller/views_widgets/highlights.py +135 -0
- argus/backend/controller/views_widgets/nemesis_stats.py +26 -0
- argus/backend/controller/views_widgets/summary.py +43 -0
- argus/backend/db.py +98 -0
- argus/backend/error_handlers.py +41 -0
- argus/backend/events/event_processors.py +34 -0
- argus/backend/models/__init__.py +0 -0
- argus/backend/models/argus_ai.py +24 -0
- argus/backend/models/github_issue.py +60 -0
- argus/backend/models/plan.py +24 -0
- argus/backend/models/result.py +187 -0
- argus/backend/models/runtime_store.py +58 -0
- argus/backend/models/view_widgets.py +25 -0
- argus/backend/models/web.py +403 -0
- argus/backend/plugins/__init__.py +0 -0
- argus/backend/plugins/core.py +248 -0
- argus/backend/plugins/driver_matrix_tests/controller.py +66 -0
- argus/backend/plugins/driver_matrix_tests/model.py +429 -0
- argus/backend/plugins/driver_matrix_tests/plugin.py +21 -0
- argus/backend/plugins/driver_matrix_tests/raw_types.py +62 -0
- argus/backend/plugins/driver_matrix_tests/service.py +61 -0
- argus/backend/plugins/driver_matrix_tests/udt.py +42 -0
- argus/backend/plugins/generic/model.py +86 -0
- argus/backend/plugins/generic/plugin.py +15 -0
- argus/backend/plugins/generic/types.py +14 -0
- argus/backend/plugins/loader.py +39 -0
- argus/backend/plugins/sct/controller.py +224 -0
- argus/backend/plugins/sct/plugin.py +37 -0
- argus/backend/plugins/sct/resource_setup.py +177 -0
- argus/backend/plugins/sct/service.py +682 -0
- argus/backend/plugins/sct/testrun.py +288 -0
- argus/backend/plugins/sct/udt.py +100 -0
- argus/backend/plugins/sirenada/model.py +118 -0
- argus/backend/plugins/sirenada/plugin.py +16 -0
- argus/backend/service/admin.py +26 -0
- argus/backend/service/argus_service.py +696 -0
- argus/backend/service/build_system_monitor.py +185 -0
- argus/backend/service/client_service.py +127 -0
- argus/backend/service/event_service.py +18 -0
- argus/backend/service/github_service.py +233 -0
- argus/backend/service/jenkins_service.py +269 -0
- argus/backend/service/notification_manager.py +159 -0
- argus/backend/service/planner_service.py +608 -0
- argus/backend/service/release_manager.py +229 -0
- argus/backend/service/results_service.py +690 -0
- argus/backend/service/stats.py +610 -0
- argus/backend/service/team_manager_service.py +82 -0
- argus/backend/service/test_lookup.py +172 -0
- argus/backend/service/testrun.py +489 -0
- argus/backend/service/user.py +308 -0
- argus/backend/service/views.py +219 -0
- argus/backend/service/views_widgets/__init__.py +0 -0
- argus/backend/service/views_widgets/graphed_stats.py +180 -0
- argus/backend/service/views_widgets/highlights.py +374 -0
- argus/backend/service/views_widgets/nemesis_stats.py +34 -0
- argus/backend/template_filters.py +27 -0
- argus/backend/tests/__init__.py +0 -0
- argus/backend/tests/client_service/__init__.py +0 -0
- argus/backend/tests/client_service/test_submit_results.py +79 -0
- argus/backend/tests/conftest.py +180 -0
- argus/backend/tests/results_service/__init__.py +0 -0
- argus/backend/tests/results_service/test_best_results.py +178 -0
- argus/backend/tests/results_service/test_cell.py +65 -0
- argus/backend/tests/results_service/test_chartjs_additional_functions.py +259 -0
- argus/backend/tests/results_service/test_create_chartjs.py +220 -0
- argus/backend/tests/results_service/test_result_metadata.py +100 -0
- argus/backend/tests/results_service/test_results_service.py +203 -0
- argus/backend/tests/results_service/test_validation_rules.py +213 -0
- argus/backend/tests/view_widgets/__init__.py +0 -0
- argus/backend/tests/view_widgets/test_highlights_api.py +532 -0
- argus/backend/util/common.py +65 -0
- argus/backend/util/config.py +38 -0
- argus/backend/util/encoders.py +56 -0
- argus/backend/util/logsetup.py +80 -0
- argus/backend/util/module_loaders.py +30 -0
- argus/backend/util/send_email.py +91 -0
- argus/client/base.py +1 -3
- argus/client/driver_matrix_tests/cli.py +17 -8
- argus/client/generic/cli.py +4 -2
- argus/client/generic/client.py +1 -0
- argus/client/generic_result.py +48 -9
- argus/client/sct/client.py +1 -3
- argus/client/sirenada/client.py +4 -1
- argus/client/tests/__init__.py +0 -0
- argus/client/tests/conftest.py +19 -0
- argus/client/tests/test_package.py +45 -0
- argus/client/tests/test_results.py +224 -0
- argus/common/sct_types.py +3 -0
- argus/common/sirenada_types.py +1 -1
- {argus_alm-0.14.2.dist-info → argus_alm-0.15.2.dist-info}/METADATA +43 -19
- argus_alm-0.15.2.dist-info/RECORD +122 -0
- {argus_alm-0.14.2.dist-info → argus_alm-0.15.2.dist-info}/WHEEL +2 -1
- argus_alm-0.15.2.dist-info/entry_points.txt +3 -0
- argus_alm-0.15.2.dist-info/top_level.txt +1 -0
- argus_alm-0.14.2.dist-info/RECORD +0 -20
- argus_alm-0.14.2.dist-info/entry_points.txt +0 -4
- {argus_alm-0.14.2.dist-info → argus_alm-0.15.2.dist-info/licenses}/LICENSE +0 -0
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
from dataclasses import asdict, dataclass
|
|
2
|
+
from typing import Any
|
|
3
|
+
|
|
4
|
+
import pytest
|
|
5
|
+
|
|
6
|
+
from argus.backend.error_handlers import DataValidationError
|
|
7
|
+
from argus.backend.tests.conftest import get_fake_test_run
|
|
8
|
+
from argus.client.generic_result import ColumnMetadata, ResultType, ValidationRule, Status, \
|
|
9
|
+
StaticGenericResultTable
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclass
class SampleCell:
    """One cell of sample data to feed into SampleTable.add_result in the tests below."""
    # Column name; must match a ColumnMetadata name declared in the table's Meta.
    column: str
    # Row label within the table.
    row: str
    # Raw cell value; the expected Python type depends on the column's ResultType.
    value: Any
    # Submission status; defaults to UNSET (tests override it to force errors).
    status: Status = Status.UNSET
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class SampleTable(StaticGenericResultTable):
    """Two-column generic result table used by the submit-results tests."""
    class Meta:
        name = "Test Table"
        description = "Test Table Description"
        # Propagated into as_dict()["meta"]["sut_package_name"] (asserted in the ok-path test).
        sut_package_name = "test_package"
        Columns = [
            ColumnMetadata(name="metric1", unit="ms", type=ResultType.FLOAT, higher_is_better=False),
            ColumnMetadata(name="metric2", unit="ms", type=ResultType.INTEGER, higher_is_better=False),
        ]
        # Values above fixed_limit fail validation (see the DataValidationError test below).
        ValidationRules = {
            "metric1": ValidationRule(fixed_limit=100),
            "metric2": ValidationRule(fixed_limit=200),
        }
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def test_submit_results_responds_ok_if_all_cells_pass(fake_test, client_service):
    """Submitting a table where every cell stays under its fixed_limit returns an ok response."""
    run_type, run = get_fake_test_run(test=fake_test)
    table = SampleTable()
    table.sut_timestamp = 123
    for entry in (
        SampleCell(column="metric1", row="row1", value=99.99),
        SampleCell(column="metric2", row="row1", value=199.99),
    ):
        table.add_result(column=entry.column, row=entry.row, value=entry.value, status=entry.status)
    client_service.submit_run(run_type, asdict(run))
    response = client_service.submit_results(run_type, run.run_id, table.as_dict())
    assert table.as_dict()["meta"]["sut_package_name"] == "test_package"
    assert response["status"] == "ok"
    assert response["message"] == "Results submitted"
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def test_submit_results_responds_with_error_when_cell_fails_validation(fake_test, client_service):
    """A cell value above its fixed_limit makes submit_results raise DataValidationError."""
    run_type, run = get_fake_test_run(test=fake_test)
    table = SampleTable()
    table.sut_timestamp = 123
    for entry in (
        SampleCell(column="metric1", row="row1", value=100.01),  # Exceeds fixed_limit
        SampleCell(column="metric2", row="row1", value=50),
    ):
        table.add_result(column=entry.column, row=entry.row, value=entry.value, status=entry.status)
    client_service.submit_run(run_type, asdict(run))
    with pytest.raises(DataValidationError):
        client_service.submit_results(run_type, run.run_id, table.as_dict())
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def test_submit_results_responds_with_error_when_cell_has_error(fake_test, client_service):
    """A cell explicitly submitted with Status.ERROR makes submit_results raise DataValidationError."""
    run_type, run = get_fake_test_run(test=fake_test)
    table = SampleTable()
    table.sut_timestamp = 123
    for entry in (
        SampleCell(column="metric1", row="row1", value=88, status=Status.ERROR),  # hardcoded error
        SampleCell(column="metric2", row="row1", value=50),
    ):
        table.add_result(column=entry.column, row=entry.row, value=entry.value, status=entry.status)
    client_service.submit_run(run_type, asdict(run))
    with pytest.raises(DataValidationError):
        client_service.submit_results(run_type, run.run_id, table.as_dict())
|
|
@@ -0,0 +1,180 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import time
|
|
3
|
+
import uuid
|
|
4
|
+
from unittest.mock import patch
|
|
5
|
+
|
|
6
|
+
from cassandra.auth import PlainTextAuthProvider
|
|
7
|
+
from docker import DockerClient
|
|
8
|
+
from flask import g
|
|
9
|
+
|
|
10
|
+
from argus.backend.util.config import Config
|
|
11
|
+
|
|
12
|
+
os.environ['DOCKER_HOST'] = ""
|
|
13
|
+
from _pytest.fixtures import fixture
|
|
14
|
+
from docker.errors import NotFound
|
|
15
|
+
from argus.backend.cli import sync_models
|
|
16
|
+
from argus.backend.db import ScyllaCluster
|
|
17
|
+
from argus.backend.models.web import ArgusTest, ArgusGroup, ArgusRelease, User, UserRoles
|
|
18
|
+
from argus.backend.plugins.sct.testrun import SCTTestRunSubmissionRequest
|
|
19
|
+
from argus.backend.service.client_service import ClientService
|
|
20
|
+
from argus.backend.service.release_manager import ReleaseManagerService
|
|
21
|
+
from argus.backend.service.results_service import ResultsService
|
|
22
|
+
import logging
|
|
23
|
+
from cassandra.cluster import Cluster
|
|
24
|
+
|
|
25
|
+
logging.getLogger().setLevel(logging.INFO)
|
|
26
|
+
os.environ['CQLENG_ALLOW_SCHEMA_MANAGEMENT'] = '1'
|
|
27
|
+
logging.getLogger('cassandra').setLevel(logging.WARNING)
|
|
28
|
+
logging.getLogger('cassandra.connection').setLevel(logging.WARNING)
|
|
29
|
+
logging.getLogger('cassandra.pool').setLevel(logging.WARNING)
|
|
30
|
+
logging.getLogger('cassandra.cluster').setLevel(logging.WARNING)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
@fixture(scope='session')
def argus_db():
    """Session-scoped ScyllaDB backing store for the test suite.

    Reuses a running ``argus_test_scylla`` container when present; otherwise
    starts (or creates) one, waits for it to report readiness in its logs,
    ensures the ``test_argus`` keyspace exists, patches the app Config, and
    yields the initialized ScyllaCluster. Shuts the connection down afterwards.
    """
    container_name = "argus_test_scylla"
    docker_client = DockerClient.from_env()
    # Keyspace creation / model sync is skipped only when the container was
    # already running - presumably its schema is already in place (TODO confirm).
    need_sync_models = True
    try:
        container = docker_client.containers.get(container_name)
        if container.status != 'running':
            container.start()
            print(f"Started existing container '{container_name}'.")
        else:
            print(f"Using already running container '{container_name}'.")
            need_sync_models = False
    except NotFound:
        # No container with that name yet - create a minimal single-node Scylla.
        container = docker_client.containers.run(
            "scylladb/scylla-enterprise:2024.1.10",
            name=container_name,
            detach=True,
            ports={'9042/tcp': 9042},
            command=[
                "--smp", "1",
                "--overprovisioned", "1",
                "--skip-wait-for-gossip-to-settle", "0",
                "--endpoint-snitch=SimpleSnitch",
                "--authenticator", "PasswordAuthenticator",
            ],
        )
    log_wait_timeout = 120  # seconds to wait for the "init - serving" log line
    start_time = time.time()

    print("Waiting for 'init - serving' message in container logs...")
    for log in container.logs(stream=True):
        log_line = log.decode('utf-8')
        if "init - serving" in log_line:
            print("'init - serving' message found.")
            break
        if "FATAL state" in log_line:
            raise Exception("ScyllaDB exited unexpectedly. Check container logs for more information.")
        # NOTE(review): this timeout only triggers when a new log line arrives;
        # a container that goes silent would block here indefinitely.
        if time.time() - start_time > log_wait_timeout:
            raise Exception("ScyllaDB did not log 'init - serving' within the timeout period.")

    container.reload()
    # Connect via the bridge-network IP rather than the published port.
    container_ip = container.attrs['NetworkSettings']['Networks']['bridge']['IPAddress']
    print(f"Container IP: {container_ip}")

    if need_sync_models:
        auth_provider = PlainTextAuthProvider(username='cassandra', password='cassandra')
        cluster = Cluster([container_ip], port=9042, auth_provider=auth_provider)  # Use container IP
        session = cluster.connect()
        session.execute("""
            CREATE KEYSPACE IF NOT EXISTS test_argus
            WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};
        """)
    config = {"SCYLLA_KEYSPACE_NAME": "test_argus", "SCYLLA_CONTACT_POINTS": [container_ip],
              "SCYLLA_USERNAME": "cassandra", "SCYLLA_PASSWORD": "cassandra", "APP_LOG_LEVEL": "INFO",
              "EMAIL_SENDER": "unit tester", "EMAIL_SENDER_PASS": "pass", "EMAIL_SENDER_USER": "qa",
              "EMAIL_SERVER": "fake", "EMAIL_SERVER_PORT": 25}
    Config.CONFIG = config  # patch config for whole test to avoid using Config.load_yaml_config() required by app context
    database = ScyllaCluster.get(config)
    if need_sync_models:
        sync_models()

    yield database
    # NOTE(review): not reached on abrupt session teardown; a try/finally
    # around the yield would make shutdown unconditional.
    database.shutdown()
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
@fixture(scope='session')
def argus_app():
    """Import the Flask app with login-user loading patched out, so tests install g.user themselves."""
    with patch('argus.backend.service.user.load_logged_in_user') as user_loader:
        # Neutralise user resolution for the whole session.
        user_loader.return_value = None
        from argus_backend import argus_app
        yield argus_app
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
@fixture(scope='session', autouse=True)
def app_context(argus_db, argus_app):
    """Push an application context for the whole session and install a fake privileged user on flask.g."""
    with argus_app.app_context():
        g.user = User(
            id=uuid.uuid4(),
            username='test_user',
            full_name='Test User',
            email="tester@scylladb.com",
            roles=[UserRoles.User, UserRoles.Admin, UserRoles.Manager],
        )
        yield
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
@fixture(scope='session')
def flask_client(argus_app):
    """Flask test client bound to the session-wide application."""
    test_client = argus_app.test_client()
    return test_client
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
@fixture(scope='session')
def release_manager_service(argus_db) -> ReleaseManagerService:
    """Session-wide ReleaseManagerService; depends on argus_db so the database is up first."""
    service = ReleaseManagerService()
    return service
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
@fixture(scope='session')
def client_service(argus_db):
    """Session-wide ClientService; depends on argus_db so the database is up first."""
    service = ClientService()
    return service
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
@fixture(scope='session')
def results_service(argus_db):
    """Session-wide ResultsService; depends on argus_db so the database is up first."""
    service = ResultsService()
    return service
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def get_fake_test_run(
    test: ArgusTest,
    schema_version: str = "1.0.0",
    job_url: str = "http://example.com",
    started_by: str = "default_user",
    commit_id: str = "default_commit_id",
    sct_config: dict | None = None,
    origin_url: str | None = None,
    branch_name: str | None = "main",
    runner_public_ip: str | None = None,
    runner_private_ip: str | None = None
) -> tuple[str, SCTTestRunSubmissionRequest]:
    """Build a ("scylla-cluster-tests", SCTTestRunSubmissionRequest) pair with a fresh random run id."""
    request = SCTTestRunSubmissionRequest(
        schema_version=schema_version,
        run_id=str(uuid.uuid4()),
        job_name=test.build_system_id,
        job_url=job_url,
        started_by=started_by,
        commit_id=commit_id,
        sct_config=sct_config,
        origin_url=origin_url,
        branch_name=branch_name,
        runner_public_ip=runner_public_ip,
        runner_private_ip=runner_private_ip,
    )
    return "scylla-cluster-tests", request
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
@fixture(scope='session')
def release(release_manager_service) -> ArgusRelease:
    """Create a release with a timestamp-unique name, shared for the whole session."""
    unique_name = f"best_results_{time.time_ns()}"
    return release_manager_service.create_release(unique_name, unique_name, False)
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
@fixture(scope='session')
def group(release_manager_service, release) -> ArgusGroup:
    """Create a group with a timestamp-unique name under the session release."""
    unique_name = f"br_group{time.time_ns()}"
    return release_manager_service.create_group(
        unique_name, unique_name, build_system_id=release.name, release_id=str(release.id))
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
@fixture
def fake_test(release_manager_service, group: ArgusGroup, release: ArgusRelease) -> ArgusTest:
    """Create a fresh SCT-plugin test (new per test function) under the session group/release."""
    unique_name = f"test_{time.time_ns()}"
    return release_manager_service.create_test(
        unique_name, unique_name, unique_name, unique_name,
        group_id=str(group.id), release_id=str(release.id), plugin_name='scylla-cluster-tests')
|
|
File without changes
|
|
@@ -0,0 +1,178 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from dataclasses import asdict, dataclass
|
|
3
|
+
from typing import Any
|
|
4
|
+
|
|
5
|
+
import pytest
|
|
6
|
+
|
|
7
|
+
from argus.backend.error_handlers import DataValidationError
|
|
8
|
+
from argus.backend.plugins.sct.testrun import SCTTestRun
|
|
9
|
+
from argus.backend.tests.conftest import get_fake_test_run, fake_test
|
|
10
|
+
from argus.common.enums import TestInvestigationStatus
|
|
11
|
+
from argus.client.generic_result import ColumnMetadata, ResultType, ValidationRule, Status, StaticGenericResultTable
|
|
12
|
+
|
|
13
|
+
LOGGER = logging.getLogger(__name__)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class SampleTable(StaticGenericResultTable):
    """Result table mixing tracked, non-tracked and text columns for the best-results tests."""
    class Meta:
        name = "Test Table Name"
        description = "Test Table Description"
        # Columns with higher_is_better set participate in best-result tracking;
        # "non tracked col name" omits it and is asserted below not to be tracked.
        Columns = [ColumnMetadata(name="h_is_better", unit="ms", type=ResultType.FLOAT, higher_is_better=True),
                   ColumnMetadata(name="l_is_better", unit="ms", type=ResultType.INTEGER, higher_is_better=False),
                   ColumnMetadata(name="duration col name", unit="s", type=ResultType.DURATION, higher_is_better=False),
                   ColumnMetadata(name="non tracked col name", unit="", type=ResultType.FLOAT),
                   ColumnMetadata(name="text col name", unit="", type=ResultType.TEXT),
                   ]
        # best_abs/best_pct rules are relative to tracked best results; fixed_limit
        # is an absolute bound (exact rule semantics live in the results service).
        ValidationRules = {"h_is_better": ValidationRule(best_abs=4),
                           "l_is_better": ValidationRule(best_pct=50, best_abs=5),
                           "duration col name": ValidationRule(fixed_limit=590)
                           }
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@dataclass
class SampleCell:
    """One cell of sample data to feed into a result table's add_result in the tests below."""
    # Column name; must match a ColumnMetadata name in the table's Meta.
    column: str
    # Row label within the table.
    row: str
    # Raw cell value; the expected Python type depends on the column's ResultType.
    value: Any
    # Submission status; defaults to UNSET.
    status: Status = Status.UNSET
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def test_can_track_best_result(fake_test, client_service, results_service, release, group):
    """Best results are seeded by the first submission and updated only by improvements.

    Submits twice: the second run improves h_is_better and l_is_better but leaves
    the duration unchanged; only the improved cells should get new best entries
    (and new result dates).
    """
    run_type, run = get_fake_test_run(test=fake_test)
    results = SampleTable()
    results.sut_timestamp = 123
    sample_data = [
        SampleCell(column="h_is_better", row="row", value=10),
        SampleCell(column="l_is_better", row="row", value=10),
        SampleCell(column="duration col name", row="row", value=10),
    ]
    for cell in sample_data:
        results.add_result(column=cell.column, row=cell.row, value=cell.value, status=cell.status)
    client_service.submit_run(run_type, asdict(run))
    client_service.submit_results(run_type, run.run_id, results.as_dict())
    best_results = results_service.get_best_results(fake_test.id, results.name)
    # all results should be tracked as best - first submission
    for cell in sample_data:
        key = f"{cell.column}:{cell.row}"
        assert best_results[key][-1].value == cell.value
        assert str(best_results[key][-1].run_id) == run.run_id
    result_date_h = best_results["h_is_better:row"][-1].result_date  # save the result date for later comparison
    result_date_duration = best_results["duration col name:row"][-1].result_date
    # second submission with better results
    run_type, run = get_fake_test_run(test=fake_test)
    results = SampleTable()
    results.sut_timestamp = 124
    sample_data = [
        SampleCell(column="h_is_better", row="row", value=15),  # Improved
        SampleCell(column="l_is_better", row="row", value=5),  # Improved
        SampleCell(column="duration col name", row="row", value=10),  # Same
    ]
    for cell in sample_data:
        results.add_result(column=cell.column, row=cell.row, value=cell.value, status=cell.status)
    client_service.submit_run(run_type, asdict(run))
    client_service.submit_results(run_type, run.run_id, results.as_dict())
    best_results = results_service.get_best_results(fake_test.id, results.name)
    # best results should be updated
    assert best_results["h_is_better:row"][-1].value == 15
    assert best_results["h_is_better:row"][-1].result_date > result_date_h  # result date should be updated
    assert best_results["l_is_better:row"][-1].value == 5
    assert best_results["duration col name:row"][-1].value == 10
    # result date should not change as was not updated
    assert best_results["duration col name:row"][-1].result_date == result_date_duration
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def test_can_enable_best_results_tracking(fake_test, client_service, results_service, release, group):
    """
    best results tracking can be enabled by setting higher_is_better in ColumnMetadata to bool value
    enabling best results tracking for a text column should not break the system
    """
    # First submission uses SampleTable, whose "non tracked col name" column has
    # no higher_is_better flag - it must not appear in best results.
    run_type, run = get_fake_test_run(test=fake_test)
    results = SampleTable()
    results.sut_timestamp = 123
    sample_data = [
        SampleCell(column="h_is_better", row="row", value=10),
        SampleCell(column="l_is_better", row="row", value=10),
        SampleCell(column="duration col name", row="row", value=10),
        SampleCell(column="non tracked col name", row="row", value=10),
    ]
    for cell in sample_data:
        results.add_result(column=cell.column, row=cell.row, value=cell.value, status=cell.status)
    client_service.submit_run(run_type, asdict(run))
    client_service.submit_results(run_type, run.run_id, results.as_dict())
    best_results = results_service.get_best_results(fake_test.id, results.name)
    assert 'non tracked col name:row' not in best_results  # non tracked column should not be tracked

    # Same table name, but now every column (including text) opts into tracking.
    class TrackingAllSampleTable(StaticGenericResultTable):
        class Meta:
            name = "Test Table Name"
            description = "Test Table Description"
            Columns = [ColumnMetadata(name="h_is_better", unit="ms", type=ResultType.FLOAT, higher_is_better=True),
                       ColumnMetadata(name="l_is_better", unit="ms", type=ResultType.INTEGER, higher_is_better=False),
                       ColumnMetadata(name="duration col name", unit="s",
                                      type=ResultType.DURATION, higher_is_better=False),
                       ColumnMetadata(name="non tracked col name", unit="",
                                      type=ResultType.FLOAT, higher_is_better=True),
                       ColumnMetadata(name="text col name", unit="", type=ResultType.TEXT, higher_is_better=True),
                       ]

            ValidationRules = {"h_is_better": ValidationRule(best_abs=4),
                               "l_is_better": ValidationRule(best_pct=50, best_abs=5),
                               "duration col name": ValidationRule(fixed_limit=590)
                               }

    run_type, run = get_fake_test_run(test=fake_test)
    results = TrackingAllSampleTable()
    results.sut_timestamp = 124
    sample_data = [
        SampleCell(column="h_is_better", row="row", value=15),  # Improved
        SampleCell(column="l_is_better", row="row", value=5),  # Improved
        SampleCell(column="duration col name", row="row", value=10),  # Same
        SampleCell(column="non tracked col name", row="row", value=10),
        SampleCell(column="text col name", row="row", value="10"),
    ]
    for cell in sample_data:
        results.add_result(column=cell.column, row=cell.row, value=cell.value, status=cell.status)
    client_service.submit_run(run_type, asdict(run))
    client_service.submit_results(run_type, run.run_id, results.as_dict())
    best_results = results_service.get_best_results(fake_test.id, results.name)
    # Newly-tracked numeric column now shows up; text column is still excluded.
    assert best_results["h_is_better:row"][-1].value == 15
    assert best_results["l_is_better:row"][-1].value == 5
    assert best_results["duration col name:row"][-1].value == 10
    assert best_results["non tracked col name:row"][-1].value == 10
    assert 'text col name:row' not in best_results  # text column should not be tracked
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def test_ignored_runs_are_not_considered_in_best_results(fake_test, client_service, results_service, release, group):
    """Runs marked IGNORED must be excluded when computing best results."""
    run_type, run = get_fake_test_run(test=fake_test)
    results = SampleTable()
    results.sut_timestamp = 123
    sample_data = [
        SampleCell(column="h_is_better", row="row", value=100),
        SampleCell(column="l_is_better", row="row", value=10),
    ]
    for cell in sample_data:
        results.add_result(column=cell.column, row=cell.row, value=cell.value, status=cell.status)
    client_service.submit_run(run_type, asdict(run))
    client_service.submit_results(run_type, run.run_id, results.as_dict())
    # Second run reuses the same table object; add_result is called with the same
    # column:row keys (presumably replacing the previous cells - TODO confirm).
    run_type, run2 = get_fake_test_run(test=fake_test)
    sample_data = [
        SampleCell(column="h_is_better", row="row", value=200),
        SampleCell(column="l_is_better", row="row", value=5),
    ]
    for cell in sample_data:
        results.add_result(column=cell.column, row=cell.row, value=cell.value, status=cell.status)
    client_service.submit_run(run_type, asdict(run2))
    # The second submission fails validation against the existing best results.
    with pytest.raises(DataValidationError):
        client_service.submit_results(run_type, run2.run_id, results.as_dict())

    # ignore the second run
    run_model = SCTTestRun.get(id=run2.run_id)
    run_model.investigation_status = TestInvestigationStatus.IGNORED.value
    run_model.save()

    best_results = results_service.get_best_results(fake_test.id, results.name)

    assert best_results["h_is_better:row"][-1].value == 100  # should not consider the second run
    assert str(best_results["h_is_better:row"][-1].run_id) == run.run_id
    assert best_results["l_is_better:row"][-1].value == 10  # should not consider the second run
    assert str(best_results["l_is_better:row"][-1].run_id) == run.run_id
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
from argus.backend.service.results_service import Cell, ArgusGenericResultMetadata
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def test_cell_initialization():
    """A Cell built without a value leaves both value fields as None."""
    fresh = Cell(column="col1", row="row1", status="UNSET")
    assert (fresh.column, fresh.row, fresh.status) == ("col1", "row1", "UNSET")
    assert fresh.value is None
    assert fresh.value_text is None
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def test_valid_rules_and_better_value_should_pass():
    """A value above fixed_limit passes validation when higher is better."""
    target = Cell(column="col1", row="row1", status="UNSET", value=10)
    meta = ArgusGenericResultMetadata(
        validation_rules={"col1": [{"fixed_limit": 5}]},
        columns_meta=[{"name": "col1", "higher_is_better": True}],
    )
    target.update_cell_status_based_on_rules(meta, {})
    assert target.status == "PASS"
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def test_valid_rules_and_worse_value_should_fail():
    """A value below fixed_limit errors out when higher is better."""
    target = Cell(column="col1", row="row1", status="UNSET", value=3)
    meta = ArgusGenericResultMetadata(
        validation_rules={"col1": [{"fixed_limit": 5}]},
        columns_meta=[{"name": "col1", "higher_is_better": True}],
    )
    target.update_cell_status_based_on_rules(meta, {})
    assert target.status == "ERROR"
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def test_no_rules_should_keep_status_unset():
    """With no validation rules defined, the cell status stays UNSET."""
    target = Cell(column="col1", row="row1", status="UNSET", value=10)
    meta = ArgusGenericResultMetadata(
        validation_rules={},
        columns_meta=[{"name": "col1", "higher_is_better": True}],
    )
    target.update_cell_status_based_on_rules(meta, {})
    assert target.status == "UNSET"
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def test_not_unset_status_should_not_be_validated():
    """A cell whose status was set explicitly is left untouched by rule validation."""
    target = Cell(column="col1", row="row1", status="PASS", value=10)
    meta = ArgusGenericResultMetadata(
        validation_rules={"col1": [{"fixed_limit": 5}]},
        columns_meta=[{"name": "col1", "higher_is_better": True}],
    )
    target.update_cell_status_based_on_rules(meta, {})
    assert target.status == "PASS"
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def test_higher_is_better_false_should_fail():
    """A value above fixed_limit errors out when lower is better."""
    target = Cell(column="col1", row="row1", status="UNSET", value=10)
    meta = ArgusGenericResultMetadata(
        validation_rules={"col1": [{"fixed_limit": 5}]},
        columns_meta=[{"name": "col1", "higher_is_better": False}],
    )
    target.update_cell_status_based_on_rules(meta, {})
    assert target.status == "ERROR"
|