squad 1.73-py3-none-any.whl → 1.75-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- squad/api/rest.py +18 -10
- squad/ci/backend/tuxsuite.py +104 -22
- squad/ci/models.py +30 -1
- squad/core/comparison.py +25 -10
- squad/core/failures.py +14 -18
- squad/core/history.py +32 -26
- squad/core/models.py +6 -5
- squad/core/queries.py +1 -1
- squad/frontend/comparison.py +5 -5
- squad/frontend/templates/squad/_results_table.jinja2 +0 -4
- squad/frontend/templates/squad/build-nav.jinja2 +0 -5
- squad/frontend/templates/squad/test_history.jinja2 +1 -1
- squad/frontend/templatetags/squad.py +2 -4
- squad/frontend/tests.py +54 -36
- squad/frontend/urls.py +0 -2
- squad/frontend/views.py +35 -16
- squad/settings.py +9 -0
- squad/version.py +1 -1
- {squad-1.73.dist-info → squad-1.75.dist-info}/METADATA +1 -1
- {squad-1.73.dist-info → squad-1.75.dist-info}/RECORD +24 -26
- squad/frontend/failures.py +0 -65
- squad/frontend/templates/squad/failures.jinja2 +0 -91
- {squad-1.73.dist-info → squad-1.75.dist-info}/COPYING +0 -0
- {squad-1.73.dist-info → squad-1.75.dist-info}/WHEEL +0 -0
- {squad-1.73.dist-info → squad-1.75.dist-info}/entry_points.txt +0 -0
- {squad-1.73.dist-info → squad-1.75.dist-info}/top_level.txt +0 -0
squad/api/rest.py
CHANGED
@@ -3,7 +3,6 @@ import yaml
 
 from django.db.models import Q, F, Value as V, CharField, Prefetch
 from django.db.models.functions import Concat
-from django.db.models.query import prefetch_related_objects
 from django.db.utils import IntegrityError
 from django.core import exceptions as core_exceptions
 from django.core.exceptions import ValidationError
@@ -904,7 +903,10 @@ class BuildViewSet(NestedViewSetMixin, ModelViewSet):
 
     * `api/builds/<id>/failures_with_confidence` GET
 
-        List of failing tests with confidence scores.
+        List of failing tests with confidence scores. For each failure SQUAD will look back
+        N builds, where N is defined in project settings. List is paginated.
+
+        * releases_only - when active, look back only on builds with is_release=True
 
     * `api/builds/<id>/metrics` GET
 
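A sketch of how a client might call the endpoint described above; the instance URL, build id, token, and the exact response field names are hypothetical (they follow the serializers in this file, but are not confirmed by the diff):

    import requests

    SQUAD = "https://squad.example.com"   # hypothetical instance
    TOKEN = "0123456789abcdef"            # hypothetical API token

    # Ask for failures with confidence on build 42, looking back only
    # at builds marked is_release=True (the new releases_only flag)
    resp = requests.get(
        f"{SQUAD}/api/builds/42/failures_with_confidence",
        params={"releases_only": "1"},
        headers={"Authorization": f"Token {TOKEN}"},
    )
    resp.raise_for_status()

    # The list is paginated like the other SQUAD list endpoints
    for failure in resp.json()["results"]:
        print(failure["name"], failure["confidence"])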
@@ -1024,17 +1026,13 @@ class BuildViewSet(NestedViewSetMixin, ModelViewSet):
             result=False,
         ).exclude(
             has_known_issues=True,
-        ).only(
-            'metadata__suite', 'metadata__name', 'metadata__id',
         ).order_by(
-            'metadata__suite', 'metadata__name',
-        ).values_list(
-            'metadata__suite', 'metadata__name', 'metadata__id', named=True,
-        )
+            'id', 'metadata__suite', 'metadata__name', 'environment__slug',
+        ).distinct()
 
         page = self.paginate_queryset(failures)
-        fwc = failures_with_confidence(build.project, build, page)
-
+        releases_only = request.GET.get("releases_only")
+        fwc = failures_with_confidence(build.project, build, page, releases_only=releases_only)
         serializer = FailuresWithConfidenceSerializer(fwc, many=True, context={'request': request})
         return self.get_paginated_response(serializer.data)
 
@@ -1393,6 +1391,16 @@ class ConfidenceSerializer(serializers.BaseSerializer):
 
 class FailuresWithConfidenceSerializer(TestSerializer):
     confidence = ConfidenceSerializer()
+    status = None
+
+    class Meta:
+        model = Test
+        exclude = (
+            'known_issues',
+            'has_known_issues',
+            'result',
+            'url',
+        )
 
 
 class TestViewSet(NestedViewSetMixin, ModelViewSet):
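The new serializer leans on DRF's Meta.exclude: every field is serialized except the ones listed. A minimal standalone illustration of the same pattern (the model and field names are only an example, not squad's exact classes):

    from rest_framework import serializers
    from squad.core.models import Test

    class ExampleFailureSerializer(serializers.ModelSerializer):
        class Meta:
            model = Test
            # serialize every Test field except these; in a failures list
            # 'result' is always False, so it carries no information
            exclude = ('known_issues', 'has_known_issues', 'result')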
squad/ci/backend/tuxsuite.py
CHANGED
@@ -19,6 +19,7 @@ from cryptography.hazmat.primitives import (
 from squad.ci.backend.null import Backend as BaseBackend
 from squad.ci.exceptions import FetchIssue, TemporaryFetchIssue
 from squad.ci.models import TestJob
+from squad.core.models import TestRun
 
 
 logger = logging.getLogger('squad.ci.backend.tuxsuite')
@@ -99,9 +100,14 @@ class Backend(BaseBackend):
 
         ('BUILD', 'linaro@anders', '1yPYGaOEPNwr2pCqBgONY43zORq')
 
+        The leading string determines the type of the tuxsuite object:
+        - BUILD
+        - OEBUILD
+        - TEST
+
         """
 
-        regex = r'^(BUILD|TEST):([0-9a-z_\-]+@[0-9a-z_\-]+)#([a-zA-Z0-9]+)$'
+        regex = r'^(OEBUILD|BUILD|TEST):([0-9a-z_\-]+@[0-9a-z_\-]+)#([a-zA-Z0-9]+)$'
         matches = re.findall(regex, job_id)
         if len(matches) == 0:
             raise FetchIssue(f'Job id "{job_id}" does not match "{regex}"')
@@ -112,18 +118,19 @@ class Backend(BaseBackend):
     def generate_job_id(self, result_type, result):
         """
         The job id for TuxSuite results is generated using 3 pieces of info:
-        1. If it's either "BUILD" or "TEST" result;
+        1. If it's either "BUILD", "OEBUILD" or "TEST" result;
         2. The TuxSuite project. Ex: "linaro/anders"
         3. The ksuid of the object. Ex: "1yPYGaOEPNwr2pfqBgONY43zORp"
 
         A couple examples for job_id are:
         - BUILD:linaro@anders#1yPYGaOEPNwr2pCqBgONY43zORq
+        - OEBUILD:linaro@lkft#2Wetiz7Qs0TbtfPgPT7hUObWqDK
         - TEST:arm@bob#1yPYGaOEPNwr2pCqBgONY43zORp
 
         Then it's up to SQUAD's TuxSuite backend to parse the job_id
         and fetch results properly.
         """
-        _type =
+        _type = result_type.upper()
         project = result["project"].replace("/", "@")
         uid = result["uid"]
         return f"{_type}:{project}#{uid}"
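A self-contained sketch of the round trip these two methods implement, reusing the regex and the docstring examples above:

    import re

    REGEX = r'^(OEBUILD|BUILD|TEST):([0-9a-z_\-]+@[0-9a-z_\-]+)#([a-zA-Z0-9]+)$'

    def generate_job_id(result_type, result):
        # mirrors Backend.generate_job_id above
        return f"{result_type.upper()}:{result['project'].replace('/', '@')}#{result['uid']}"

    job_id = generate_job_id('oebuild', {'project': 'linaro/lkft',
                                         'uid': '2Wetiz7Qs0TbtfPgPT7hUObWqDK'})
    # -> 'OEBUILD:linaro@lkft#2Wetiz7Qs0TbtfPgPT7hUObWqDK'
    assert re.findall(REGEX, job_id) == [
        ('OEBUILD', 'linaro@lkft', '2Wetiz7Qs0TbtfPgPT7hUObWqDK')
    ]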
@@ -146,6 +153,72 @@ class Backend(BaseBackend):
 
         return None
 
+    def set_build_name(self, test_job, job_url, results, metadata, settings):
+        """
+        Tuxsuite allows plans with builds and tests within.
+        Some of these plans also support "special tests", which are
+        kind a sanity test to run before spinning a heavy load of tests.
+
+        Here's the default plan hierarchy:
+        - build -> tests
+
+        Now with sanity tests in between:
+        - build -> sanity tests -> tests
+
+        SQUAD needs to get to the build level in
+        order to retrieve the build object and finally retrieve
+        its build name attribute
+        """
+
+        build_id = results['waiting_for']
+        if build_id is None:
+            return
+
+        items = build_id.split('#')
+        if len(items) == 2:
+            _type = items[0]
+            _id = items[1]
+        else:
+            _type = "BUILD"
+            _id = items[0]
+
+        test_id = results['uid']
+
+        try:
+            # Check if the target build or sanity test is fetched
+            job_id = self.generate_job_id(_type.lower(), results)
+            job_id = job_id.replace(test_id, _id)
+
+            candidate = TestRun.objects.get(
+                build=test_job.target_build,
+                job_id=job_id
+            )
+
+            build_name = candidate.metadata.get('build_name')
+            if build_name:
+                metadata['build_name'] = build_name
+                return
+
+        except TestRun.DoesNotExist:
+            pass
+
+        # It is a sanity test, an extra request is needed to get build id
+        if _type == 'TEST':
+            follow_test_url = job_url.replace(test_id, _id)
+            test_json = self.fetch_url(follow_test_url).json()
+            build_id = test_json.get('waiting_for')
+
+        build_id = build_id.replace('BUILD#', '')
+        build_url = job_url.replace(test_id, build_id).replace('/tests/', '/builds/')
+
+        build_metadata = self.fetch_url(build_url).json()
+
+        build_metadata_keys = settings.get('TEST_BUILD_METADATA_KEYS', [])
+        metadata.update({k: build_metadata.get(k) for k in build_metadata_keys})
+
+        if 'toolchain' in build_metadata_keys and 'kconfig' in build_metadata_keys and metadata['build_name'] in [None, '']:
+            metadata['build_name'] = self.generate_test_name(build_metadata)
+
     def parse_build_results(self, test_job, job_url, results, settings):
         required_keys = ['build_status', 'warnings_count', 'download_url', 'retry']
         self.__check_required_keys__(required_keys, results)
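The lookup above hinges on TuxSuite's `waiting_for` field; a standalone sketch of how the object type and id are split out (the sample ids are made up):

    def split_waiting_for(waiting_for):
        # 'TEST#<ksuid>' -> sanity test; 'BUILD#<ksuid>' -> build;
        # a bare '<ksuid>' is treated as a build (older payloads)
        items = waiting_for.split('#')
        if len(items) == 2:
            return items[0], items[1]
        return 'BUILD', items[0]

    assert split_waiting_for('TEST#2abcDEF') == ('TEST', '2abcDEF')
    assert split_waiting_for('BUILD#2abcDEF') == ('BUILD', '2abcDEF')
    assert split_waiting_for('2abcDEF') == ('BUILD', '2abcDEF')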
@@ -163,6 +236,7 @@ class Backend(BaseBackend):
         metadata_keys = settings.get('BUILD_METADATA_KEYS', [])
         metadata = {k: results.get(k) for k in metadata_keys}
         metadata['job_url'] = job_url
+        metadata['job_id'] = test_job.job_id
         metadata['config'] = urljoin(results.get('download_url') + '/', 'config')
         metadata['build_name'] = test_name
 
@@ -189,6 +263,30 @@ class Backend(BaseBackend):
 
         return status, completed, metadata, tests, metrics, logs
 
+    def parse_oebuild_results(self, test_job, job_url, results, settings):
+        required_keys = ['download_url', 'result']
+        self.__check_required_keys__(required_keys, results)
+
+        # Make metadata
+        metadata_keys = settings.get('OEBUILD_METADATA_KEYS', [])
+        metadata = {k: results.get(k) for k in metadata_keys}
+        metadata['job_url'] = job_url
+        metadata['job_id'] = test_job.job_id
+
+        sources = results.get('sources')
+        if sources:
+            metadata['sources'] = sources
+
+        # Create tests and metrics
+        tests = {}
+        metrics = {}
+        completed = True
+        status = 'Complete'
+        tests['build/build'] = 'pass' if results['result'] == 'pass' else 'fail'
+        logs = self.fetch_url(results['download_url'], 'build.log').text
+
+        return status, completed, metadata, tests, metrics, logs
+
     def parse_test_results(self, test_job, job_url, results, settings):
         status = 'Complete'
         completed = True
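For orientation, a results payload that would satisfy parse_oebuild_results; the values are illustrative, not taken from a real TuxSuite response:

    results = {
        'result': 'pass',            # required; anything else records 'build/build' as 'fail'
        'download_url': 'https://storage.example.com/oebuild/123',  # required; build.log is fetched from here
        'sources': {'repo': 'https://example.com/meta.git'},        # optional; copied into metadata
    }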
@@ -200,6 +298,7 @@ class Backend(BaseBackend):
         metadata_keys = settings.get('TEST_METADATA_KEYS', [])
         metadata = {k: results.get(k) for k in metadata_keys}
         metadata['job_url'] = job_url
+        metadata['job_id'] = test_job.job_id
 
         # Set job name
         try:
@@ -227,25 +326,8 @@ class Backend(BaseBackend):
         # Retrieve TuxRun log
         logs = self.fetch_url(job_url + '/', 'logs?format=txt').text
 
-        #
-
-        build_id = results['waiting_for']
-
-        # Tuxsuite recently has added support for tests depending on other tests
-        if build_id.startswith('BUILD#') or '#' not in build_id:
-            _, _, test_id = self.parse_job_id(test_job.job_id)
-            build_id = build_id.replace('BUILD#', '')
-            build_url = job_url.replace(test_id, build_id).replace('/tests/', '/builds/')
-
-            # TODO: check if we can save a few seconds by querying a testjob that
-            # already contains build results
-            build_metadata = self.fetch_url(build_url).json()
-
-            build_metadata_keys = settings.get('TEST_BUILD_METADATA_KEYS', [])
-            metadata.update({k: build_metadata.get(k) for k in build_metadata_keys})
-
-            if 'toolchain' in build_metadata_keys and 'kconfig' in build_metadata_keys and metadata['build_name'] in [None, '']:
-                metadata['build_name'] = self.generate_test_name(build_metadata)
+        # Follow up the chain and retrieve build name
+        self.set_build_name(test_job, job_url, results, metadata, settings)
 
         # Create a boot test
         boot_test_name = 'boot/' + (metadata.get('build_name') or 'boot')
squad/ci/models.py
CHANGED
@@ -4,6 +4,7 @@ import traceback
 import yaml
 from io import StringIO
 from django.db import models, transaction, DatabaseError
+from django.db.models import Q
 from django.utils import timezone
 from dateutil.relativedelta import relativedelta
 
@@ -66,6 +67,19 @@ class Backend(models.Model):
             yield test_job
 
     def fetch(self, job_id):
+        # Job statuses can be one of:
+        # * None
+        # * Submitted
+        # * Scheduling
+        # * Scheduled
+        # * Running
+        # * Complete
+        # * Incomplete
+        # * Canceled
+        # * Fetching
+        # Only jobs in 'Complete', 'Canceled' and 'Incomplete' are eligible for fetching
+
+        job_status = None
         with transaction.atomic():
             try:
                 test_job = TestJob.objects.select_for_update(nowait=True).get(pk=job_id)
@@ -91,6 +105,8 @@ class Backend(models.Model):
                 test_job.save()
                 return
 
+            job_status = test_job.job_status
+            test_job.job_status = 'Fetching'
             test_job.fetched = True
             test_job.fetched_at = timezone.now()
             test_job.save()
@@ -130,10 +146,16 @@ class Backend(models.Model):
         except DuplicatedTestJob as exception:
             logger.error('Failed to fetch test_job(%d): "%s"' % (test_job.id, str(exception)))
 
+        if test_job.testrun:
+            self.__postprocess_testjob__(test_job)
+
+        # Removed the 'Fetching' job_status only after eventual plugins
+        # are finished, this garantees extra tests and metadata to
+        # be in SQUAD before the build is considered finished
+        test_job.job_status = job_status
         test_job.save()
 
         if test_job.testrun:
-            self.__postprocess_testjob__(test_job)
             UpdateProjectStatus()(test_job.testrun)
 
     def __postprocess_testjob__(self, test_job):
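A condensed sketch of the fetch lifecycle these hunks introduce, with the backend download and plugin steps stubbed out (the two helper names are hypothetical):

    def fetch(test_job):
        # Park the job in 'Fetching' so Build.finished keeps treating it
        # as pending (see TestJobManager.pending below)
        original_status = test_job.job_status
        test_job.job_status = 'Fetching'
        test_job.save()

        download_results(test_job)   # stub: backend-specific fetch
        run_plugins(test_job)        # stub: post-processing plugins

        # Restore the real status only after the plugins are done, so the
        # extra tests/metadata are in SQUAD before the build counts as finished
        test_job.job_status = original_status
        test_job.save()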
@@ -177,9 +199,16 @@ class Backend(models.Model):
         return '%s (%s)' % (self.name, self.implementation_type)
 
 
+class TestJobManager(models.Manager):
+
+    def pending(self):
+        return self.filter(Q(fetched=False) | Q(job_status='Fetching'))
+
+
 class TestJob(models.Model):
 
     __test__ = False
+    objects = TestJobManager()
 
     # input - internal
     backend = models.ForeignKey(Backend, related_name='test_jobs', on_delete=models.CASCADE)
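With the manager attached to TestJob.objects, callers can select jobs that are either not fetched yet or currently being fetched; a short usage sketch:

    from squad.ci.models import TestJob

    # Equivalent to:
    #   TestJob.objects.filter(Q(fetched=False) | Q(job_status='Fetching'))
    pending_jobs = TestJob.objects.pending()
    print(pending_jobs.count())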
squad/core/comparison.py
CHANGED
@@ -393,7 +393,7 @@ class TestComparison(BaseComparison):
 
         tests = models.Test.objects.filter(test_run_id__in=test_runs_ids.keys()).annotate(
             suite_slug=F('suite__slug'),
-        ).prefetch_related('metadata').defer('log')
+        ).prefetch_related('metadata').defer('log').order_by()
 
         for test in tests:
             build, env = test_runs_ids.get(test.test_run_id)
@@ -539,6 +539,9 @@ class TestComparison(BaseComparison):
             # No baseline is present, then no comparison is needed
             return
 
+        baseline = self.builds[0]
+        target = self.builds[1]
+
         query = self.base_sql.copy()
         query['select'].append('target.result')
         query['select'].append('target.has_known_issues')
@@ -549,42 +552,54 @@ class TestComparison(BaseComparison):
         tests = [t for t in models.Test.objects.raw(sql)]
         prefetch_related_objects(tests, 'metadata', 'suite')
 
-        env_ids = []
+        env_ids = [t.environment_id for t in tests]
+        envs = {e.id: e for e in models.Environment.objects.filter(id__in=env_ids).all()}
+        envs_slugs = sorted({e.slug for e in envs.values()})
+
+        for build in self.builds:
+            self.environments[build] = envs_slugs
+
         fixed_tests = defaultdict(set)
         regressions = defaultdict(set)
         fixes = defaultdict(set)
 
         for test in tests:
             env_id = test.environment_id
+
             full_name = test.full_name
+            if full_name not in self.results:
+                self.results[full_name] = OrderedDict()
 
-
+            baseline_key = (baseline, envs[env_id].slug)
+            target_key = (target, envs[env_id].slug)
 
             if test.status == 'fail':
                 regressions[env_id].add(full_name)
+                self.results[full_name][target_key] = 'fail'
+                self.results[full_name][baseline_key] = 'pass'
             elif test.status == 'pass':
                 fixes[env_id].add(full_name)
                 fixed_tests[env_id].add(test.metadata_id)
+                self.results[full_name][target_key] = 'pass'
+                self.results[full_name][baseline_key] = 'fail'
 
-
+        self.results = OrderedDict(sorted(self.results.items()))
 
         for env_id in regressions.keys():
-            self.__regressions__[
+            self.__regressions__[envs[env_id].slug] = list(regressions[env_id])
 
         # It's not a fix if baseline test is intermittent for a given environment:
         # - test.has_known_issues == True and
         # - test.known_issues[env].intermittent == True
-        fixed_tests_environment_slugs = [
+        fixed_tests_environment_slugs = [envs[env_id] for env_id in fixed_tests.keys()]
         intermittent_fixed_tests = self.__intermittent_fixed_tests__(fixed_tests, fixed_tests_environment_slugs)
         for env_id in fixes.keys():
-            env_slug =
+            env_slug = envs[env_id].slug
             test_list = [test for test in fixes[env_id] if (test, env_slug) not in intermittent_fixed_tests]
             if len(test_list):
                 self.__fixes__[env_slug] = test_list
 
-
-        target = self.builds[1]
-        for env in environments.values():
+        for env in envs.values():
             if env.slug in self.__regressions__:
                 for test in self.__regressions__[env.slug]:
                     self.__diff__[test][target][env.slug] = False
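The loop above fills self.results keyed by full test name and then by (build, environment slug) pairs; a toy illustration of the resulting shape, with strings standing in for Build objects and a hypothetical test name:

    from collections import OrderedDict

    baseline, target = 'v1.73', 'v1.75'   # stand-ins for Build instances
    results = OrderedDict()

    full_name = 'ltp-syscalls/fork05'     # hypothetical test
    results[full_name] = OrderedDict()
    results[full_name][(target, 'arm64')] = 'fail'    # a regression on arm64
    results[full_name][(baseline, 'arm64')] = 'pass'

    # finally sorted by test name, as the code above does
    results = OrderedDict(sorted(results.items()))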
squad/core/failures.py
CHANGED
@@ -1,32 +1,28 @@
+from django.db.models import prefetch_related_objects
+
 from squad.core.models import Test
 
 
-def failures_with_confidence(project, build, failures):
+def failures_with_confidence(project, build, failures, releases_only=False):
     limit = project.build_confidence_count
     threshold = project.build_confidence_threshold
 
-
-
-
-
-
-
-
-        "metadata",
-        "environment",
-    ).order_by(
-        "metadata__suite",
-        "metadata__name",
-    )
+    prefetch_related_objects(failures, "metadata")
+
+    queryset = project.builds.filter(id__lt=build.id)
+    if releases_only:
+        queryset = queryset.filter(is_release=True)
+    builds = queryset.order_by('-id').all()[:limit]
+    builds_ids = [b.id for b in builds]
 
     # Find previous `limit` tests that contain this test x environment
-    for failure in
+    for failure in failures:
         history = Test.objects.filter(
-
+            build_id__in=builds_ids,
             metadata_id=failure.metadata_id,
             environment_id=failure.environment_id,
-        ).
+        ).only("result").order_by()
 
         failure.set_confidence(threshold, history)
 
-    return
+    return failures
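A usage sketch of the reworked helper, matching the call site in squad/api/rest.py; `build` is assumed to be a Build and `failures` a page of failing Test objects, and the confidence attribute name follows the API serializer rather than anything confirmed here:

    from squad.core.failures import failures_with_confidence

    fwc = failures_with_confidence(
        build.project,
        build,
        failures,             # page of failing Test instances
        releases_only=True,   # only look back at builds with is_release=True
    )
    for test in fwc:
        # confidence is attached by Test.set_confidence()
        print(test.metadata.name, test.confidence)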
squad/core/history.py
CHANGED
@@ -1,9 +1,9 @@
-from collections import OrderedDict
+from collections import defaultdict
 from django.core.paginator import Paginator
 
 from squad.core.queries import test_confidence
 from squad.core.utils import parse_name
-from squad.core.models import SuiteMetadata, KnownIssue
+from squad.core.models import SuiteMetadata, KnownIssue, Environment
 
 
 class TestResult(object):
@@ -11,20 +11,20 @@ class TestResult(object):
     __test__ = False
 
     class TestRunStatus(object):
-        def __init__(self, test_run, suite):
-            self.test_run = test_run
+        def __init__(self, test_run_id, suite):
+            self.test_run_id = test_run_id
             self.suite = suite
 
-    def __init__(self, test, suite, metadata, known_issues, is_duplicate=False):
+    def __init__(self, test, suite, metadata, known_issues, is_duplicate=False, list_of_duplicates=None):
         self.test = test
         self.suite = suite
         self.known_issues = known_issues
         if is_duplicate:
-            self.status, self.confidence_score = test_confidence(test)
+            self.status, self.confidence_score = test_confidence(None, list_of_duplicates=list_of_duplicates)
         else:
             self.status, self.confidence_score = (test.status, None)
-        self.test_run = test.test_run
-        self.test_run_status = self.TestRunStatus(self.test_run, self.suite)
+        self.test_run_id = test.test_run_id
+        self.test_run_status = self.TestRunStatus(self.test_run_id, self.suite)
         self.info = {
             "test_description": metadata.description if metadata else '',
             "test_instructions": metadata.instructions_to_reproduce if metadata else '',
@@ -51,11 +51,6 @@ class TestHistory(object):
 
         self.top = builds[0]
 
-        environments = OrderedDict()
-        results = OrderedDict()
-        for build in builds:
-            results[build] = {}
-
         issues_by_env = {}
         for issue in KnownIssue.active_by_project_and_test(project, full_test_name).all():
             for env in issue.environments.all():
@@ -65,16 +60,27 @@ class TestHistory(object):
 
         suite = project.suites.prefetch_related('metadata').get(slug=suite_slug)
         metadata = SuiteMetadata.objects.get(kind='test', suite=suite_slug, name=test_name)
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+        results = defaultdict()
+        environments_ids = set()
+        for build in builds:
+            results[build] = defaultdict(list)
+            for test in build.tests.filter(metadata=metadata).order_by():
+                test.metadata = metadata
+                test.suite = suite
+                results[build][test.environment_id].append(test)
+                environments_ids.add(test.environment_id)
+
+        results_without_duplicates = defaultdict()
+        for build in results:
+            results_without_duplicates[build] = defaultdict()
+            for env in results[build]:
+                tests = results[build][env]
+
+                is_duplicate = len(tests) > 1
+                known_issues = issues_by_env.get(tests[0].environment_id)
+                result = TestResult(tests[0], suite, metadata, known_issues, is_duplicate, list_of_duplicates=tests)
+                results_without_duplicates[build][env] = result
+
+        self.environments = Environment.objects.filter(id__in=environments_ids).order_by('slug')
+        self.results = results_without_duplicates
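The same grouping idea in isolation: duplicates are repeated runs of one test on one environment within one build, and only groups longer than one go through the confidence vote. Plain tuples stand in for the model objects:

    from collections import defaultdict

    runs = [
        # (build, environment_id, status) - illustrative only
        ('v1.75', 1, 'pass'),
        ('v1.75', 1, 'fail'),   # duplicate run on environment 1
        ('v1.75', 2, 'pass'),
    ]

    grouped = defaultdict(lambda: defaultdict(list))
    for build, env_id, status in runs:
        grouped[build][env_id].append(status)

    for build, envs in grouped.items():
        for env_id, statuses in envs.items():
            if len(statuses) > 1:
                print(build, env_id, 'duplicates -> confidence vote:', statuses)
            else:
                print(build, env_id, statuses[0])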
squad/core/models.py
CHANGED
@@ -584,10 +584,11 @@ class Build(models.Model):
     def important_metadata(self):
         wanted = (self.project.important_metadata_keys or '').splitlines()
         m = self.metadata
+        metadata = self.metadata
         if len(wanted):
-
-
-
+            metadata = {k: m[k] for k in wanted if k in m}
+
+        return metadata
 
     @property
     def has_extra_metadata(self):
@@ -619,7 +620,7 @@ class Build(models.Model):
         # dependency on squad.ci, what in theory violates our architecture.
         testjobs = self.test_jobs
         if testjobs.count() > 0:
-            if testjobs.
+            if testjobs.pending().count() > 0:
                 # a build that has pending CI jobs is NOT finished
                 reasons.append("There are unfinished CI jobs")
             else:
@@ -1025,7 +1026,7 @@ class Test(models.Model):
 
     @property
     def passes(self):
-        return sum(1 for t in self.tests if t.
+        return sum(1 for t in self.tests if t.result)
 
     @property
     def score(self):
squad/core/queries.py
CHANGED
@@ -199,7 +199,7 @@ def test_confidence(test, list_of_duplicates=None):
         return {value: count for value, count in data.items() if count == max_count}
 
     if test:
-        duplicates = models.Test.objects.filter(
+        duplicates = models.Test.objects.filter(metadata_id=test.metadata_id, environment_id=test.environment_id, build_id=test.build_id).order_by()
     else:
         duplicates = list_of_duplicates
 
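test_confidence collapses duplicates by majority vote over their statuses; a standalone sketch of that step (illustrative, not squad's exact code):

    from collections import Counter

    def most_frequent_statuses(duplicates):
        data = Counter(duplicates)
        max_count = max(data.values())
        # same shape as the dict comprehension in the hunk above
        return {value: count for value, count in data.items() if count == max_count}

    print(most_frequent_statuses(['pass', 'pass', 'fail']))  # {'pass': 2}
    print(most_frequent_statuses(['pass', 'fail']))          # tie -> {'pass': 1, 'fail': 1}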
squad/frontend/comparison.py
CHANGED
@@ -116,11 +116,11 @@ def compare_builds(request):
         baseline = get_object_or_404(project.builds, version=baseline_build)
         target = get_object_or_404(project.builds, version=target_build)
 
-
-
-
-
-        comparison
+        if comparison_type == 'test':
+            comparison = TestComparison(baseline, target, regressions_and_fixes_only=True)
+        else:
+            comparison_class = __get_comparison_class(comparison_type)
+            comparison = comparison_class.compare_builds(baseline, target)
 
         comparison.results = __paginate(comparison.results, request)
 
squad/frontend/templates/squad/build-nav.jinja2
CHANGED
@@ -103,11 +103,6 @@
       {{ _('Tests') }}
     </a>
   </li>
-  <li role="presentation" {% if url_name == 'failures' %}class="active"{% endif %}>
-    <a href="{{build_section_url(build, 'failures')}}">
-      {{ _('Test failures') }}
-    </a>
-  </li>
   <li role="presentation" {% if url_name == 'build_metrics' %}class="active"{% endif %}>
     <a href="{{build_section_url(build, 'build_metrics')}}">
       {{ _('Metrics') }}
squad/frontend/templates/squad/test_history.jinja2
CHANGED
@@ -39,7 +39,7 @@
       <td><a href="{{project_url(build)}}">{{build.version}}</a></td>
       <td>{{build.datetime|date}}</td>
       {% for environment in history.environments %}
-        {% with result=results[environment] %}
+        {% with result=results[environment.id] %}
           {% if result %}
             {% with known_issues=result.known_issues %}
               <td class='{{result.status|slugify}}'>
squad/frontend/templatetags/squad.py
CHANGED
@@ -82,14 +82,12 @@ def testrun_suite_test_details_history_url(group, project, build, status, test):
 
 
 def testrun_suite_or_test_url(group, project, build, status, kind, test=None):
-    testrun = status.test_run.id
-    suite = status.suite
     args = (
         group.slug,
         project.slug,
         build.version,
-        testrun,
-        suite.slug.replace('/', '$'),
+        status.test_run_id,
+        status.suite.slug.replace('/', '$'),
     )
     if test:
        if isinstance(test, Test):