squad-1.72.2-py3-none-any.whl → squad-1.74-py3-none-any.whl
- squad/api/rest.py +19 -10
- squad/ci/backend/lava.py +2 -2
- squad/ci/backend/tuxsuite.py +71 -19
- squad/compat.py +14 -0
- squad/core/comparison.py +25 -10
- squad/core/failures.py +14 -18
- squad/core/history.py +32 -26
- squad/core/models.py +1 -1
- squad/core/queries.py +1 -1
- squad/core/tasks/__init__.py +3 -3
- squad/core/templates/squad/notification/diff.txt.jinja2 +1 -1
- squad/core/utils.py +1 -1
- squad/frontend/comparison.py +5 -5
- squad/frontend/templates/squad/_results_table.jinja2 +0 -4
- squad/frontend/templates/squad/build-nav.jinja2 +0 -5
- squad/frontend/templates/squad/test_history.jinja2 +1 -1
- squad/frontend/templatetags/squad.py +5 -6
- squad/frontend/tests.py +54 -36
- squad/frontend/urls.py +0 -2
- squad/frontend/views.py +42 -46
- squad/version.py +1 -1
- {squad-1.72.2.dist-info → squad-1.74.dist-info}/METADATA +1 -1
- {squad-1.72.2.dist-info → squad-1.74.dist-info}/RECORD +27 -29
- {squad-1.72.2.dist-info → squad-1.74.dist-info}/WHEEL +1 -1
- squad/frontend/failures.py +0 -65
- squad/frontend/templates/squad/failures.jinja2 +0 -91
- {squad-1.72.2.dist-info → squad-1.74.dist-info}/COPYING +0 -0
- {squad-1.72.2.dist-info → squad-1.74.dist-info}/entry_points.txt +0 -0
- {squad-1.72.2.dist-info → squad-1.74.dist-info}/top_level.txt +0 -0
squad/api/rest.py
CHANGED
@@ -3,7 +3,6 @@ import yaml
 
 from django.db.models import Q, F, Value as V, CharField, Prefetch
 from django.db.models.functions import Concat
-from django.db.models.query import prefetch_related_objects
 from django.db.utils import IntegrityError
 from django.core import exceptions as core_exceptions
 from django.core.exceptions import ValidationError
@@ -904,7 +903,10 @@ class BuildViewSet(NestedViewSetMixin, ModelViewSet):
 
     * `api/builds/<id>/failures_with_confidence` GET
 
-        List of failing tests with confidence scores.
+        List of failing tests with confidence scores. For each failure SQUAD will look back
+        N builds, where N is defined in project settings. List is paginated.
+
+        * releases_only - when active, look back only on builds with is_release=True
 
     * `api/builds/<id>/metrics` GET
 
@@ -1024,17 +1026,13 @@ class BuildViewSet(NestedViewSetMixin, ModelViewSet):
             result=False,
         ).exclude(
             has_known_issues=True,
-        ).only(
-            'metadata__suite', 'metadata__name', 'metadata__id',
         ).order_by(
-            'metadata__suite', 'metadata__name',
-        ).values_list(
-            'metadata__suite', 'metadata__name', 'metadata__id', named=True,
-        )
+            'id', 'metadata__suite', 'metadata__name', 'environment__slug',
+        ).distinct()
 
         page = self.paginate_queryset(failures)
-
-        fwc = failures_with_confidence(build.project, build, page)
+        releases_only = request.GET.get("releases_only")
+        fwc = failures_with_confidence(build.project, build, page, releases_only=releases_only)
         serializer = FailuresWithConfidenceSerializer(fwc, many=True, context={'request': request})
         return self.get_paginated_response(serializer.data)
 
@@ -1393,6 +1391,17 @@ class ConfidenceSerializer(serializers.BaseSerializer):
 
 class FailuresWithConfidenceSerializer(TestSerializer):
     confidence = ConfidenceSerializer()
+    status = None
+
+    class Meta:
+        model = Test
+        exclude = (
+            'known_issues',
+            'has_known_issues',
+            'result',
+            'url',
+            'suite',
+        )
 
 
 class TestViewSet(NestedViewSetMixin, ModelViewSet):
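For illustration, a minimal client-side sketch of the new query parameter (the host, build id, and response field names are assumptions for the example, not part of this diff):

    import requests

    # Hypothetical SQUAD instance and build id
    url = "https://squad.example.com/api/builds/123/failures_with_confidence/"

    # releases_only restricts the confidence look-back to builds with is_release=True
    resp = requests.get(url, params={"releases_only": "true"})
    for failure in resp.json().get("results", []):
        print(failure)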
squad/ci/backend/lava.py
CHANGED
@@ -122,12 +122,12 @@ class Backend(BaseBackend):
         start_time = data.get('start_time', None)
         end_time = data.get('end_time', None)
         # convert to datetime
-        if type(start_time) == str:
+        if type(start_time) is str:
             try:
                 start_time = isoparse(start_time)
             except ValueError:
                 start_time = None
-        if type(end_time) == str:
+        if type(end_time) is str:
             try:
                 end_time = isoparse(end_time)
             except ValueError:
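The `== str` to `is str` switch reads like a flake8 E721-style cleanup; behaviour is unchanged. A standalone sketch of the same guard (not SQUAD's exact helper):

    from dateutil.parser import isoparse

    def parse_timestamp(value):
        # Only attempt ISO-8601 parsing on actual strings; malformed
        # strings fall back to None, non-strings are not parsed at all
        if type(value) is str:
            try:
                return isoparse(value)
            except ValueError:
                return None
        return None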
squad/ci/backend/tuxsuite.py
CHANGED
@@ -19,6 +19,7 @@ from cryptography.hazmat.primitives import (
 from squad.ci.backend.null import Backend as BaseBackend
 from squad.ci.exceptions import FetchIssue, TemporaryFetchIssue
 from squad.ci.models import TestJob
+from squad.core.models import TestRun
 
 
 logger = logging.getLogger('squad.ci.backend.tuxsuite')
@@ -146,6 +147,72 @@ class Backend(BaseBackend):
 
         return None
 
+    def set_build_name(self, test_job, job_url, results, metadata, settings):
+        """
+        Tuxsuite allows plans with builds and tests within.
+        Some of these plans also support "special tests", which are
+        a kind of sanity test to run before spinning up a heavy load of tests.
+
+        Here's the default plan hierarchy:
+        - build -> tests
+
+        Now with sanity tests in between:
+        - build -> sanity tests -> tests
+
+        SQUAD needs to get to the build level in
+        order to retrieve the build object and finally retrieve
+        its build name attribute.
+        """
+
+        build_id = results['waiting_for']
+        if build_id is None:
+            return
+
+        items = build_id.split('#')
+        if len(items) == 2:
+            _type = items[0]
+            _id = items[1]
+        else:
+            _type = "BUILD"
+            _id = items[0]
+
+        test_id = results['uid']
+
+        try:
+            # Check if the target build or sanity test is fetched
+            job_id = self.generate_job_id(_type.lower(), results)
+            job_id = job_id.replace(test_id, _id)
+
+            candidate = TestRun.objects.get(
+                build=test_job.target_build,
+                job_id=job_id
+            )
+
+            build_name = candidate.metadata.get('build_name')
+            if build_name:
+                metadata['build_name'] = build_name
+                return
+
+        except TestRun.DoesNotExist:
+            pass
+
+        # It is a sanity test, an extra request is needed to get build id
+        if _type == 'TEST':
+            follow_test_url = job_url.replace(test_id, _id)
+            test_json = self.fetch_url(follow_test_url).json()
+            build_id = test_json.get('waiting_for')
+
+        build_id = build_id.replace('BUILD#', '')
+        build_url = job_url.replace(test_id, build_id).replace('/tests/', '/builds/')
+
+        build_metadata = self.fetch_url(build_url).json()
+
+        build_metadata_keys = settings.get('TEST_BUILD_METADATA_KEYS', [])
+        metadata.update({k: build_metadata.get(k) for k in build_metadata_keys})
+
+        if 'toolchain' in build_metadata_keys and 'kconfig' in build_metadata_keys and metadata['build_name'] in [None, '']:
+            metadata['build_name'] = self.generate_test_name(build_metadata)
+
     def parse_build_results(self, test_job, job_url, results, settings):
         required_keys = ['build_status', 'warnings_count', 'download_url', 'retry']
         self.__check_required_keys__(required_keys, results)
@@ -163,6 +230,7 @@ class Backend(BaseBackend):
         metadata_keys = settings.get('BUILD_METADATA_KEYS', [])
         metadata = {k: results.get(k) for k in metadata_keys}
         metadata['job_url'] = job_url
+        metadata['job_id'] = test_job.job_id
         metadata['config'] = urljoin(results.get('download_url') + '/', 'config')
         metadata['build_name'] = test_name
 
@@ -200,6 +268,7 @@ class Backend(BaseBackend):
         metadata_keys = settings.get('TEST_METADATA_KEYS', [])
         metadata = {k: results.get(k) for k in metadata_keys}
         metadata['job_url'] = job_url
+        metadata['job_id'] = test_job.job_id
 
         # Set job name
         try:
@@ -227,25 +296,8 @@ class Backend(BaseBackend):
         # Retrieve TuxRun log
         logs = self.fetch_url(job_url + '/', 'logs?format=txt').text
 
-        #
-
-        build_id = results['waiting_for']
-
-        # Tuxsuite recently has added support for tests depending on other tests
-        if build_id.startswith('BUILD#') or '#' not in build_id:
-            _, _, test_id = self.parse_job_id(test_job.job_id)
-            build_id = build_id.replace('BUILD#', '')
-            build_url = job_url.replace(test_id, build_id).replace('/tests/', '/builds/')
-
-            # TODO: check if we can save a few seconds by querying a testjob that
-            # already contains build results
-            build_metadata = self.fetch_url(build_url).json()
-
-            build_metadata_keys = settings.get('TEST_BUILD_METADATA_KEYS', [])
-            metadata.update({k: build_metadata.get(k) for k in build_metadata_keys})
-
-            if 'toolchain' in build_metadata_keys and 'kconfig' in build_metadata_keys and metadata['build_name'] in [None, '']:
-                metadata['build_name'] = self.generate_test_name(build_metadata)
+        # Follow up the chain and retrieve build name
+        self.set_build_name(test_job, job_url, results, metadata, settings)
 
         # Create a boot test
         boot_test_name = 'boot/' + (metadata.get('build_name') or 'boot')
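A minimal sketch of how `set_build_name` interprets the `waiting_for` reference (the sample ids are made up):

    def parse_waiting_for(build_id):
        # "TEST#abc" -> ("TEST", "abc"); "BUILD#abc" -> ("BUILD", "abc");
        # a bare "abc" is treated as a build reference
        items = build_id.split('#')
        if len(items) == 2:
            return items[0], items[1]
        return "BUILD", items[0]

    assert parse_waiting_for("TEST#abc123") == ("TEST", "abc123")
    assert parse_waiting_for("abc123") == ("BUILD", "abc123")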
squad/compat.py
CHANGED
@@ -2,8 +2,10 @@
 SQUAD compatibity file
 """
 from rest_framework_extensions import __version__ as DRFE_VERSION_STR
+from allauth import __version__ as DAA_VERSION_STR
 
 DRFE_VERSION = [int(n) for n in DRFE_VERSION_STR.split(".")]
+DAA_VERSION = [int(n) for n in DAA_VERSION_STR.split(".")]
 
 # Handles compatibility for django_restframework_filters
 try:
@@ -27,3 +29,15 @@ def drf_basename(name):
         return {"basename": name}
     else:
         return {"base_name": name}
+
+
+def get_socialaccount_provider(providers, socialapp, request):
+    """
+    Django-allauth 0.55 removed the function `by_id`
+    Ref: https://github.com/pennersr/django-allauth/commit/cc5279bb61dba9cf0fafb10f4ae175c018749f1f
+    """
+
+    if DAA_VERSION >= [0, 55]:
+        return socialapp.get_provider(request)
+    else:
+        return providers.registry.by_id(socialapp.provider)
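Callers route provider lookups through this shim; a usage sketch mirroring the templatetags change further down:

    from allauth.socialaccount import providers
    from allauth.socialaccount.models import SocialApp
    from squad.compat import get_socialaccount_provider

    def login_urls(request):
        # Resolves each SocialApp's provider on either side of the
        # allauth 0.55 API break (get_provider vs registry.by_id)
        urls = {}
        for socialapp in SocialApp.objects.all():
            provider = get_socialaccount_provider(providers, socialapp, request)
            urls[provider] = provider.get_login_url(request)
        return urls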
squad/core/comparison.py
CHANGED
@@ -393,7 +393,7 @@ class TestComparison(BaseComparison):
 
         tests = models.Test.objects.filter(test_run_id__in=test_runs_ids.keys()).annotate(
             suite_slug=F('suite__slug'),
-        ).prefetch_related('metadata').defer('log')
+        ).prefetch_related('metadata').defer('log').order_by()
 
         for test in tests:
             build, env = test_runs_ids.get(test.test_run_id)
@@ -539,6 +539,9 @@ class TestComparison(BaseComparison):
             # No baseline is present, then no comparison is needed
             return
 
+        baseline = self.builds[0]
+        target = self.builds[1]
+
         query = self.base_sql.copy()
         query['select'].append('target.result')
         query['select'].append('target.has_known_issues')
@@ -549,42 +552,54 @@ class TestComparison(BaseComparison):
         tests = [t for t in models.Test.objects.raw(sql)]
         prefetch_related_objects(tests, 'metadata', 'suite')
 
-        env_ids = []
+        env_ids = [t.environment_id for t in tests]
+        envs = {e.id: e for e in models.Environment.objects.filter(id__in=env_ids).all()}
+        envs_slugs = sorted({e.slug for e in envs.values()})
+
+        for build in self.builds:
+            self.environments[build] = envs_slugs
+
         fixed_tests = defaultdict(set)
         regressions = defaultdict(set)
         fixes = defaultdict(set)
 
         for test in tests:
             env_id = test.environment_id
+
             full_name = test.full_name
+            if full_name not in self.results:
+                self.results[full_name] = OrderedDict()
 
-
+            baseline_key = (baseline, envs[env_id].slug)
+            target_key = (target, envs[env_id].slug)
 
             if test.status == 'fail':
                 regressions[env_id].add(full_name)
+                self.results[full_name][target_key] = 'fail'
+                self.results[full_name][baseline_key] = 'pass'
             elif test.status == 'pass':
                 fixes[env_id].add(full_name)
                 fixed_tests[env_id].add(test.metadata_id)
+                self.results[full_name][target_key] = 'pass'
+                self.results[full_name][baseline_key] = 'fail'
 
-
+        self.results = OrderedDict(sorted(self.results.items()))
 
         for env_id in regressions.keys():
-            self.__regressions__[environments[env_id].slug] = list(regressions[env_id])
+            self.__regressions__[envs[env_id].slug] = list(regressions[env_id])
 
         # It's not a fix if baseline test is intermittent for a given environment:
         # - test.has_known_issues == True and
         # - test.known_issues[env].intermittent == True
-        fixed_tests_environment_slugs = [environments[env_id] for env_id in fixed_tests.keys()]
+        fixed_tests_environment_slugs = [envs[env_id] for env_id in fixed_tests.keys()]
         intermittent_fixed_tests = self.__intermittent_fixed_tests__(fixed_tests, fixed_tests_environment_slugs)
         for env_id in fixes.keys():
-            env_slug = environments[env_id].slug
+            env_slug = envs[env_id].slug
             test_list = [test for test in fixes[env_id] if (test, env_slug) not in intermittent_fixed_tests]
             if len(test_list):
                 self.__fixes__[env_slug] = test_list
 
-
-        target = self.builds[1]
-        for env in environments.values():
+        for env in envs.values():
             if env.slug in self.__regressions__:
                 for test in self.__regressions__[env.slug]:
                     self.__diff__[test][target][env.slug] = False
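The fast path fills `self.results` keyed by full test name, with (build, environment slug) tuples mapping to a status. A toy illustration of the resulting shape (strings stand in for Build objects):

    from collections import OrderedDict

    baseline, target = "v1", "v2"  # stand-ins for Build instances
    results = OrderedDict()
    results["suite-a/test-x"] = OrderedDict()
    results["suite-a/test-x"][(target, "arm64")] = "fail"    # regression on arm64
    results["suite-a/test-x"][(baseline, "arm64")] = "pass"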
squad/core/failures.py
CHANGED
@@ -1,32 +1,28 @@
+from django.db.models import prefetch_related_objects
+
 from squad.core.models import Test
 
 
-def failures_with_confidence(project, build, failures):
+def failures_with_confidence(project, build, failures, releases_only=False):
     limit = project.build_confidence_count
     threshold = project.build_confidence_threshold
 
-
-
-
-
-
-
-
-        "metadata",
-        "environment",
-    ).order_by(
-        "metadata__suite",
-        "metadata__name",
-    )
+    prefetch_related_objects(failures, "metadata")
+
+    queryset = project.builds.filter(id__lt=build.id)
+    if releases_only:
+        queryset = queryset.filter(is_release=True)
+    builds = queryset.order_by('-id').all()[:limit]
+    builds_ids = [b.id for b in builds]
 
     # Find previous `limit` tests that contain this test x environment
-    for failure in
+    for failure in failures:
         history = Test.objects.filter(
-
+            build_id__in=builds_ids,
             metadata_id=failure.metadata_id,
             environment_id=failure.environment_id,
-        ).
+        ).only("result").order_by()
 
         failure.set_confidence(threshold, history)
 
-    return
+    return failures
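A sketch of the call site, matching the REST handler above (`build` is a squad.core.models.Build and `page` the paginated list of failing Test objects):

    from squad.core.failures import failures_with_confidence

    fwc = failures_with_confidence(
        build.project,
        build,
        page,
        releases_only=True,  # look back only over builds flagged is_release=True
    )
    for failure in fwc:
        # set_confidence() is expected to attach confidence data to each
        # failure; the exact attribute shape is not shown in this diff
        print(failure.metadata_id)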
squad/core/history.py
CHANGED
@@ -1,9 +1,9 @@
-from collections import OrderedDict
+from collections import defaultdict
 from django.core.paginator import Paginator
 
 from squad.core.queries import test_confidence
 from squad.core.utils import parse_name
-from squad.core.models import SuiteMetadata, KnownIssue
+from squad.core.models import SuiteMetadata, KnownIssue, Environment
 
 
 class TestResult(object):
@@ -11,20 +11,20 @@ class TestResult(object):
     __test__ = False
 
     class TestRunStatus(object):
-        def __init__(self, test_run, suite):
-            self.test_run = test_run
+        def __init__(self, test_run_id, suite):
+            self.test_run_id = test_run_id
             self.suite = suite
 
-    def __init__(self, test, suite, metadata, known_issues, is_duplicate=False):
+    def __init__(self, test, suite, metadata, known_issues, is_duplicate=False, list_of_duplicates=None):
         self.test = test
         self.suite = suite
         self.known_issues = known_issues
         if is_duplicate:
-            self.status, self.confidence_score = test_confidence(test)
+            self.status, self.confidence_score = test_confidence(None, list_of_duplicates=list_of_duplicates)
         else:
             self.status, self.confidence_score = (test.status, None)
-        self.test_run = test.test_run
-        self.test_run_status = self.TestRunStatus(self.test_run, self.suite)
+        self.test_run_id = test.test_run_id
+        self.test_run_status = self.TestRunStatus(self.test_run_id, self.suite)
         self.info = {
             "test_description": metadata.description if metadata else '',
             "test_instructions": metadata.instructions_to_reproduce if metadata else '',
@@ -51,11 +51,6 @@ class TestHistory(object):
 
         self.top = builds[0]
 
-        environments = OrderedDict()
-        results = OrderedDict()
-        for build in builds:
-            results[build] = {}
-
         issues_by_env = {}
         for issue in KnownIssue.active_by_project_and_test(project, full_test_name).all():
             for env in issue.environments.all():
@@ -65,16 +60,27 @@ class TestHistory(object):
 
         suite = project.suites.prefetch_related('metadata').get(slug=suite_slug)
         metadata = SuiteMetadata.objects.get(kind='test', suite=suite_slug, name=test_name)
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+        results = defaultdict()
+        environments_ids = set()
+        for build in builds:
+            results[build] = defaultdict(list)
+            for test in build.tests.filter(metadata=metadata).order_by():
+                test.metadata = metadata
+                test.suite = suite
+                results[build][test.environment_id].append(test)
+                environments_ids.add(test.environment_id)
+
+        results_without_duplicates = defaultdict()
+        for build in results:
+            results_without_duplicates[build] = defaultdict()
+            for env in results[build]:
+                tests = results[build][env]
+
+                is_duplicate = len(tests) > 1
+                known_issues = issues_by_env.get(tests[0].environment_id)
+                result = TestResult(tests[0], suite, metadata, known_issues, is_duplicate, list_of_duplicates=tests)
+                results_without_duplicates[build][env] = result
+
+        self.environments = Environment.objects.filter(id__in=environments_ids).order_by('slug')
+        self.results = results_without_duplicates
squad/core/models.py
CHANGED
squad/core/queries.py
CHANGED
@@ -199,7 +199,7 @@ def test_confidence(test, list_of_duplicates=None):
         return {value: count for value, count in data.items() if count == max_count}
 
     if test:
-        duplicates = models.Test.objects.filter(metadata_id=test.metadata_id, environment_id=test.environment_id, build_id=test.build_id)
+        duplicates = models.Test.objects.filter(metadata_id=test.metadata_id, environment_id=test.environment_id, build_id=test.build_id).order_by()
     else:
         duplicates = list_of_duplicates
 
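The surviving context suggests `test_confidence` resolves duplicates by majority vote over their statuses; a minimal standalone sketch of that idea (not SQUAD's exact implementation):

    from collections import Counter

    def majority_status(statuses):
        # Most frequent status wins; the score is its share of all runs
        data = Counter(statuses)
        status, count = data.most_common(1)[0]
        return status, 100.0 * count / len(statuses)

    print(majority_status(["pass", "pass", "fail"]))  # ('pass', 66.66...)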
squad/core/tasks/__init__.py
CHANGED
@@ -72,7 +72,7 @@ class ValidateTestRun(object):
         except json.decoder.JSONDecodeError as e:
             raise exceptions.InvalidMetadataJSON("metadata is not valid JSON: " + str(e) + "\n" + metadata_json)
 
-        if type(metadata) != dict:
+        if type(metadata) is not dict:
             raise exceptions.InvalidMetadata("metadata is not a object ({})")
 
         if "job_id" in metadata.keys():
@@ -87,7 +87,7 @@ class ValidateTestRun(object):
         except json.decoder.JSONDecodeError as e:
             raise exceptions.InvalidMetricsDataJSON("metrics is not valid JSON: " + str(e) + "\n" + metrics_file)
 
-        if type(metrics) != dict:
+        if type(metrics) is not dict:
             raise exceptions.InvalidMetricsData.type(metrics)
 
         for metric, value_dict in metrics.items():
@@ -113,7 +113,7 @@ class ValidateTestRun(object):
         except json.decoder.JSONDecodeError as e:
             raise exceptions.InvalidTestsDataJSON("tests is not valid JSON: " + str(e) + "\n" + tests_file)
 
-        if type(tests) != dict:
+        if type(tests) is not dict:
             raise exceptions.InvalidTestsData.type(tests)
 
 
squad/core/templates/squad/notification/diff.txt.jinja2
CHANGED
@@ -24,7 +24,7 @@ Failures
 {% if summary.failures %}
 {% for env, tests in summary.failures.items() %}{{env}}:
 {% for test in tests %}
-  * {{test.full_name}}{% for issue in known_issues %}{% if issue.test_name == test.full_name %}{% for issue_environment in issue.
+  * {{test.full_name}}{% for issue in known_issues %}{% if issue.test_name == test.full_name %}{% for issue_environment in issue.environments.all() %}{% if env == issue_environment.slug %}
     * Known issue: {{issue.title}}{% if issue.url %} {{issue.url}}{% endif %}{% if issue.intermittent %} (intermittent){% endif %}{% endif %}{% endfor %}{% endif %}{% endfor %}{% endfor %}
 {% endfor %}
 {% else %}
squad/core/utils.py
CHANGED
@@ -170,7 +170,7 @@ def log_deletion(request, object, message):
 
 def storage_save(obj, storage_field, filename, content):
     content_bytes = content or ''
-    if type(content_bytes) == str:
+    if type(content_bytes) is str:
         content_bytes = content_bytes.encode()
     filename = '%s/%s/%s' % (obj.__class__.__name__.lower(), obj.pk, filename)
     storage_field.save(filename, ContentFile(content_bytes))
squad/frontend/comparison.py
CHANGED
@@ -116,11 +116,11 @@ def compare_builds(request):
         baseline = get_object_or_404(project.builds, version=baseline_build)
         target = get_object_or_404(project.builds, version=target_build)
 
-
-
-
-
-        comparison
+        if comparison_type == 'test':
+            comparison = TestComparison(baseline, target, regressions_and_fixes_only=True)
+        else:
+            comparison_class = __get_comparison_class(comparison_type)
+            comparison = comparison_class.compare_builds(baseline, target)
 
         comparison.results = __paginate(comparison.results, request)
 
squad/frontend/templates/squad/build-nav.jinja2
CHANGED
@@ -103,11 +103,6 @@
                 {{ _('Tests') }}
             </a>
         </li>
-        <li role="presentation" {% if url_name == 'failures' %}class="active"{% endif %}>
-            <a href="{{build_section_url(build, 'failures')}}">
-                {{ _('Test failures') }}
-            </a>
-        </li>
         <li role="presentation" {% if url_name == 'build_metrics' %}class="active"{% endif %}>
             <a href="{{build_section_url(build, 'build_metrics')}}">
                 {{ _('Metrics') }}
squad/frontend/templates/squad/test_history.jinja2
CHANGED
@@ -39,7 +39,7 @@
       <td><a href="{{project_url(build)}}">{{build.version}}</a></td>
       <td>{{build.datetime|date}}</td>
       {% for environment in history.environments %}
-        {% with result=results[environment] %}
+        {% with result=results[environment.id] %}
         {% if result %}
         {% with known_issues=result.known_issues %}
         <td class='{{result.status|slugify}}'>
squad/frontend/templatetags/squad.py
CHANGED
@@ -14,6 +14,7 @@ from allauth.socialaccount import providers
 
 
 from squad import version
+from squad.compat import get_socialaccount_provider
 from squad.core.models import Test, Build
 from squad.core.utils import format_metadata
 from squad.jinja2 import register_global_function, register_filter
@@ -81,14 +82,12 @@ def testrun_suite_test_details_history_url(group, project, build, status, test):
 
 
 def testrun_suite_or_test_url(group, project, build, status, kind, test=None):
-    testrun = status.test_run.id
-    suite = status.suite
     args = (
         group.slug,
         project.slug,
         build.version,
-        testrun,
-        suite.slug.replace('/', '$'),
+        status.test_run_id,
+        status.suite.slug.replace('/', '$'),
     )
     if test:
         if isinstance(test, Test):
@@ -272,7 +271,7 @@ def to_json(d):
 def socialaccount_providers(context):
     request = context['request']
     return_dict = {}
     for socialapp in SocialApp.objects.all():
-        provider = providers.registry.by_id(socialapp.provider)
+        provider = get_socialaccount_provider(providers, socialapp, request)
         return_dict.update({provider: provider.get_login_url(request)})
     return return_dict
|