squad 1.89-py3-none-any.whl → 1.91-py3-none-any.whl
- squad/api/ci.py +11 -6
- squad/api/prometheus.py +52 -0
- squad/api/urls.py +2 -0
- squad/api/views.py +1 -1
- squad/ci/backend/fake.py +3 -0
- squad/ci/backend/lava.py +6 -1
- squad/ci/backend/null.py +6 -0
- squad/ci/backend/tuxsuite.py +38 -4
- squad/core/management/commands/import_data.py +2 -1
- squad/core/models.py +2 -2
- squad/core/tasks/__init__.py +3 -3
- squad/core/utils.py +7 -4
- squad/frontend/templates/squad/project.jinja2 +8 -0
- squad/frontend/urls.py +1 -0
- squad/plugins/lib/__init__.py +1 -0
- squad/plugins/lib/base_log_parser.py +157 -0
- squad/plugins/linux_log_parser.py +18 -130
- squad/settings.py +17 -11
- squad/version.py +1 -1
- {squad-1.89.dist-info → squad-1.91.dist-info}/METADATA +13 -12
- {squad-1.89.dist-info → squad-1.91.dist-info}/RECORD +25 -22
- {squad-1.89.dist-info → squad-1.91.dist-info}/WHEEL +1 -1
- {squad-1.89.dist-info → squad-1.91.dist-info}/entry_points.txt +1 -0
- {squad-1.89.dist-info → squad-1.91.dist-info}/COPYING +0 -0
- {squad-1.89.dist-info → squad-1.91.dist-info}/top_level.txt +0 -0
squad/api/ci.py
CHANGED
@@ -73,15 +73,26 @@ def submit_job(request, group_slug, project_slug, version, environment_slug):
 @csrf_exempt
 @auth_privileged
 def watch_job(request, group_slug, project_slug, version, environment_slug):
+
+    # testjob_id points to the backend's test job
+    testjob_id = request.POST.get('testjob_id', None)
+    if testjob_id is None:
+        return HttpResponseBadRequest("testjob_id is required")
+
     backend_name = request.POST.get('backend')
     if backend_name is None:
         return HttpResponseBadRequest("backend field is required")
+
     backend = None
     try:
         backend = Backend.objects.get(name=request.POST.get('backend'))
     except Backend.DoesNotExist:
         return HttpResponseBadRequest("requested backend does not exist")

+    check = backend.get_implementation().check_job_id(testjob_id)
+    if check is not True:
+        return HttpResponseBadRequest(check)
+
     # project has to exist or request will result with 400
     project = request.project
     if backend is None or project is None:
@@ -90,12 +101,6 @@ def watch_job(request, group_slug, project_slug, version, environment_slug):
     # create Build object
     build, _ = project.builds.get_or_create(version=version)

-    # testjob_id points to the backend's test job
-    testjob_id = request.POST.get('testjob_id', None)
-
-    if testjob_id is None:
-        return HttpResponseBadRequest("testjob_id is required")
-
     # create TestJob object
     test_job = TestJob(
         backend=backend,
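With this change watch_job rejects a malformed job id up front instead of creating a TestJob that can never be fetched. A rough client-side sketch of the new behaviour; the instance URL, API token, backend name and exact route are assumptions, not taken from this diff:

import requests

# Hypothetical SQUAD instance and token; for a LAVA backend the new
# check_job_id() rejects anything that is not an integer with HTTP 400.
response = requests.post(
    'https://squad.example.com/api/watchjob/mygroup/myproject/v1.0/x86_64',
    headers={'Authorization': 'Token 0123456789abcdef'},
    data={'backend': 'my-lava-lab', 'testjob_id': 'not-a-number'},
)
print(response.status_code, response.text)  # 400, "LAVA job id should be an integer"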
squad/api/prometheus.py
ADDED
@@ -0,0 +1,52 @@
+import re
+import requests
+import celery
+
+from django.conf import settings
+from django.http import HttpResponse, HttpResponseForbidden
+from django.views.decorators.csrf import csrf_exempt
+from django.views.decorators.http import require_http_methods
+
+from squad.http import auth_user_from_request
+
+
+@csrf_exempt
+@require_http_methods(['GET'])
+def metrics(request):
+    user = auth_user_from_request(request, request.user)
+    if not user.is_authenticated:
+        return HttpResponseForbidden()
+
+    output = ''
+    available_queues = None
+
+    active_queues = celery.current_app.control.inspect().active_queues()
+    if active_queues is not None:
+        active_workers = set()
+        available_queues = set()
+        for worker_name, queues in active_queues.items():
+            active_workers.add(worker_name)
+            available_queues |= set([q['name'] for q in queues])
+
+        output += '# TYPE workers_count counter\n'
+        output += f'workers_count {len(active_workers)}\n'
+
+    # TODO: check how to get metrics for non-RabbitMQ brokers
+    if settings.CELERY_BROKER_URL:
+        rabbitmq_url = settings.CELERY_BROKER_URL.replace('amqps://', 'https://').replace('amqp://', 'http://')
+        rabbitmq_url = re.sub(r':\d+$', '', rabbitmq_url)
+        rabbitmq_url += '/api/queues'
+
+        response = requests.get(rabbitmq_url)
+        queues = response.json()
+        available_queues = {r["queue"] for r in settings.CELERY_TASK_ROUTES.values()}
+
+        for queue in queues:
+            if queue['name'] in available_queues:
+                metric_name = f'queue_{queue["name"]}_length'
+                length = queue['messages_ready']
+
+                output += f'\n# TYPE {metric_name} counter'
+                output += f'\n{metric_name} {length}'
+
+    return HttpResponse(output, status=200, content_type="text/plain;")
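The new endpoint exposes Celery worker and queue counters in the Prometheus text format and requires an authenticated user. A minimal scrape sketch with a made-up instance URL and token; the exact auth header scheme is whatever squad.http.auth_user_from_request accepts, and queue names depend on CELERY_TASK_ROUTES:

import requests

# Hypothetical SQUAD deployment; with a RabbitMQ broker the body contains
# lines such as "workers_count 4" plus one "queue_<name>_length" per queue.
response = requests.get(
    'https://squad.example.com/api/prometheus/',
    headers={'Authorization': 'Token 0123456789abcdef'},
)
print(response.text)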
squad/api/urls.py
CHANGED
@@ -5,6 +5,7 @@ from rest_framework.schemas import get_schema_view
 from . import views
 from . import data
 from . import ci
+from . import prometheus
 from . import rest


@@ -26,4 +27,5 @@ urlpatterns = [
     url(r'^resubmit/([0-9]+)', ci.resubmit_job),
     url(r'^forceresubmit/([0-9]+)', ci.force_resubmit_job),
     url(r'^version/', views.version),
+    url(r'^prometheus/', prometheus.metrics),
 ]
squad/api/views.py
CHANGED
@@ -105,7 +105,7 @@ def add_test_run(request, group_slug, project_slug, version, environment_slug):
     if 'attachment' in request.FILES:
         attachments = {}
         for f in request.FILES.getlist('attachment'):
-            attachments[f.name] =
+            attachments[f.name] = f
         test_run_data['attachments'] = attachments

     receive = ReceiveTestRun(project)
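Together with the tasks and models changes further down, this makes attachment uploads carry the actual Django file object (and its size) end to end. A hedged client-side sketch; the instance URL, token and route are assumptions:

import requests

# Hypothetical submission; the 'attachment' field may be repeated for multiple files.
requests.post(
    'https://squad.example.com/api/submit/mygroup/myproject/v1.0/x86_64',
    headers={'Authorization': 'Token 0123456789abcdef'},
    data={'tests': '{"mysuite/mytest": "pass"}'},
    files=[('attachment', open('boot.log', 'rb'))],
)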
squad/ci/backend/fake.py
CHANGED
squad/ci/backend/lava.py
CHANGED
@@ -683,7 +683,7 @@ class Backend(BaseBackend):
             if clone_measurements_to_tests:
                 res_value = result['result']
                 results.update({res_name: res_value})
-            elif result['name']
+            elif 'login-action' in result['name'] and handle_lava_boot:
                 # add artificial 'boot' test result for each test job
                 # by default the boot test is named after the device_type
                 boot = "boot-%s" % test_job.name
@@ -785,6 +785,11 @@ class Backend(BaseBackend):
         except yaml.YAMLError as e:
             return str(e)

+    def check_job_id(self, job_id):
+        if re.match(r"^\d+$", str(job_id)) is not None:
+            return True
+        return "LAVA job id should be an integer"
+
     def get_job_definition(self, job_id):
         if self.use_xml_rpc:
             return self.proxy.scheduler.jobs.definition(job_id)
squad/ci/backend/null.py
CHANGED
@@ -143,6 +143,12 @@ class Backend:
         """
         raise NotImplementedError

+    def check_job_id(self, job_id):
+        """
+        Returns True if job id matches what the backend expect, else returns the error message
+        """
+        raise NotImplementedError
+
     def format_message(self, msg):
         if self.data and hasattr(self.data, "name"):
             return self.data.name + ': ' + msg
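check_job_id() is the new hook that watch_job calls before creating a TestJob; backends return True or an error string. A minimal sketch of a custom backend honoring it — the class name and the UUID job-id convention are made up for illustration:

import re

from squad.ci.backend.null import Backend as BaseBackend


class MyLabBackend(BaseBackend):
    # Hypothetical lab whose job ids are UUID-like strings.
    UUID_RE = re.compile(r'^[0-9a-f-]{36}$')

    def check_job_id(self, job_id):
        # Return True for a valid id, or a message that watch_job sends back
        # as the HTTP 400 response body.
        if self.UUID_RE.match(str(job_id)):
            return True
        return "job id should be a UUID"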
squad/ci/backend/tuxsuite.py
CHANGED
@@ -120,6 +120,13 @@ class Backend(BaseBackend):
         # The regex below is supposed to find only one match
         return matches[0]

+    def check_job_id(self, job_id):
+        try:
+            self.parse_job_id(job_id)
+            return True
+        except FetchIssue as e:
+            return str(e)
+
     def generate_job_id(self, result_type, result):
         """
         The job id for TuxSuite results is generated using 3 pieces of info:
@@ -144,7 +151,11 @@
         url = reduce(urljoin, urlbits)

         try:
-
+            headers = {}
+            if hasattr(self, 'auth_token') and self.auth_token is not None:
+                headers = {'Authorization': self.auth_token}
+
+            response = Backend.get_session().request("GET", url, headers=headers)
         except Exception as e:
             raise TemporaryFetchIssue(f"Can't retrieve from {url}: {e}")

@@ -224,6 +235,11 @@
         if 'toolchain' in build_metadata_keys and 'kconfig' in build_metadata_keys and metadata['build_name'] in [None, '']:
             metadata['build_name'] = self.generate_test_name(build_metadata)

+    def add_skip_boot_test(self, tests, metadata):
+        # Create an artificial boot test and mark it as skip
+        boot_test_name = 'boot/' + (metadata.get('build_name') or 'boot')
+        tests[boot_test_name] = None
+
     def parse_build_results(self, test_job, job_url, results, settings):
         required_keys = ['build_status', 'warnings_count', 'download_url', 'retry']
         self.__check_required_keys__(required_keys, results)
@@ -318,6 +334,11 @@
         metadata_keys = settings.get('TEST_METADATA_KEYS', [])
         metadata = {k: results.get(k) for k in metadata_keys}

+        # Change environment name
+        if 'test_name' in results and results.get('test_name') is not None:
+            test_job.environment = results.get('test_name')
+            test_job.save()
+
         # Add extra metadata from metadata file if it exists
         self.update_metadata_from_file(results=results, metadata=metadata)

@@ -340,6 +361,8 @@
         else:
             test_job.failure = 'sanity test failed'

+        self.add_skip_boot_test(tests, metadata)
+
         return status, completed, metadata, tests, metrics, logs

     # Fetch results even if the job fails, but has results
@@ -348,8 +371,14 @@

         elif results['result'] == 'error':
             test_job.failure = 'tuxsuite infrastructure error'
+            self.add_skip_boot_test(tests, metadata)
             return 'Incomplete', completed, metadata, tests, metrics, logs

+        elif results['result'] == 'canceled':
+            test_job.failure = 'tuxsuite job canceled'
+            self.add_skip_boot_test(tests, metadata)
+            return 'Canceled', completed, metadata, tests, metrics, logs
+
         # If boot result is unkown, a retry is needed, otherwise, it either passed or failed
         if 'unknown' == results['results']['boot']:
             return None
@@ -384,6 +413,10 @@

     def fetch(self, test_job):
         url = self.job_url(test_job)
+
+        settings = self.__resolve_settings__(test_job)
+        self.auth_token = settings.get('TUXSUITE_TOKEN', None)
+
         if test_job.input:
             results = self.fetch_from_results_input(test_job)
             test_job.input = None
@@ -393,11 +426,12 @@
         if results.get('state') != 'finished':
             return None

-        settings = self.__resolve_settings__(test_job)
-
         result_type = self.parse_job_id(test_job.job_id)[0]
         parse_results = getattr(self, f'parse_{result_type.lower()}_results')
-
+        parsed = parse_results(test_job, url, results, settings)
+
+        self.auth_token = None
+        return parsed

     def job_url(self, test_job):
         result_type, tux_project, tux_uid = self.parse_job_id(test_job.job_id)
squad/core/management/commands/import_data.py
CHANGED
@@ -2,6 +2,7 @@ from glob import glob
 import os
 import re
 from django.core.management.base import BaseCommand
+from django.core.files import File


 from squad.core.models import Build
@@ -123,7 +124,7 @@ class Command(BaseCommand):
         for f in glob(os.path.join(directory, '*')):
             name = os.path.basename(f)
             if name not in ['metrics.json', 'metadata.json', 'tests.json']:
-                attachments[name] = open(f, 'rb')
+                attachments[name] = File(open(f, 'rb'))

         if not self.options['silent']:
             print("Importing test run: %s" % directory)
squad/core/models.py
CHANGED
@@ -909,8 +909,8 @@ class Attachment(models.Model):
             self.__data__ = b''
         return self.__data__

-    def save_file(self, filename,
-        storage_save(self, self.storage, filename,
+    def save_file(self, filename, file):
+        storage_save(self, self.storage, filename, file)


 class SuiteMetadata(models.Model):
squad/core/tasks/__init__.py
CHANGED
@@ -172,9 +172,9 @@ class ReceiveTestRun(object):
         if log_file is not None:
             testrun.save_log_file(log_file)

-        for filename,
-            attachment = testrun.attachments.create(filename=filename, length=
-            attachment.save_file(filename,
+        for filename, file in attachments.items():
+            attachment = testrun.attachments.create(filename=filename, length=file.size)
+            attachment.save_file(filename, file)

         testrun.refresh_from_db()

squad/core/utils.py
CHANGED
@@ -169,8 +169,11 @@ def log_deletion(request, object, message):


 def storage_save(obj, storage_field, filename, content):
-    content_bytes = content or ''
-    if type(content_bytes) is str:
-        content_bytes = content_bytes.encode()
     filename = '%s/%s/%s' % (obj.__class__.__name__.lower(), obj.pk, filename)
-
+    if type(content) in [bytes, str]:
+        content_bytes = content or ''
+        if type(content_bytes) is str:
+            content_bytes = content_bytes.encode()
+        storage_field.save(filename, ContentFile(content_bytes))
+    else:
+        storage_field.save(filename, content)
squad/frontend/templates/squad/project.jinja2
CHANGED
@@ -6,6 +6,14 @@

 {% include "squad/project-nav.jinja2" %}

+
+{% if project.description%}
+<h2>{{ _('Description') }}</h2>
+<div class='description-{{project.id}}'>
+{{project.description}}
+</div>
+{% endif %}
+
 {% if last_build %}
 <div>
   <h2>
squad/frontend/urls.py
CHANGED
@@ -52,6 +52,7 @@ urlpatterns = [
     url(r'^(%s)/(%s)/build/([^/]+)/attachments/testrun/([^/]+)/([^/]+)$' % group_and_project, views.build_attachment, name='build_attachments'),
     url(r'^(%s)/(%s)/build/([^/]+)/testrun/([^/]+)/suite/([^/]+)/tests/$' % group_and_project, views.test_run_suite_tests, name='testrun_suite_tests'),
     url(r'^(%s)/(%s)/build/([^/]+)/testrun/([^/]+)/suite/([^/]+)/test/([^/]+)/history/$' % group_and_project, tests.test_history, name='test_history'),
+    url(r'^(%s)/(%s)/build/([^/]+)/testrun/([^/]+)/suite/([^/]+)/test/([^/]+)/$' % group_and_project, views.test_run_suite_test_details, name='testrun_suite_test_details'),
     url(r'^(%s)/(%s)/build/([^/]+)/testrun/([^/]+)/suite/([^/]+)/test/([^/]+)/details/$' % group_and_project, views.test_run_suite_test_details, name='testrun_suite_test_details'),
     url(r'^(%s)/(%s)/build/([^/]+)/testrun/([^/]+)/suite/([^/]+)/metrics/$' % group_and_project, views.test_run_suite_metrics, name='testrun_suite_metrics'),
     url(r'^(%s)/(%s)/build/([^/]+)/testrun/([^/]+)/suite/([^/]+)/test/([^/]+)/log$' % group_and_project, views.test_details_log, name='test_details_log'),
squad/plugins/lib/__init__.py
ADDED
@@ -0,0 +1 @@
+from squad.plugins import Plugin # noqa
squad/plugins/lib/base_log_parser.py
ADDED
@@ -0,0 +1,157 @@
+import hashlib
+import re
+from collections import defaultdict
+
+from django.template.defaultfilters import slugify
+
+REGEX_NAME = 0
+REGEX_BODY = 1
+REGEX_EXTRACT_NAME = 2
+
+tstamp = r"\[[ \d]+\.[ \d]+\]"
+pid = r"(?:\s*?\[\s*?[CT]\d+\s*?\])"
+not_newline_or_plus = r"[^\+\n]"
+square_brackets_and_contents = r"\[[^\]]+\]"
+
+
+class BaseLogParser:
+    def compile_regexes(self, regexes):
+        combined = [r"(%s)" % r[REGEX_BODY] for r in regexes]
+        return re.compile(r"|".join(combined), re.S | re.M)
+
+    def remove_numbers_and_time(self, snippet):
+        # [ 1067.461794][ T132] BUG: KCSAN: data-race in do_page_fault spectre_v4_enable_task_mitigation
+        # -> [ .][ T] BUG: KCSAN: data-race in do_page_fault spectre_v_enable_task_mitigation
+        without_numbers = re.sub(r"(0x[a-f0-9]+|[<\[][0-9a-f]+?[>\]]|\d+)", "", snippet)
+
+        # [ .][ T] BUG: KCSAN: data-race in do_page_fault spectre_v_enable_task_mitigation
+        # -> BUG: KCSAN: data-race in do_page_fault spectre_v_enable_task_mitigation
+        without_time = re.sub(f"^{square_brackets_and_contents}({square_brackets_and_contents})?", "", without_numbers) # noqa
+
+        return without_time
+
+    def create_name(self, snippet, compiled_regex=None):
+        matches = None
+        if compiled_regex:
+            matches = compiled_regex.findall(snippet)
+        if not matches:
+            # Only extract a name if we provide a regex to extract the name and
+            # there is a match
+            return None
+        snippet = matches[0]
+        without_numbers_and_time = self.remove_numbers_and_time(snippet)
+
+        # Limit the name length to 191 characters, since the max name length
+        # for SuiteMetadata in SQUAD is 256 characters. The SHA and "-" take 65
+        # characters: 256-65=191
+        return slugify(without_numbers_and_time)[:191]
+
+    def create_shasum(self, snippet):
+        sha = hashlib.sha256()
+        without_numbers_and_time = self.remove_numbers_and_time(snippet)
+        sha.update(without_numbers_and_time.encode())
+        return sha.hexdigest()
+
+    def create_name_log_dict(self, test_name, lines, test_regex=None):
+        """
+        Produce a dictionary with the test names as keys and the extracted logs
+        for that test name as values. There will be at least one test name per
+        regex. If there were any matches for a given regex, then a new test
+        will be generated using test_name + shasum.
+        """
+        # Run the REGEX_EXTRACT_NAME regex over the log lines to sort them by
+        # extracted name. If no name is extracted or the log parser did not
+        # have any output for a particular regex, just use the default name
+        # (for example "check-kernel-oops").
+        tests_without_shas_to_create = defaultdict(set)
+        tests_with_shas_to_create = defaultdict(set)
+
+        # If there are lines, then create the tests for these.
+        for line in lines:
+            extracted_name = self.create_name(line, test_regex)
+            if extracted_name:
+                extended_test_name = f"{test_name}-{extracted_name}"
+            else:
+                extended_test_name = test_name
+            tests_without_shas_to_create[extended_test_name].add(line)
+
+        for name, test_lines in tests_without_shas_to_create.items():
+            # Some lines of the matched regex might be the same, and we don't want to create
+            # multiple tests like test1-sha1, test1-sha1, etc, so we'll create a set of sha1sums
+            # then create only new tests for unique sha's
+
+            for line in test_lines:
+                sha = self.create_shasum(line)
+                name_with_sha = f"{name}-{sha}"
+                tests_with_shas_to_create[name_with_sha].add(line)
+
+        return tests_without_shas_to_create, tests_with_shas_to_create
+
+    def create_squad_tests_from_name_log_dict(
+        self, suite, testrun, tests_without_shas_to_create, tests_with_shas_to_create
+    ):
+        # Import SuiteMetadata from SQUAD only when required so BaseLogParser
+        # does not require a SQUAD to work. This makes it easier to reuse this
+        # class outside of SQUAD for testing and developing log parser
+        # patterns.
+        from squad.core.models import SuiteMetadata
+
+        for name, lines in tests_without_shas_to_create.items():
+            metadata, _ = SuiteMetadata.objects.get_or_create(
+                suite=suite.slug, name=name, kind="test"
+            )
+            testrun.tests.create(
+                suite=suite,
+                result=(len(lines) == 0),
+                log="\n".join(lines),
+                metadata=metadata,
+                build=testrun.build,
+                environment=testrun.environment,
+            )
+        for name_with_sha, lines in tests_with_shas_to_create.items():
+            metadata, _ = SuiteMetadata.objects.get_or_create(
+                suite=suite.slug, name=name_with_sha, kind="test"
+            )
+            testrun.tests.create(
+                suite=suite,
+                result=False,
+                log="\n---\n".join(lines),
+                metadata=metadata,
+                build=testrun.build,
+                environment=testrun.environment,
+            )
+
+    def create_squad_tests(self, testrun, suite, test_name, lines, test_regex=None):
+        """
+        There will be at least one test per regex. If there were any match for
+        a given regex, then a new test will be generated using test_name +
+        shasum. This helps comparing kernel logs across different builds
+        """
+        tests_without_shas_to_create, tests_with_shas_to_create = (
+            self.create_name_log_dict(test_name, lines, test_regex)
+        )
+        self.create_squad_tests_from_name_log_dict(
+            suite,
+            testrun,
+            tests_without_shas_to_create,
+            tests_with_shas_to_create,
+        )
+
+    def join_matches(self, matches, regexes):
+        """
+        group regex in python are returned as a list of tuples which each
+        group match in one of the positions in the tuple. Example:
+        regex = r'(a)|(b)|(c)'
+        matches = [
+            ('match a', '', ''),
+            ('', 'match b', ''),
+            ('match a', '', ''),
+            ('', '', 'match c')
+        ]
+        """
+        snippets = {regex_id: [] for regex_id in range(len(regexes))}
+        for match in matches:
+            for regex_id in range(len(regexes)):
+                if len(match[regex_id]) > 0:
+                    snippets[regex_id].append(match[regex_id])
+        return snippets
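As the in-code comment notes, BaseLogParser only imports SQUAD models when tests are actually created, so the name/sha helpers can be exercised standalone while developing parser patterns. A small sketch with made-up kernel log lines (Django still needs to be importable for slugify):

from squad.plugins.lib.base_log_parser import BaseLogParser

parser = BaseLogParser()
lines = [
    '[    1.234567][    T1] WARNING: CPU: 0 PID: 1 at kernel/smp.c:912',
    '[    2.345678][    T1] WARNING: CPU: 0 PID: 1 at kernel/smp.c:912',
]
without_shas, with_shas = parser.create_name_log_dict('check-kernel-warning', lines)
print(list(without_shas))  # ['check-kernel-warning']
# Both lines normalize to the same text once numbers/timestamps are stripped,
# so they collapse into a single sha-suffixed test name.
print(list(with_shas))     # ['check-kernel-warning-<sha256 of the normalized line>']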
squad/plugins/linux_log_parser.py
CHANGED
@@ -1,42 +1,33 @@
-import hashlib
 import logging
 import re
-from collections import defaultdict
 from squad.plugins import Plugin as BasePlugin
-from squad.
-from django.template.defaultfilters import slugify
-
+from squad.plugins.lib.base_log_parser import BaseLogParser, REGEX_NAME, REGEX_EXTRACT_NAME, tstamp, pid, not_newline_or_plus

 logger = logging.getLogger()

-REGEX_NAME = 0
-REGEX_BODY = 1
-REGEX_EXTRACT_NAME = 2
-
 MULTILINERS = [
-    ('
-    ('
-    ('
+    ('exception', f'-+\[? cut here \]?-+.*?{tstamp}{pid}?\s+-+\[? end trace \w* \]?-+', f"\n{tstamp}{not_newline_or_plus}*"), # noqa
+    ('kasan', f'{tstamp}{pid}?\s+=+\n{tstamp}{pid}?\s+BUG: KASAN:.*?\n*?{tstamp}{pid}?\s+=+', f"BUG: KASAN:{not_newline_or_plus}*"), # noqa
+    ('kcsan', f'{tstamp}{pid}?\s+=+\n{tstamp}{pid}?\s+BUG: KCSAN:.*?=+', f"BUG: KCSAN:{not_newline_or_plus}*"), # noqa
+    ('kfence', f'{tstamp}{pid}?\s+=+\n{tstamp}{pid}?\s+BUG: KFENCE:.*?{tstamp}{pid}?\s+=+', f"BUG: KFENCE:{not_newline_or_plus}*"), # noqa
+    ('panic-multiline', f'{tstamp}{pid}?\s+Kernel panic - [^\n]+\n.*?-+\[? end Kernel panic - [^\n]+ \]?-*', f"Kernel {not_newline_or_plus}*"), # noqa
+    ('internal-error-oops', f'{tstamp}{pid}?\s+Internal error: Oops.*?-+\[? end trace \w+ \]?-+', f"Oops{not_newline_or_plus}*"), # noqa
 ]

 ONELINERS = [
-    ('
-    ('
-    ('
-    ('
-    ('
-    ('
+    ('oops', r'^[^\n]+Oops(?: -|:).*?$', f"Oops{not_newline_or_plus}*"), # noqa
+    ('fault', r'^[^\n]+Unhandled fault.*?$', f"Unhandled {not_newline_or_plus}*"), # noqa
+    ('warning', r'^[^\n]+WARNING:.*?$', f"WARNING:{not_newline_or_plus}*"), # noqa
+    ('bug', r'^[^\n]+(?: kernel BUG at|BUG:).*?$', f"BUG{not_newline_or_plus}*"), # noqa
+    ('invalid-opcode', r'^[^\n]+invalid opcode:.*?$', f"invalid opcode:{not_newline_or_plus}*"), # noqa
+    ('panic', r'Kernel panic - not syncing.*?$', f"Kernel {not_newline_or_plus}*"), # noqa
 ]

 # Tip: broader regexes should come first
 REGEXES = MULTILINERS + ONELINERS


-class Plugin(BasePlugin):
-    def __compile_regexes(self, regexes):
-        combined = [r'(%s)' % r[REGEX_BODY] for r in regexes]
-        return re.compile(r'|'.join(combined), re.S | re.M)
-
+class Plugin(BasePlugin, BaseLogParser):
     def __cutoff_boot_log(self, log):
         # Attempt to split the log in " login:"
         logs = log.split(' login:', 1)
@@ -50,112 +41,9 @@ class Plugin(BasePlugin):
         return boot_log, test_log

     def __kernel_msgs_only(self, log):
-        kernel_msgs = re.findall(
+        kernel_msgs = re.findall(f'({tstamp}{pid}? .*?)$', log, re.S | re.M) # noqa
         return '\n'.join(kernel_msgs)

-    def __join_matches(self, matches, regexes):
-        """
-        group regex in python are returned as a list of tuples which each
-        group match in one of the positions in the tuple. Example:
-        regex = r'(a)|(b)|(c)'
-        matches = [
-            ('match a', '', ''),
-            ('', 'match b', ''),
-            ('match a', '', ''),
-            ('', '', 'match c')
-        ]
-        """
-        snippets = {regex_id: [] for regex_id in range(len(regexes))}
-        for match in matches:
-            for regex_id in range(len(regexes)):
-                if len(match[regex_id]) > 0:
-                    snippets[regex_id].append(match[regex_id])
-        return snippets
-
-    def __create_tests(self, testrun, suite, test_name, lines, test_regex=None):
-        """
-        There will be at least one test per regex. If there were any match for a given
-        regex, then a new test will be generated using test_name + shasum. This helps
-        comparing kernel logs accross different builds
-        """
-        # Run the REGEX_EXTRACT_NAME regex over the log lines to sort them by
-        # extracted name. If no name is extracted or the log parser did not
-        # have any output for a particular regex, just use the default name
-        # (for example "check-kernel-oops").
-        tests_to_create = defaultdict(set)
-        shas = defaultdict(set)
-
-        # If there are no lines, use the default name and create a passing
-        # test. For example "check-kernel-oops"
-        if not lines:
-            tests_to_create[test_name] = []
-
-        # If there are lines, then create the tests for these.
-        for line in lines:
-            extracted_name = self.__create_name(line, test_regex)
-            if extracted_name:
-                extended_test_name = f"{test_name}-{extracted_name}"
-            else:
-                extended_test_name = test_name
-            tests_to_create[extended_test_name].add(line)
-
-        for name, lines in tests_to_create.items():
-            metadata, _ = SuiteMetadata.objects.get_or_create(suite=suite.slug, name=name, kind='test')
-            testrun.tests.create(
-                suite=suite,
-                result=(len(lines) == 0),
-                log='\n'.join(lines),
-                metadata=metadata,
-                build=testrun.build,
-                environment=testrun.environment,
-            )
-
-            # Some lines of the matched regex might be the same, and we don't want to create
-            # multiple tests like test1-sha1, test1-sha1, etc, so we'll create a set of sha1sums
-            # then create only new tests for unique sha's
-
-            for line in lines:
-                sha = self.__create_shasum(line)
-                name_with_sha = f"{name}-{sha}"
-                shas[name_with_sha].add(line)
-
-        for name_with_sha, lines in shas.items():
-            metadata, _ = SuiteMetadata.objects.get_or_create(suite=suite.slug, name=name_with_sha, kind='test')
-            testrun.tests.create(
-                suite=suite,
-                result=False,
-                log='\n---\n'.join(lines),
-                metadata=metadata,
-                build=testrun.build,
-                environment=testrun.environment,
-            )
-
-    def __remove_numbers_and_time(self, snippet):
-        without_numbers = re.sub(r"(0x[a-f0-9]+|[<\[][0-9a-f]+?[>\]]|\d+)", "", snippet)
-        without_time = re.sub(r"^\[[^\]]+\]", "", without_numbers)
-
-        return without_time
-
-    def __create_name(self, snippet, regex=None):
-        matches = None
-        if regex:
-            matches = regex.findall(snippet)
-        if not matches:
-            return None
-        snippet = matches[0]
-        without_numbers_and_time = self.__remove_numbers_and_time(snippet)
-
-        # Limit the name length to 191 characters, since the max name length
-        # for SuiteMetadata in SQUAD is 256 characters. The SHA and "-" take 65
-        # characters: 256-65=191
-        return slugify(without_numbers_and_time)[:191]
-
-    def __create_shasum(self, snippet):
-        sha = hashlib.sha256()
-        without_numbers_and_time = self.__remove_numbers_and_time(snippet)
-        sha.update(without_numbers_and_time.encode())
-        return sha.hexdigest()
-
     def postprocess_testrun(self, testrun):
         if testrun.log_file is None:
             return
@@ -170,9 +58,9 @@ class Plugin(BasePlugin):
         log = self.__kernel_msgs_only(log)
         suite, _ = testrun.build.project.suites.get_or_create(slug=f'log-parser-{log_type}')

-        regex = self.
+        regex = self.compile_regexes(REGEXES)
         matches = regex.findall(log)
-        snippets = self.
+        snippets = self.join_matches(matches, REGEXES)

         for regex_id in range(len(REGEXES)):
             test_name = REGEXES[regex_id][REGEX_NAME]
@@ -180,4 +68,4 @@ class Plugin(BasePlugin):
             test_name_regex = None
             if regex_pattern:
                 test_name_regex = re.compile(regex_pattern, re.S | re.M)
-            self.
+            self.create_squad_tests(testrun, suite, test_name, snippets[regex_id], test_name_regex)
squad/settings.py
CHANGED
@@ -37,17 +37,21 @@ if not os.access(DATA_DIR, os.W_OK):
 # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

 # SECURITY WARNING: keep the secret key used in production secret!
-
-if
-
-
-
-
-
-
-
-
-
+secret_key = os.getenv('SECRET_KEY', None)
+if secret_key:
+    SECRET_KEY = secret_key
+else:
+    secret_key_file = os.getenv('SECRET_KEY_FILE', None)
+    if secret_key_file is None:
+        secret_key_file = os.path.join(DATA_DIR, 'secret.dat')
+
+    if not os.path.exists(secret_key_file):
+        from squad.core.utils import random_key
+        fd = os.open(secret_key_file, os.O_WRONLY | os.O_CREAT, 0o600)
+        with os.fdopen(fd, 'w') as f:
+            f.write(random_key(64))
+
+    SECRET_KEY = open(secret_key_file).read()

 DEBUG = os.getenv('ENV') not in ['production', 'staging']

@@ -418,6 +422,7 @@ CRISPY_TEMPLATE_PACK = 'bootstrap3'

 # Sentry support
 SENTRY_DSN = os.getenv('SENTRY_DSN')
+SENTRY_TRACES_SAMPLE_RATE = os.getenv('SENTRY_TRACES_SAMPLE_RATE', '0')
 if SENTRY_DSN:
     try:
         import sentry_sdk
@@ -428,6 +433,7 @@ if SENTRY_DSN:
             dsn=SENTRY_DSN,
             integrations=[DjangoIntegration(), CeleryIntegration()],
             release='%s@%s' % (os.getenv('ENV', 'squad'), squad_version),
+            traces_sample_rate=float(SENTRY_TRACES_SAMPLE_RATE),
         )
     except ImportError:
         pass
squad/version.py
CHANGED
@@ -1 +1 @@
-__version__ = '1.89'
+__version__ = '1.91'
{squad-1.89.dist-info → squad-1.91.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: squad
-Version: 1.89
+Version: 1.91
 Summary: Software Quality Dashboard
 Home-page: https://github.com/Linaro/squad
 Author: Antonio Terceiro
@@ -12,26 +12,26 @@ Requires-Dist: aiohttp
 Requires-Dist: celery
 Requires-Dist: cryptography
 Requires-Dist: coreapi
-Requires-Dist: django-crispy-forms
-Requires-Dist: Django
+Requires-Dist: django-crispy-forms==1.14.0
+Requires-Dist: Django>=3
 Requires-Dist: django-allauth
 Requires-Dist: django-bootstrap3
 Requires-Dist: django-celery-results
 Requires-Dist: django-cors-headers
 Requires-Dist: django-debug-toolbar
-Requires-Dist: django-simple-history
-Requires-Dist: django-filter
-Requires-Dist: djangorestframework
-Requires-Dist: djangorestframework-filters
+Requires-Dist: django-simple-history>3.0
+Requires-Dist: django-filter>=2.0
+Requires-Dist: djangorestframework>=3.9.2
+Requires-Dist: djangorestframework-filters>=1.0.0.dev0
 Requires-Dist: drf-extensions
 Requires-Dist: future
 Requires-Dist: gunicorn
-Requires-Dist: importlib-metadata
-Requires-Dist: Jinja2
+Requires-Dist: importlib-metadata>3
+Requires-Dist: Jinja2==3.0.3
 Requires-Dist: Markdown
-Requires-Dist: msgpack
+Requires-Dist: msgpack>=0.5.0
 Requires-Dist: python-dateutil
-Requires-Dist: PyYAML
+Requires-Dist: PyYAML>=5.1
 Requires-Dist: PyJWT
 Requires-Dist: pyzmq
 Requires-Dist: requests
@@ -40,6 +40,7 @@ Requires-Dist: sqlparse
 Requires-Dist: svgwrite
 Requires-Dist: whitenoise
 Provides-Extra: postgres
-Requires-Dist: psycopg2
+Requires-Dist: psycopg2-binary; extra == "postgres"

 Software Quality Dashboard
+
{squad-1.89.dist-info → squad-1.91.dist-info}/RECORD
CHANGED
@@ -7,20 +7,21 @@ squad/http.py,sha256=KuIKtpf3yOvf5fwc0T2MR0ul1l4AKxq3b0CLdk6KBhM,3667
 squad/jinja2.py,sha256=OKX-lzNz6qtTZL56HWv4UBMPuBl4WQXv0qFJztGp9zs,2541
 squad/mail.py,sha256=xH5wuIpD7u1fTN9vNOcbzByojleaffsKwp-9i3BeOD0,390
 squad/manage.py,sha256=Z-LXT67p0R-IzwJ9fLIAacEZmU0VUjqDOSg7j2ZSxJ4,1437
-squad/settings.py,sha256=
+squad/settings.py,sha256=0MZ48SV_7CTrLMik2ubWf8-ROQiFju6CKnUC3iR8KAc,14800
 squad/socialaccount.py,sha256=vySqPwQ3qVVpahuJ-Snln8K--yzRL3bw4Nx27AsB39A,789
 squad/urls.py,sha256=JiEfVW8YlzLPE52c2aHzdn5kVVKK4o22w8h5KOA6QhQ,2776
-squad/version.py,sha256=
+squad/version.py,sha256=S8qXLXebPToWZIRCvA5VIWk_5c_MVZR49-YdwQ0ypzc,21
 squad/wsgi.py,sha256=SF8T0cQ0OPVyuYjO5YXBIQzvSXQHV0M2BTmd4gP1rPs,387
 squad/api/__init__.py,sha256=CJiVakfAlHVN5mIFRVQYZQfuNUhUgWVbsdYTME4tq7U,1349
 squad/api/apps.py,sha256=Trk72p-iV1uGn0o5mdJn5HARUoHGbfgO49jwXvpkmdQ,141
-squad/api/ci.py,sha256=
+squad/api/ci.py,sha256=QjGIhSpm8gmIjH4Nd2NAWtJItSVleg3QOLxBU_p9h1E,7082
 squad/api/data.py,sha256=obKDV0-neEvj5lPF9VED2gy_hpfhGtLJABYvSY38ing,2379
 squad/api/filters.py,sha256=Zvp8DCJmiNquFWqvfVseEAAMYYPiT95RUjqKdzcqSnw,6917
+squad/api/prometheus.py,sha256=0usJgOz14g1a71sdfjM-cOC8IGXkpE-5-TqpvJj-Oyk,1840
 squad/api/rest.py,sha256=ZtbK0c1BLPPnsX79XlKFVYONM_VJ0vacWZ2JsdCd4l0,77342
-squad/api/urls.py,sha256=
+squad/api/urls.py,sha256=c-o27_RP0ynOtxuyRKUl274fFMWWrzoii31Mr2saxSQ,1414
 squad/api/utils.py,sha256=Sa8QFId3_oSqD2UOoY3Kuh54LLDLPNMq2sub5ktd6Fs,1160
-squad/api/views.py,sha256=
+squad/api/views.py,sha256=WH4c10e7iRmuL5tWDxG4zEFHzvF5hxDpEVvybfvbc_E,3880
 squad/ci/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 squad/ci/admin.py,sha256=7yB-6F0cvt0NVvzGOTlZCyGPV_YHarmbKJZTTzataT4,2255
 squad/ci/apps.py,sha256=6OVnzTdJkxdqEJnKWYE9dZgUcc29_T1LrDw41cK4EQk,139
@@ -29,10 +30,10 @@ squad/ci/models.py,sha256=Fm-4b3SDgMh9HXzqjOd4iZDRMJ1D9AnZ2cg7i2OR248,16018
 squad/ci/tasks.py,sha256=P0NYjLuyUViTpO1jZMuRVREbFDCccrMCZDw5E4pt928,3882
 squad/ci/utils.py,sha256=38zHpw8xkZDSFlkG-2BwSK6AkcddK9OkN9LXuQ3SHR0,97
 squad/ci/backend/__init__.py,sha256=yhpotXT9F4IdAOXvGQ3-17eOHAFwoaqf9SnMX17ab30,534
-squad/ci/backend/fake.py,sha256=
-squad/ci/backend/lava.py,sha256=
-squad/ci/backend/null.py,sha256=
-squad/ci/backend/tuxsuite.py,sha256
+squad/ci/backend/fake.py,sha256=7Rl-JXnBYThDomOBzBsN9XuVkSjSHTZjtZOURdowZbA,2397
+squad/ci/backend/lava.py,sha256=WeOJJNxv42geGf3Y6r-I0WnhWinxpSSgZAFAwfkiXGY,34039
+squad/ci/backend/null.py,sha256=htEd4NbrXLKdPgFfTS0Ixm8PdT6Ghat3BCYi2zjfuv0,5624
+squad/ci/backend/tuxsuite.py,sha256=HTYLyJvtraHnkMKOjYix66bq1QV4m8bamNBahV5SZZw,19129
 squad/ci/management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 squad/ci/management/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 squad/ci/management/commands/create_tuxsuite_boot_tests.py,sha256=JvjNusebLX71eyz9d-kaeCyekYSpzc1eXoeIqWK9ygo,4045
@@ -81,12 +82,12 @@ squad/core/comparison.py,sha256=LR3-Unv0CTmakFCDzF_h8fm2peTJzkv79mQWNau1iwI,2442
 squad/core/data.py,sha256=2zw56v7iYRTUc7wlhuUNgwIIMmK2w84hi-amR9J7EPU,2236
 squad/core/failures.py,sha256=X6lJVghM2fOrd-RfuHeLlezW2pt7owDZ8eX-Kn_Qrt0,918
 squad/core/history.py,sha256=QRSIoDOw6R6vUWMtsPMknsHGM7FaCAeuCYqASCayHTk,3541
-squad/core/models.py,sha256=
+squad/core/models.py,sha256=sXQmgPtl54IZT7rDmJEU3QK6JSPbi0hTUGRsjwL6PIo,60851
 squad/core/notification.py,sha256=rOpO6F63w7_5l9gQgWBBEk-MFBjp7x_hVzoVIVyDze0,10030
 squad/core/plugins.py,sha256=FLgyoXXKnPBYEf2MgHup9M017rHuADHivLhgzmx_cJE,6354
 squad/core/queries.py,sha256=78fhIJZWXIlDryewYAt96beK1VJad66Ufu8cg3dHh4w,7698
 squad/core/statistics.py,sha256=xyTHuhdBjcJ4AozZESjTzSD3dBmmCDgLpbg5XpeyO_M,1056
-squad/core/utils.py,sha256=
+squad/core/utils.py,sha256=HwCq8SsKJHbBUtF4DZt1iWCuWhqZaHRBn--Yh0O_RH4,5018
 squad/core/locale/django.pot,sha256=XycSJyEaEpozGBS9zu7QTNQbffZC0D9eSJ-AwXaVZx4,2282
 squad/core/locale/es_MX/LC_MESSAGES/django.po,sha256=bwvTWHK2KOT6zFqbIYh61_xYqRnMaQECZsMsOvNdMNw,3071
 squad/core/locale/pl/LC_MESSAGES/django.po,sha256=mI-Vo8OKWCcx4PrsoB6GiPY3lYU55tSqh0sO6fUeK2Y,3111
@@ -98,7 +99,7 @@ squad/core/management/commands/compute_build_summaries.py,sha256=dz6-3vXtFNGYOzl
 squad/core/management/commands/compute_project_statuses.py,sha256=qcm71zEP_A-XhNWrDHM55TJSgKUk_oWjewuZEu2B2KM,3134
 squad/core/management/commands/fill_test_metadata.py,sha256=EG2mqKtThY5D7nnGalM3q0XOPEVDiDnFLV7sw7YSz1U,1326
 squad/core/management/commands/fix_squadplugin_data.py,sha256=cbjPL_-AvazBsmXKd5x6LpaoP-3MGpa3uoUUxljVzdw,5072
-squad/core/management/commands/import_data.py,sha256=
+squad/core/management/commands/import_data.py,sha256=KgSTNtrQQiqzqjJdvKDHbU6IExPsdTbdMJ-yqfZY4Y4,4556
 squad/core/management/commands/import_data.rst,sha256=79tAcJ6hOVRVzW2iheQuO6o2RHZKbbFtsHM-IEr6490,1444
 squad/core/management/commands/migrate_test_runs.py,sha256=RHV06tb4gWyv_q-ooC821_QGZi0WGwxjIYaUGTboqfI,4214
 squad/core/management/commands/populate_metric_build_and_environment.py,sha256=DJP9_YLRso0RiERBVsB0GP4-GaiRtJb0rAiUQDfFNQk,3166
@@ -277,7 +278,7 @@ squad/core/migrations/0167_add_project_datetime.py,sha256=VUBG-qsAhh2f2NXaHOqfX9
 squad/core/migrations/0168_add_group_settings.py,sha256=5UdylfMMNavTL0KXkjPSiEMhSisGWXbhUXQSzfK29Ck,462
 squad/core/migrations/0169_userpreferences.py,sha256=FwYv9RWxMWdQ2lXJMgi-Xc6XBB5Kp-_YTAOr9GVq1To,1098
 squad/core/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-squad/core/tasks/__init__.py,sha256=
+squad/core/tasks/__init__.py,sha256=pYbEkFzNaat7iQQretRiJQPPF4Sq-5-hBykJYnBM04g,18567
 squad/core/tasks/exceptions.py,sha256=n4cbmJFBdA6KWsGiTbfN9DyYGbJpk0DjR0UneEYw_W0,931
 squad/core/tasks/notification.py,sha256=6ZyTbUQZPITPP-4r9MUON7x-NbwvDBG8YeabM6fsjzA,4915
 squad/core/templates/squad/notification/base.jinja2,sha256=AbtQioEHV5DJBW4Etsu0-DQXd_8tQCnLejzgbDGDW7s,3413
@@ -305,7 +306,7 @@ squad/frontend/project_settings.py,sha256=TtWz8h8Goeb3pccLy9jLUibeHqyqkdK8phL7_V
 squad/frontend/queries.py,sha256=NxQF2woAf9A4Wk_ozHzZXOGmr2as-j7hqfvmsfJ-ojc,967
 squad/frontend/setup.py,sha256=NF9VunY1HJGB2HsHJss-go7EGmqr__JASddxiBCvmeQ,169
 squad/frontend/tests.py,sha256=PidrjaToK_Cks0s9Mc4i3Vh4UXOWoXTZlpnxQ2wWjHY,8740
-squad/frontend/urls.py,sha256
+squad/frontend/urls.py,sha256=-rxbsUlMyxldzoVKiVAOMAREqU8SOipy4CqBTlULuMQ,5055
 squad/frontend/user_settings.py,sha256=U_i59iuylg98uH98K4ezPa2NY56idslBhn7MS6FguHQ,4976
 squad/frontend/utils.py,sha256=DeH58CJUI1dovpQrj3a-DcxNzM0cxsnBDOF0mrC4Qws,1364
 squad/frontend/views.py,sha256=4ld9n8pOS35sUKPifgCY4rLHL_Dmybfo_Jg0_Bo5sxs,27031
@@ -387,7 +388,7 @@ squad/frontend/templates/squad/knownissues.jinja2,sha256=RdQZ2AKgV97953eIuP-4IwN
 squad/frontend/templates/squad/login.jinja2,sha256=NPp20MpmgoGxWOschCUxcZMJKdnkVhUmy8kpCgogPs0,2775
 squad/frontend/templates/squad/metrics.jinja2,sha256=7oFBkTiGi3k1UtfR5x0RS961u6rsRCe_YcEXklA0iLA,3277
 squad/frontend/templates/squad/project-nav.jinja2,sha256=AHN7r5TMvJ-NwEo_u3vlJg34J1njsuII32SgQuTfiwA,1526
-squad/frontend/templates/squad/project.jinja2,sha256=
+squad/frontend/templates/squad/project.jinja2,sha256=k2orc5C6Fxp_74utQAN1sa6XPOFMF2sC2D2y671pgSg,887
 squad/frontend/templates/squad/test_history.jinja2,sha256=g_pHD4yQdfXK1D8-tTAPG4yzoqbDVUYm6ml2hANffp8,5869
 squad/frontend/templates/squad/test_run.jinja2,sha256=smxFEC7XnHu28Wj7iC2WQrGjpuPiqsxASpflbyYGG_A,1176
 squad/frontend/templates/squad/test_run_suite_metrics.jinja2,sha256=WGjlObw7ZTGoomTmON0O2QRHHdmEBOYf9xMSTWP83F4,1780
@@ -425,15 +426,17 @@ squad/plugins/__init__.py,sha256=9BSzy2jFIoDpWlhD7odPPrLdW4CC3btBhdFCvB651dM,152
 squad/plugins/example.py,sha256=BKpwd315lHRIuNXJPteibpwfnI6C5eXYHYdFYBtVmsI,89
 squad/plugins/gerrit.py,sha256=CqO2KnFQzu9utr_TQ-sGr1wg3ln0B-bS2-c0_i8T5-c,7009
 squad/plugins/github.py,sha256=pdtLZw_7xNuzkaFvY_zWi0f2rsMlalXjKm7sz0eADz4,2429
-squad/plugins/linux_log_parser.py,sha256=
+squad/plugins/linux_log_parser.py,sha256=WrDbyfupEcP1-E4ke9wjHiddio8sD5BFuEtF4AH0aXA,3274
+squad/plugins/lib/__init__.py,sha256=jzazbAvp2_ibblAs0cKZrmo9aR2EL3hKLyRDE008r2I,40
+squad/plugins/lib/base_log_parser.py,sha256=OW6JkZ3PM5RiDkt9UZ7OFFpUIArCxFUaqovynzwBL1Y,6573
 squad/run/__init__.py,sha256=ssE8GPAGFiK6V0WpZYowav6Zqsd63dfDMMYasNa1sQg,1410
 squad/run/__main__.py,sha256=DOl8JOi4Yg7DdtwnUeGqtYBJ6P2k-D2psAEuYOjWr8w,66
 squad/run/listener.py,sha256=jBeOQhPGb4EdIREB1QsCzYuumsfJ-TqJPd3nR-0m59g,200
 squad/run/scheduler.py,sha256=CDJG3q5C0GuQuxwlMOfWTSSJpDdwbR6rzpbJfuA0xuw,277
 squad/run/worker.py,sha256=jtML0h5qKDuSbpJ6_rpWP4MT_rsGA7a24AhwGxBquzk,594
-squad-1.
-squad-1.
-squad-1.
-squad-1.
-squad-1.
-squad-1.
+squad-1.91.dist-info/COPYING,sha256=jOtLnuWt7d5Hsx6XXB2QxzrSe2sWWh3NgMfFRetluQM,35147
+squad-1.91.dist-info/METADATA,sha256=U074123n6U3T9v0BY5GTaFCynFxFjL4HA7GEoLQjXX4,1278
+squad-1.91.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+squad-1.91.dist-info/entry_points.txt,sha256=J_jG3qnkoOHX4RFNGC0f83eJ4BSvK3pqLFkoF3HWfmA,195
+squad-1.91.dist-info/top_level.txt,sha256=_x9uqE1XppiiytmVTl_qNgpnXus6Gsef69HqfliE7WI,6
+squad-1.91.dist-info/RECORD,,
{squad-1.89.dist-info → squad-1.91.dist-info}/COPYING
File without changes
{squad-1.89.dist-info → squad-1.91.dist-info}/top_level.txt
File without changes