squad 1.91-py3-none-any.whl → 1.93-py3-none-any.whl

squad/api/prometheus.py CHANGED
@@ -43,7 +43,7 @@ def metrics(request):
 
     for queue in queues:
         if queue['name'] in available_queues:
-            metric_name = f'queue_{queue["name"]}_length'
+            metric_name = f'queue_{queue["name"]}_count'
             length = queue['messages_ready']
 
             output += f'\n# TYPE {metric_name} counter'
squad/ci/backend/tuxsuite.py CHANGED
@@ -10,6 +10,7 @@ from requests.adapters import HTTPAdapter, Retry
 from functools import reduce
 from urllib.parse import urljoin
 
+from cryptography.exceptions import InvalidSignature
 from cryptography.hazmat.primitives.asymmetric import ec
 from cryptography.hazmat.primitives import (
     hashes,
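
The new InvalidSignature import backs the reworked callback verification in the @@ -484 hunk below: the signature is now checked against the raw request body, and a bad signature surfaces as a regular exception. A minimal standalone sketch of that flow, assuming the caller supplies the key, signature and body (none of these values are TuxSuite specifics):

import base64

from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec


def verify_callback(public_key, signature_b64, body):
    # The signature arrives urlsafe-base64 encoded; the data passed to
    # verify() must be the exact bytes that were signed, not a re-serialized
    # copy of the JSON payload.
    signature = base64.urlsafe_b64decode(signature_b64)
    key = serialization.load_ssh_public_key(public_key.encode("ascii"))
    try:
        key.verify(signature, body, ec.ECDSA(hashes.SHA256()))
    except InvalidSignature:
        raise Exception("Failed to verify signature against payload")
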
@@ -249,9 +250,6 @@ class Backend(BaseBackend):
         test_job.name = test_name
 
         build_status = results['build_status']
-        if build_status == 'error' and results['retry'] < 2:
-            # SQUAD should retry fetching the build until retry == 2
-            raise TemporaryFetchIssue(results.get('status_message', 'TuxSuite Error'))
 
         # Make metadata
         metadata_keys = settings.get('BUILD_METADATA_KEYS', [])
@@ -270,7 +268,7 @@ class Backend(BaseBackend):
         metrics = {}
 
         completed = True
-        if results['retry'] >= 2:
+        if build_status == 'error':
             # This indicates that TuxSuite gave up trying to work on this build
             status = 'Incomplete'
             tests[f'build/{test_name}'] = 'skip'
@@ -484,22 +482,18 @@ class Backend(BaseBackend):
         if public_key is None:
             raise Exception("missing tuxsuite public key for this project")
 
-        payload = json.loads(request.body)
         signature = base64.urlsafe_b64decode(signature)
         key = serialization.load_ssh_public_key(public_key.encode("ascii"))
-        key.verify(
-            signature,
-            payload.encode("utf-8"),
-            ec.ECDSA(hashes.SHA256()),
-        )
+        try:
+            key.verify(
+                signature,
+                request.body,
+                ec.ECDSA(hashes.SHA256()),
+            )
+        except InvalidSignature:
+            raise Exception("Failed to verify signature against payload")
 
     def process_callback(self, json_payload, build, environment, backend):
-        # The payload coming from Tuxsuite is formatted as bytes,
-        # so after the first json.loads(request.body), the result
-        # will still be a string containing the actual json document
-        # We need to call json.loads() once more to get the actual
-        # python dict containing all the information we need
-        json_payload = json.loads(json_payload)
         if "kind" not in json_payload or "status" not in json_payload:
             raise Exception("`kind` and `status` are required in the payload")
 
squad/core/management/commands/run_log_parser.py ADDED
@@ -0,0 +1,44 @@
+from django.core.management.base import BaseCommand
+
+from squad.plugins.linux_log_parser import Plugin as BootTestLogParser
+from squad.plugins.linux_log_parser_build import Plugin as BuildLogParser
+
+
+class FakeTestRun:
+    log_file = None
+    id = None
+
+
+log_parsers = {
+    'linux_log_parser_boot_test': BootTestLogParser(),
+    "linux_log_parser_build": BuildLogParser(),
+}
+
+
+class Command(BaseCommand):
+
+    help = """Run a log parser and print the outputs to the stdout."""
+
+    def add_arguments(self, parser):
+
+        parser.add_argument(
+            "LOG_FILE",
+            help="Log file to parser",
+        )
+
+        parser.add_argument(
+            "LOG_PARSER",
+            choices=log_parsers.keys(),
+            help="Which log parser to run"
+        )
+
+    def handle(self, *args, **options):
+        self.options = options
+
+        with open(options["LOG_FILE"], "r") as f:
+            log_file = f.read()
+
+        testrun = FakeTestRun()
+        testrun.log_file = log_file
+        parser = log_parsers[options["LOG_PARSER"]]
+        parser.postprocess_testrun(testrun, squad=False, print=True)
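
For reference, the new command can be driven without a full SQUAD deployment; from code it would look roughly like this (the log path is illustrative):

from django.core.management import call_command

# Equivalent to: python manage.py run_log_parser /tmp/build.log linux_log_parser_build
call_command("run_log_parser", "/tmp/build.log", "linux_log_parser_build")
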
squad/core/models.py CHANGED
@@ -577,12 +577,17 @@ class Build(models.Model):
         List of attachments from all testruns
         """
         if self.__attachments__ is None:
+            test_run_ids = self.test_runs.values_list('id', flat=True)
+            all_attachments = Attachment.objects.filter(test_run_id__in=test_run_ids).values(
+                'test_run_id', 'filename'
+            )
+
             attachments = {}
-            for test_run in self.test_runs.all():
-                attachments[test_run.pk] = []
-                for attachment in test_run.attachments.all():
-                    attachments[test_run.pk].append(attachment.filename)
+            for attachment in all_attachments:
+                attachments.setdefault(attachment['test_run_id'], []).append(attachment['filename'])
+
             self.__attachments__ = attachments
+
         return self.__attachments__
 
     @property
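
The rewrite replaces one attachment query per test run with a single query whose rows are grouped in Python; on illustrative data the grouping step is simply:

rows = [
    {'test_run_id': 1, 'filename': 'boot.log'},
    {'test_run_id': 1, 'filename': 'lava.yaml'},
    {'test_run_id': 2, 'filename': 'build.log'},
]

attachments = {}
for row in rows:
    # one pass over the rows, no per-test-run database round trip
    attachments.setdefault(row['test_run_id'], []).append(row['filename'])

# {1: ['boot.log', 'lava.yaml'], 2: ['build.log']}
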
squad/plugins/lib/base_log_parser.py CHANGED
@@ -16,8 +16,16 @@ square_brackets_and_contents = r"\[[^\]]+\]"
 
 class BaseLogParser:
     def compile_regexes(self, regexes):
-        combined = [r"(%s)" % r[REGEX_BODY] for r in regexes]
-        return re.compile(r"|".join(combined), re.S | re.M)
+        with_brackets = [r"(%s)" % r[REGEX_BODY] for r in regexes]
+        combined = r"|".join(with_brackets)
+
+        # In the case where there is only one regex, we need to add extra
+        # bracket around it for it to behave the same as the multiple regex
+        # case
+        if len(regexes) == 1:
+            combined = f"({combined})"
+
+        return re.compile(combined, re.S | re.M)
 
     def remove_numbers_and_time(self, snippet):
         # [ 1067.461794][ T132] BUG: KCSAN: data-race in do_page_fault spectre_v4_enable_task_mitigation
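
The extra parentheses are about re.findall's group handling: with two or more capture groups it returns one tuple per match (one slot per alternative), which is the shape the callers expect, while a single group yields bare strings. A quick illustration with throwaway patterns:

import re

re.findall(r"(a\d)|(b\d)", "a1 b2 a3")
# [('a1', ''), ('', 'b2'), ('a3', '')] -- tuple per match, one slot per regex

re.findall(r"(a\d)", "a1 b2 a3")
# ['a1', 'a3'] -- a lone group returns bare strings, a different shape

re.findall(r"((a\d))", "a1 b2 a3")
# [('a1', 'a1'), ('a3', 'a3')] -- the extra bracket restores the tuple shape
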
@@ -26,7 +34,11 @@ class BaseLogParser:
 
         # [ .][ T] BUG: KCSAN: data-race in do_page_fault spectre_v_enable_task_mitigation
         # -> BUG: KCSAN: data-race in do_page_fault spectre_v_enable_task_mitigation
-        without_time = re.sub(f"^{square_brackets_and_contents}({square_brackets_and_contents})?", "", without_numbers)  # noqa
+        without_time = re.sub(
+            f"^{square_brackets_and_contents}({square_brackets_and_contents})?",
+            "",
+            without_numbers,
+        )  # noqa
 
         return without_time
 
@@ -41,10 +53,7 @@ class BaseLogParser:
         snippet = matches[0]
         without_numbers_and_time = self.remove_numbers_and_time(snippet)
 
-        # Limit the name length to 191 characters, since the max name length
-        # for SuiteMetadata in SQUAD is 256 characters. The SHA and "-" take 65
-        # characters: 256-65=191
-        return slugify(without_numbers_and_time)[:191]
+        return slugify(without_numbers_and_time)
 
     def create_shasum(self, snippet):
         sha = hashlib.sha256()
@@ -52,7 +61,7 @@ class BaseLogParser:
         sha.update(without_numbers_and_time.encode())
         return sha.hexdigest()
 
-    def create_name_log_dict(self, test_name, lines, test_regex=None):
+    def create_name_log_dict(self, test_name, lines, test_regex=None, create_shas=True):
         """
         Produce a dictionary with the test names as keys and the extracted logs
         for that test name as values. There will be at least one test name per
@@ -64,31 +73,43 @@ class BaseLogParser:
         # have any output for a particular regex, just use the default name
         # (for example "check-kernel-oops").
         tests_without_shas_to_create = defaultdict(set)
-        tests_with_shas_to_create = defaultdict(set)
+        tests_with_shas_to_create = None
 
         # If there are lines, then create the tests for these.
         for line in lines:
             extracted_name = self.create_name(line, test_regex)
             if extracted_name:
-                extended_test_name = f"{test_name}-{extracted_name}"
+                max_name_length = 256
+                # If adding SHAs, limit the name length to 191 characters,
+                # since the max name length for SuiteMetadata in SQUAD is 256
+                # characters. The SHA and "-" take 65 characters: 256-65=191
+                if create_shas:
+                    max_name_length -= 65
+                extended_test_name = f"{test_name}-{extracted_name}"[:max_name_length]
             else:
                 extended_test_name = test_name
             tests_without_shas_to_create[extended_test_name].add(line)
 
-        for name, test_lines in tests_without_shas_to_create.items():
-            # Some lines of the matched regex might be the same, and we don't want to create
-            # multiple tests like test1-sha1, test1-sha1, etc, so we'll create a set of sha1sums
-            # then create only new tests for unique sha's
+        if create_shas:
+            tests_with_shas_to_create = defaultdict(set)
+            for name, test_lines in tests_without_shas_to_create.items():
+                # Some lines of the matched regex might be the same, and we don't want to create
+                # multiple tests like test1-sha1, test1-sha1, etc, so we'll create a set of sha1sums
+                # then create only new tests for unique sha's
 
-            for line in test_lines:
-                sha = self.create_shasum(line)
-                name_with_sha = f"{name}-{sha}"
-                tests_with_shas_to_create[name_with_sha].add(line)
+                for line in test_lines:
+                    sha = self.create_shasum(line)
+                    name_with_sha = f"{name}-{sha}"
+                    tests_with_shas_to_create[name_with_sha].add(line)
 
         return tests_without_shas_to_create, tests_with_shas_to_create
 
     def create_squad_tests_from_name_log_dict(
-        self, suite, testrun, tests_without_shas_to_create, tests_with_shas_to_create
+        self,
+        suite_name,
+        testrun,
+        tests_without_shas_to_create,
+        tests_with_shas_to_create=None,
     ):
         # Import SuiteMetadata from SQUAD only when required so BaseLogParser
         # does not require a SQUAD to work. This makes it easier to reuse this
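
The 65 characters reserved in the comment above are the sha256 hex digest plus the "-" separator, which is easy to confirm (the hashed snippet here is just an example):

import hashlib

sha = hashlib.sha256(b"BUG: KCSAN: data-race in do_page_fault").hexdigest()
len(sha)        # 64 hex characters
len("-" + sha)  # 65, leaving 256 - 65 = 191 characters for the name itself
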
@@ -96,6 +117,8 @@ class BaseLogParser:
         # patterns.
         from squad.core.models import SuiteMetadata
 
+        suite, _ = testrun.build.project.suites.get_or_create(slug=suite_name)
+
         for name, lines in tests_without_shas_to_create.items():
             metadata, _ = SuiteMetadata.objects.get_or_create(
                 suite=suite.slug, name=name, kind="test"
@@ -108,34 +131,72 @@ class BaseLogParser:
                 build=testrun.build,
                 environment=testrun.environment,
             )
-        for name_with_sha, lines in tests_with_shas_to_create.items():
-            metadata, _ = SuiteMetadata.objects.get_or_create(
-                suite=suite.slug, name=name_with_sha, kind="test"
-            )
-            testrun.tests.create(
-                suite=suite,
-                result=False,
-                log="\n---\n".join(lines),
-                metadata=metadata,
-                build=testrun.build,
-                environment=testrun.environment,
-            )
-
-    def create_squad_tests(self, testrun, suite, test_name, lines, test_regex=None):
+        if tests_with_shas_to_create:
+            for name_with_sha, lines in tests_with_shas_to_create.items():
+                metadata, _ = SuiteMetadata.objects.get_or_create(
+                    suite=suite.slug, name=name_with_sha, kind="test"
+                )
+                testrun.tests.create(
+                    suite=suite,
+                    result=False,
+                    log="\n---\n".join(lines),
+                    metadata=metadata,
+                    build=testrun.build,
+                    environment=testrun.environment,
+                )
+
+    def print_squad_tests_from_name_log_dict(
+        self,
+        suite_name,
+        tests_without_shas_to_create,
+        tests_with_shas_to_create=None,
+    ):
+        for name, lines in tests_without_shas_to_create.items():
+            print(f"\nName: {suite_name}/{name}")
+            log = "\n".join(lines)
+            print(f"Log:\n{log}")
+
+        if tests_with_shas_to_create:
+            for name_with_sha, lines in tests_with_shas_to_create.items():
+                print(f"\nName: {suite_name}/{name_with_sha}")
+                log = "\n---\n".join(lines)
+                print(f"Log:\n{log}")
+
+    def create_squad_tests(
+        self,
+        testrun,
+        suite_name,
+        test_name,
+        lines,
+        test_regex=None,
+        create_shas=True,
+        print=False,
+        squad=True,
+    ):
         """
         There will be at least one test per regex. If there were any match for
         a given regex, then a new test will be generated using test_name +
         shasum. This helps comparing kernel logs across different builds
         """
+
         tests_without_shas_to_create, tests_with_shas_to_create = (
-            self.create_name_log_dict(test_name, lines, test_regex)
-        )
-        self.create_squad_tests_from_name_log_dict(
-            suite,
-            testrun,
-            tests_without_shas_to_create,
-            tests_with_shas_to_create,
+            self.create_name_log_dict(
+                test_name, lines, test_regex, create_shas=create_shas
+            )
         )
+        if print:
+            self.print_squad_tests_from_name_log_dict(
+                suite_name,
+                tests_without_shas_to_create,
+                tests_with_shas_to_create,
+            )
+        if squad:
+            self.create_squad_tests_from_name_log_dict(
+                suite_name,
+                testrun,
+                tests_without_shas_to_create,
+                tests_with_shas_to_create,
+            )
 
     def join_matches(self, matches, regexes):
         """
squad/plugins/linux_log_parser.py CHANGED
@@ -6,21 +6,21 @@ from squad.plugins.lib.base_log_parser import BaseLogParser, REGEX_NAME, REGEX_E
 logger = logging.getLogger()
 
 MULTILINERS = [
-    ('exception', f'-+\[? cut here \]?-+.*?{tstamp}{pid}?\s+-+\[? end trace \w* \]?-+', f"\n{tstamp}{not_newline_or_plus}*"),  # noqa
-    ('kasan', f'{tstamp}{pid}?\s+=+\n{tstamp}{pid}?\s+BUG: KASAN:.*?\n*?{tstamp}{pid}?\s+=+', f"BUG: KASAN:{not_newline_or_plus}*"),  # noqa
-    ('kcsan', f'{tstamp}{pid}?\s+=+\n{tstamp}{pid}?\s+BUG: KCSAN:.*?=+', f"BUG: KCSAN:{not_newline_or_plus}*"),  # noqa
-    ('kfence', f'{tstamp}{pid}?\s+=+\n{tstamp}{pid}?\s+BUG: KFENCE:.*?{tstamp}{pid}?\s+=+', f"BUG: KFENCE:{not_newline_or_plus}*"),  # noqa
-    ('panic-multiline', f'{tstamp}{pid}?\s+Kernel panic - [^\n]+\n.*?-+\[? end Kernel panic - [^\n]+ \]?-*', f"Kernel {not_newline_or_plus}*"),  # noqa
-    ('internal-error-oops', f'{tstamp}{pid}?\s+Internal error: Oops.*?-+\[? end trace \w+ \]?-+', f"Oops{not_newline_or_plus}*"),  # noqa
+    ('exception', fr'-+\[? cut here \]?-+.*?{tstamp}{pid}?\s+-+\[? end trace \w* \]?-+', fr"\n{tstamp}{not_newline_or_plus}*"),  # noqa
+    ('kasan', fr'{tstamp}{pid}?\s+=+\n{tstamp}{pid}?\s+BUG: KASAN:.*?\n*?{tstamp}{pid}?\s+=+', fr"BUG: KASAN:{not_newline_or_plus}*"),  # noqa
+    ('kcsan', fr'{tstamp}{pid}?\s+=+\n{tstamp}{pid}?\s+BUG: KCSAN:.*?=+', fr"BUG: KCSAN:{not_newline_or_plus}*"),  # noqa
+    ('kfence', fr'{tstamp}{pid}?\s+=+\n{tstamp}{pid}?\s+BUG: KFENCE:.*?{tstamp}{pid}?\s+=+', fr"BUG: KFENCE:{not_newline_or_plus}*"),  # noqa
+    ('panic-multiline', fr'{tstamp}{pid}?\s+Kernel panic - [^\n]+\n.*?-+\[? end Kernel panic - [^\n]+ \]?-*', fr"Kernel {not_newline_or_plus}*"),  # noqa
+    ('internal-error-oops', fr'{tstamp}{pid}?\s+Internal error: Oops.*?-+\[? end trace \w+ \]?-+', fr"Oops{not_newline_or_plus}*"),  # noqa
 ]
 
 ONELINERS = [
-    ('oops', r'^[^\n]+Oops(?: -|:).*?$', f"Oops{not_newline_or_plus}*"),  # noqa
-    ('fault', r'^[^\n]+Unhandled fault.*?$', f"Unhandled {not_newline_or_plus}*"),  # noqa
-    ('warning', r'^[^\n]+WARNING:.*?$', f"WARNING:{not_newline_or_plus}*"),  # noqa
-    ('bug', r'^[^\n]+(?: kernel BUG at|BUG:).*?$', f"BUG{not_newline_or_plus}*"),  # noqa
-    ('invalid-opcode', r'^[^\n]+invalid opcode:.*?$', f"invalid opcode:{not_newline_or_plus}*"),  # noqa
-    ('panic', r'Kernel panic - not syncing.*?$', f"Kernel {not_newline_or_plus}*"),  # noqa
+    ('oops', r'^[^\n]+Oops(?: -|:).*?$', fr"Oops{not_newline_or_plus}*"),  # noqa
+    ('fault', r'^[^\n]+Unhandled fault.*?$', fr"Unhandled {not_newline_or_plus}*"),  # noqa
+    ('warning', r'^[^\n]+WARNING:.*?$', fr"WARNING:{not_newline_or_plus}*"),  # noqa
+    ('bug', r'^[^\n]+(?: kernel BUG at|BUG:).*?$', fr"BUG{not_newline_or_plus}*"),  # noqa
+    ('invalid-opcode', r'^[^\n]+invalid opcode:.*?$', fr"invalid opcode:{not_newline_or_plus}*"),  # noqa
+    ('panic', r'Kernel panic - not syncing.*?$', fr"Kernel {not_newline_or_plus}*"),  # noqa
 ]
 
 # Tip: broader regexes should come first
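
The only change in this hunk is switching the f-strings that embed regex escapes to raw f-strings: \s, \w and friends are not valid Python string escapes, so the non-raw form draws an "invalid escape sequence" warning on current interpreters (a SyntaxWarning as of Python 3.12). For example, with an illustrative timestamp pattern:

tstamp = r"\[ *\d+\.\d+\]"  # stand-in value, not the plugin's actual tstamp

broken = f'{tstamp}\s+Internal error: Oops'   # warns: invalid escape sequence '\s'
fixed = fr'{tstamp}\s+Internal error: Oops'   # raw f-string, same text, no warning
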
@@ -44,8 +44,9 @@ class Plugin(BasePlugin, BaseLogParser):
         kernel_msgs = re.findall(f'({tstamp}{pid}? .*?)$', log, re.S | re.M)  # noqa
         return '\n'.join(kernel_msgs)
 
-    def postprocess_testrun(self, testrun):
-        if testrun.log_file is None:
+    def postprocess_testrun(self, testrun, squad=True, print=False):
+        # If running as a SQUAD plugin, only run the boot/test log parser if this is not a build testrun
+        if testrun.log_file is None or (squad and testrun.tests.filter(suite__slug="build").exists()):
             return
 
         boot_log, test_log = self.__cutoff_boot_log(testrun.log_file)
@@ -56,7 +57,7 @@ class Plugin(BasePlugin, BaseLogParser):
 
         for log_type, log in logs.items():
             log = self.__kernel_msgs_only(log)
-            suite, _ = testrun.build.project.suites.get_or_create(slug=f'log-parser-{log_type}')
+            suite_name = f'log-parser-{log_type}'
 
             regex = self.compile_regexes(REGEXES)
             matches = regex.findall(log)
@@ -68,4 +69,4 @@ class Plugin(BasePlugin, BaseLogParser):
                 test_name_regex = None
                 if regex_pattern:
                     test_name_regex = re.compile(regex_pattern, re.S | re.M)
-                self.create_squad_tests(testrun, suite, test_name, snippets[regex_id], test_name_regex)
+                self.create_squad_tests(testrun, suite_name, test_name, snippets[regex_id], test_name_regex, squad=squad, print=print)
squad/plugins/linux_log_parser_build.py ADDED
@@ -0,0 +1,332 @@
+import logging
+import re
+
+from django.template.defaultfilters import slugify
+
+from squad.plugins import Plugin as BasePlugin
+from squad.plugins.lib.base_log_parser import (
+    REGEX_EXTRACT_NAME,
+    REGEX_NAME,
+    BaseLogParser,
+)
+
+logger = logging.getLogger()
+
+file_path = r"^(?:[^\n]*?:(?:\d+:){2}|<[^\n]*?>:)"
+gcc_clang_compiler_error_warning = rf"{file_path} (?:error|warning): [^\n]+?\n^(?:\.+\n|^(?!\s+(?:CC|Kernel[^\n]*?is ready))\s+?[^\n]+\n|{file_path} note:[^\n]+\n)*"
+
+MULTILINERS_GCC = [
+    (
+        "gcc-compiler",
+        gcc_clang_compiler_error_warning,
+        r"^[^\n]*(?:error|warning)[^\n]*$",
+    ),
+]
+
+ONELINERS_GCC = []
+
+
+MULTILINERS_CLANG = [
+    (
+        "clang-compiler",
+        gcc_clang_compiler_error_warning,
+        r"^[^\n]*(?:error|warning)[^\n]*$",
+    ),
+]
+
+ONELINERS_CLANG = [
+    (
+        "clang-compiler-single-line",
+        "^clang: (?:error|warning).*?$",
+        r"^[^\n]*(?:error|warning).*?$",
+    ),
+    (
+        "clang-compiler-fatal-error",
+        "^fatal error.*?$",
+        r"^fatal error.*?$",
+    ),
+]
+
+MULTILINERS_GENERAL = [
+    (
+        "general-not-a-git-repo",
+        r"^[^\n]*fatal: not a git repository.*?not set\)\.$",
+        r"^[^\n]*fatal: not a git repository.*?$",
+    ),
+    (
+        "general-unexpected-argument",
+        r"^[^\n]*error: Found argument.*?--help$",
+        r"^[^\n]*error: Found argument.*?$",
+    ),
+    (
+        "general-broken-32-bit",
+        r"^[^\n]*Warning: you seem to have a broken 32-bit build.*?(?:If[^\n]*?try:(?:\n|\s+.+?$)+)+",
+        r"^[^\n]*Warning:.*?$",
+    ),
+    (
+        "general-makefile-overriding",
+        r"^[^\n]*warning: overriding recipe for target.*?ignoring old recipe for target.*?$",
+        r"^[^\n]*warning:.*?$",
+    ),
+    (
+        "general-unmet-dependencies",
+        r"^WARNING: unmet direct dependencies detected for.*?$(?:\n +[^\n]+)*",
+        r"^WARNING: unmet direct dependencies detected for.*?$",
+    ),
+    (
+        "general-ldd",
+        r"^[^\n]*?lld:[^\n]+?(?:warning|error):.*?$(?:\n^>>>[^\n]+)*",
+        r"^[^\n]*?lld:.*?$",
+    ),
+    (
+        "general-ld",
+        r"^[^\n]*?ld:[^\n]+?(?:warning|error):[^\n]*?$(?:\n^[^\n]*?NOTE:[^\n]+)*",
+        r"^[^\n]*?ld:[^\n]+?(?:warning|error):.*?$",
+    ),
+    (
+        "general-objcopy",
+        r"^[^\n]*?objcopy:[^\n]+?(?:warning|error):[^\n]*?$(?:\n^[^\n]*?NOTE:[^\n]+)*",
+        r"^[^\n]*?objcopy:[^\n]+?(?:warning|error):.*?$",
+    ),
+    (
+        "general-ld-undefined-reference",
+        r"^[^\n]*?ld[^\n]*?$\n^[^\n]+undefined reference.*?$",
+        r"^[^\n]+undefined reference.*?$",
+    ),
+    (
+        "general-modpost",
+        r"^[^\n]*?WARNING: modpost:[^\n]*?$(?:\n^To see.*?:$\n^.*?$)?",
+        r"^[^\n]*?WARNING.*?$",
+    ),
+    (
+        "general-python-traceback",
+        r"Traceback.*?^[^\s]+Error: .*?$",
+        r"^[^\s]+Error: .*?$",
+    ),
+]
+
+ONELINERS_GENERAL = [
+    (
+        "general-no-such-file-or-directory",
+        r"^[^\n]+?No such file or directory.*?$",
+        r"^[^\n]+?No such file or directory.*?$",
+    ),
+    (
+        "general-no-targets",
+        r"^[^\n]+?No targets.*?$",
+        r"^[^\n]+?No targets.*?$",
+    ),
+    (
+        "general-no-rule-to-make-target",
+        r"^[^\n]+?No rule to make target.*?$",
+        r"^[^\n]+?No rule to make target.*?$",
+    ),
+    (
+        "general-makefile-config",
+        r"^Makefile.config:\d+:.*?$",
+        r"^Makefile.config:\d+:.*?$",
+    ),
+    (
+        "general-not-found",
+        r"^[^\n]*?not found.*?$",
+        r"^[^\n]*?not found.*?$",
+    ),
+    (
+        "general-kernel-abi",
+        r"^Warning: Kernel ABI header at.*?$",
+        r"^Warning: Kernel ABI header at.*?$",
+    ),
+    (
+        "general-missing",
+        r"^Warning: missing.*?$",
+        r"^Warning: missing.*?$",
+    ),
+    (
+        "general-dtc",
+        r"^[^\n]*?Warning \([^\n]*?\).*?$",
+        r"^[^\n]*?Warning.*?$",
+    ),
+    (
+        "general-register-allocation",
+        r"^[^\n]*?error: register allocation failed.*?$",
+        r"^[^\n]*?error.*?$",
+    ),
+]
+
+# Tip: broader regexes should come first
+REGEXES_GCC = MULTILINERS_GCC + MULTILINERS_GENERAL + ONELINERS_GCC + ONELINERS_GENERAL
+REGEXES_CLANG = (
+    MULTILINERS_CLANG + MULTILINERS_GENERAL + ONELINERS_CLANG + ONELINERS_GENERAL
+)
+
+supported_toolchains = {
+    "gcc": REGEXES_GCC,
+    "clang": REGEXES_CLANG,
+}
+
+make_regex = r"^make .*?$"
+in_file_regex = r"^In file[^\n]*?[:,]$(?:\n^(?:\s+|In file)[^\n]*?[:,]$)*"
+in_function_regex = r"^[^\n]*?In function.*?:$"
+entering_dir_regex = r"^make\[(?:\d+)\]: Entering directory.*?$"
+leaving_dir_regex = r"^make\[(?:\d+)\]: Leaving directory.*?$"
+
+split_regex_gcc = rf"(.*?)({make_regex}|{in_file_regex}|{in_function_regex}|{entering_dir_regex}|{leaving_dir_regex})"
+
+
+class Plugin(BasePlugin, BaseLogParser):
+
+    def post_process_test_name(self, text):
+        # Remove "builds/linux" if there
+        text = re.sub(r"builds/linux", "", text)
+
+        # Change "/" and "." to "_" for readability
+        text = re.sub(r"[/\.]", "_", text)
+
+        # Remove numbers and hex
+        text = re.sub(r"(0x[a-f0-9]+|[<\[][0-9a-f]+?[>\]]|\d+)", "", text)
+
+        # Remove "{...}" and "[...]"
+        text = re.sub(r"\{.+?\}", "", text)
+        text = re.sub(r"\[.+?\]", "", text)
+
+        return text
+
+    def create_name(self, snippet, compiled_regex=None):
+        matches = None
+        if compiled_regex:
+            matches = compiled_regex.findall(snippet)
+        if not matches:
+            # Only extract a name if we provide a regex to extract the name and
+            # there is a match
+            return None
+        snippet = matches[0]
+        without_numbers_and_time = self.remove_numbers_and_time(snippet)
+
+        name = slugify(self.post_process_test_name(without_numbers_and_time))
+
+        return name
+
+    def split_by_regex(self, log, regex):
+        # Split up the log by the keywords we want to capture
+        s_lines_compiled = re.compile(regex, re.DOTALL | re.MULTILINE)
+        split_by_regex_list = s_lines_compiled.split(log)
+        split_by_regex_list = [
+            f for f in split_by_regex_list if f is not None and f != ""
+        ]
+
+        return split_by_regex_list
+
+    def process_blocks(
+        self,
+        blocks_to_process,
+        regexes,
+        make_regex=make_regex,
+        entering_dir_regex=entering_dir_regex,
+        leaving_dir_regex=leaving_dir_regex,
+        in_file_regex=in_file_regex,
+        in_function_regex=in_function_regex,
+    ):
+        snippets = dict()
+        regex_compiled = self.compile_regexes(regexes)
+        make_regex_compiled = re.compile(make_regex, re.DOTALL | re.MULTILINE)
+        entering_dir_regex_compiled = re.compile(
+            entering_dir_regex, re.DOTALL | re.MULTILINE
+        )
+        leaving_dir_regex_compiled = re.compile(
+            leaving_dir_regex, re.DOTALL | re.MULTILINE
+        )
+        in_file_regex_compiled = re.compile(in_file_regex, re.DOTALL | re.MULTILINE)
+        in_function_regex_compiled = re.compile(
+            in_function_regex, re.DOTALL | re.MULTILINE
+        )
+
+        # For tracking the last piece of information we saw
+        make_command = None
+        entering_dir = None
+        in_file = None
+        in_function = None
+
+        for regex_id in range(len(regexes)):
+            snippets[regex_id] = []
+        for block in blocks_to_process:
+            if make_regex_compiled.match(block):
+                make_command = block
+                entering_dir = None
+                in_file = None
+                in_function = None
+            elif entering_dir_regex_compiled.match(block):
+                entering_dir = block
+                in_file = None
+                in_function = None
+            elif leaving_dir_regex_compiled.match(block):
+                entering_dir = None
+                in_file = None
+                in_function = None
+            elif in_file_regex_compiled.match(block):
+                in_file = block
+                in_function = None
+            elif in_function_regex_compiled.match(block):
+                in_function = block
+            else:
+                matches = regex_compiled.findall(block)
+                sub_snippets = self.join_matches(matches, regexes)
+                prepend = ""
+                if make_command:
+                    prepend += make_command + "\n"
+                if entering_dir:
+                    prepend += entering_dir + "\n"
+                if in_file:
+                    prepend += in_file + "\n"
+                if in_function:
+                    prepend += in_function + "\n"
+                for regex_id in range(len(regexes)):
+                    for s in sub_snippets[regex_id]:
+                        snippets[regex_id].append(prepend + s)
+
+        return snippets
+
+    def postprocess_testrun(self, testrun, squad=True, print=False):
+        """
+        Check:
+        - There is a log file
+        - If running as SQUAD plugin, the testrun contains the "build"
+          suite - this tells us that the testrun's log is a build log
+        """
+        if testrun.log_file is None or (
+            squad and not testrun.tests.filter(suite__slug="build").exists()
+        ):
+            return
+
+        regexes = None
+        for toolchain, toolchain_regexes in supported_toolchains.items():
+            if f"--toolchain={toolchain}" in testrun.log_file:
+                toolchain_name = toolchain
+                regexes = toolchain_regexes
+
+        # If a supported toolchain was not found in the log
+        if regexes is None:
+            return
+
+        # If running in SQUAD, create the suite
+        suite_name = f"log-parser-build-{toolchain_name}"
+
+        blocks_to_process = self.split_by_regex(testrun.log_file, split_regex_gcc)
+
+        snippets = self.process_blocks(blocks_to_process, regexes)
+
+        for regex_id in range(len(regexes)):
+            test_name = regexes[regex_id][REGEX_NAME]
+            regex_pattern = regexes[regex_id][REGEX_EXTRACT_NAME]
+            test_name_regex = None
+            if regex_pattern:
+                test_name_regex = re.compile(regex_pattern, re.S | re.M)
+            self.create_squad_tests(
+                testrun,
+                suite_name,
+                test_name,
+                snippets[regex_id],
+                test_name_regex,
+                create_shas=False,
+                print=print,
+                squad=squad,
+            )
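
One detail worth noting about split_by_regex above: split_regex_gcc is built from capturing groups, and re.split keeps captured separators in its output, so the make invocations, "Entering directory" lines and "In file" headers come back as their own blocks for process_blocks to track as context. In miniature, with an illustrative log and a cut-down pattern:

import re

log = "make --jobs=8\ncc1: error: something broke\nmake[1]: Entering directory '/tmp'\n"
pattern = r"(.*?)(^make .*?$|^make\[\d+\]: Entering directory.*?$)"
blocks = [b for b in re.split(pattern, log, flags=re.DOTALL | re.MULTILINE) if b]
# ['make --jobs=8', '\ncc1: error: something broke\n', "make[1]: Entering directory '/tmp'", '\n']
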
squad/version.py CHANGED
@@ -1 +1 @@
-__version__ = '1.91'
+__version__ = '1.93'
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: squad
-Version: 1.91
+Version: 1.93
 Summary: Software Quality Dashboard
 Home-page: https://github.com/Linaro/squad
 Author: Antonio Terceiro
@@ -10,14 +10,14 @@ squad/manage.py,sha256=Z-LXT67p0R-IzwJ9fLIAacEZmU0VUjqDOSg7j2ZSxJ4,1437
 squad/settings.py,sha256=0MZ48SV_7CTrLMik2ubWf8-ROQiFju6CKnUC3iR8KAc,14800
 squad/socialaccount.py,sha256=vySqPwQ3qVVpahuJ-Snln8K--yzRL3bw4Nx27AsB39A,789
 squad/urls.py,sha256=JiEfVW8YlzLPE52c2aHzdn5kVVKK4o22w8h5KOA6QhQ,2776
-squad/version.py,sha256=S8qXLXebPToWZIRCvA5VIWk_5c_MVZR49-YdwQ0ypzc,21
+squad/version.py,sha256=K6dPzAHfR0_z4DOLvpBamlquLtpey9vWxxcBlvLvrn4,21
 squad/wsgi.py,sha256=SF8T0cQ0OPVyuYjO5YXBIQzvSXQHV0M2BTmd4gP1rPs,387
 squad/api/__init__.py,sha256=CJiVakfAlHVN5mIFRVQYZQfuNUhUgWVbsdYTME4tq7U,1349
 squad/api/apps.py,sha256=Trk72p-iV1uGn0o5mdJn5HARUoHGbfgO49jwXvpkmdQ,141
 squad/api/ci.py,sha256=QjGIhSpm8gmIjH4Nd2NAWtJItSVleg3QOLxBU_p9h1E,7082
 squad/api/data.py,sha256=obKDV0-neEvj5lPF9VED2gy_hpfhGtLJABYvSY38ing,2379
 squad/api/filters.py,sha256=Zvp8DCJmiNquFWqvfVseEAAMYYPiT95RUjqKdzcqSnw,6917
-squad/api/prometheus.py,sha256=0usJgOz14g1a71sdfjM-cOC8IGXkpE-5-TqpvJj-Oyk,1840
+squad/api/prometheus.py,sha256=MEzSZtYYx6PXreIwZzcVHLp-1vVQ9IKhi9hb8b0vjUk,1839
 squad/api/rest.py,sha256=ZtbK0c1BLPPnsX79XlKFVYONM_VJ0vacWZ2JsdCd4l0,77342
 squad/api/urls.py,sha256=c-o27_RP0ynOtxuyRKUl274fFMWWrzoii31Mr2saxSQ,1414
 squad/api/utils.py,sha256=Sa8QFId3_oSqD2UOoY3Kuh54LLDLPNMq2sub5ktd6Fs,1160
@@ -33,7 +33,7 @@ squad/ci/backend/__init__.py,sha256=yhpotXT9F4IdAOXvGQ3-17eOHAFwoaqf9SnMX17ab30,
 squad/ci/backend/fake.py,sha256=7Rl-JXnBYThDomOBzBsN9XuVkSjSHTZjtZOURdowZbA,2397
 squad/ci/backend/lava.py,sha256=WeOJJNxv42geGf3Y6r-I0WnhWinxpSSgZAFAwfkiXGY,34039
 squad/ci/backend/null.py,sha256=htEd4NbrXLKdPgFfTS0Ixm8PdT6Ghat3BCYi2zjfuv0,5624
-squad/ci/backend/tuxsuite.py,sha256=HTYLyJvtraHnkMKOjYix66bq1QV4m8bamNBahV5SZZw,19129
+squad/ci/backend/tuxsuite.py,sha256=pFcNdcHpFzalHPQhbSY6ryOci_PU3LFsaNjSsgjbqGg,18676
 squad/ci/management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 squad/ci/management/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 squad/ci/management/commands/create_tuxsuite_boot_tests.py,sha256=JvjNusebLX71eyz9d-kaeCyekYSpzc1eXoeIqWK9ygo,4045
@@ -82,7 +82,7 @@ squad/core/comparison.py,sha256=LR3-Unv0CTmakFCDzF_h8fm2peTJzkv79mQWNau1iwI,2442
 squad/core/data.py,sha256=2zw56v7iYRTUc7wlhuUNgwIIMmK2w84hi-amR9J7EPU,2236
 squad/core/failures.py,sha256=X6lJVghM2fOrd-RfuHeLlezW2pt7owDZ8eX-Kn_Qrt0,918
 squad/core/history.py,sha256=QRSIoDOw6R6vUWMtsPMknsHGM7FaCAeuCYqASCayHTk,3541
-squad/core/models.py,sha256=sXQmgPtl54IZT7rDmJEU3QK6JSPbi0hTUGRsjwL6PIo,60851
+squad/core/models.py,sha256=peMmwugkToA35bRZTdcJ_4TIpzT3Pj4GXCQ4bWeAcpY,60992
 squad/core/notification.py,sha256=rOpO6F63w7_5l9gQgWBBEk-MFBjp7x_hVzoVIVyDze0,10030
 squad/core/plugins.py,sha256=FLgyoXXKnPBYEf2MgHup9M017rHuADHivLhgzmx_cJE,6354
 squad/core/queries.py,sha256=78fhIJZWXIlDryewYAt96beK1VJad66Ufu8cg3dHh4w,7698
@@ -105,6 +105,7 @@ squad/core/management/commands/migrate_test_runs.py,sha256=RHV06tb4gWyv_q-ooC821
 squad/core/management/commands/populate_metric_build_and_environment.py,sha256=DJP9_YLRso0RiERBVsB0GP4-GaiRtJb0rAiUQDfFNQk,3166
 squad/core/management/commands/populate_test_build_and_environment.py,sha256=0yHClC0x_8LSZlvT6Ag0BnipC9Xk-U6lcIaCsqAGEWk,3146
 squad/core/management/commands/prepdump.py,sha256=WM58leVdJj45KhWPw3DGO7vwnNY70ReXrJRSIIzGXkI,518
+squad/core/management/commands/run_log_parser.py,sha256=SeksSD1cnbgl8oRsD3wu12p30_FMw090T6ouQyO4ZsI,1113
 squad/core/management/commands/send-email.py,sha256=wb1o5oKLDyH2ZonnQY-Jw28Y0Mu61OHWP8b1AQGKqbU,1120
 squad/core/management/commands/update_project_statuses.py,sha256=JleCesbVhYOSXr90ntH7s5u9Isknt7EnlX22VC6yI78,2089
 squad/core/management/commands/users.py,sha256=qIp87xRMfKWHymsAft5-gnYajm2mgaiHvVn7z86DCT8,9429
@@ -426,17 +427,18 @@ squad/plugins/__init__.py,sha256=9BSzy2jFIoDpWlhD7odPPrLdW4CC3btBhdFCvB651dM,152
 squad/plugins/example.py,sha256=BKpwd315lHRIuNXJPteibpwfnI6C5eXYHYdFYBtVmsI,89
 squad/plugins/gerrit.py,sha256=CqO2KnFQzu9utr_TQ-sGr1wg3ln0B-bS2-c0_i8T5-c,7009
 squad/plugins/github.py,sha256=pdtLZw_7xNuzkaFvY_zWi0f2rsMlalXjKm7sz0eADz4,2429
-squad/plugins/linux_log_parser.py,sha256=WrDbyfupEcP1-E4ke9wjHiddio8sD5BFuEtF4AH0aXA,3274
+squad/plugins/linux_log_parser.py,sha256=HQVreyZLBmLuv-K-MjlN43sQQSkcls4hkUsjJ9_5WfM,3472
+squad/plugins/linux_log_parser_build.py,sha256=KotAmKX9OCvnJqjaCudrMJu8CytXf_CYV3zqEKi2zNQ,10439
 squad/plugins/lib/__init__.py,sha256=jzazbAvp2_ibblAs0cKZrmo9aR2EL3hKLyRDE008r2I,40
-squad/plugins/lib/base_log_parser.py,sha256=OW6JkZ3PM5RiDkt9UZ7OFFpUIArCxFUaqovynzwBL1Y,6573
+squad/plugins/lib/base_log_parser.py,sha256=LiBCtx3dajSXITRTYxfgAmCkVS8KLI2527JKDa-D9ow,8430
 squad/run/__init__.py,sha256=ssE8GPAGFiK6V0WpZYowav6Zqsd63dfDMMYasNa1sQg,1410
 squad/run/__main__.py,sha256=DOl8JOi4Yg7DdtwnUeGqtYBJ6P2k-D2psAEuYOjWr8w,66
 squad/run/listener.py,sha256=jBeOQhPGb4EdIREB1QsCzYuumsfJ-TqJPd3nR-0m59g,200
 squad/run/scheduler.py,sha256=CDJG3q5C0GuQuxwlMOfWTSSJpDdwbR6rzpbJfuA0xuw,277
 squad/run/worker.py,sha256=jtML0h5qKDuSbpJ6_rpWP4MT_rsGA7a24AhwGxBquzk,594
-squad-1.91.dist-info/COPYING,sha256=jOtLnuWt7d5Hsx6XXB2QxzrSe2sWWh3NgMfFRetluQM,35147
-squad-1.91.dist-info/METADATA,sha256=U074123n6U3T9v0BY5GTaFCynFxFjL4HA7GEoLQjXX4,1278
-squad-1.91.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
-squad-1.91.dist-info/entry_points.txt,sha256=J_jG3qnkoOHX4RFNGC0f83eJ4BSvK3pqLFkoF3HWfmA,195
-squad-1.91.dist-info/top_level.txt,sha256=_x9uqE1XppiiytmVTl_qNgpnXus6Gsef69HqfliE7WI,6
-squad-1.91.dist-info/RECORD,,
+squad-1.93.dist-info/COPYING,sha256=jOtLnuWt7d5Hsx6XXB2QxzrSe2sWWh3NgMfFRetluQM,35147
+squad-1.93.dist-info/METADATA,sha256=fQk8aD6kMXho1Q6eA5gllimHyMpDGRju3ql6qEo-3PM,1278
+squad-1.93.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+squad-1.93.dist-info/entry_points.txt,sha256=J_jG3qnkoOHX4RFNGC0f83eJ4BSvK3pqLFkoF3HWfmA,195
+squad-1.93.dist-info/top_level.txt,sha256=_x9uqE1XppiiytmVTl_qNgpnXus6Gsef69HqfliE7WI,6
+squad-1.93.dist-info/RECORD,,
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.44.0)
+Generator: bdist_wheel (0.45.1)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
File without changes