lintro-0.6.2-py3-none-any.whl → lintro-0.17.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lintro/__init__.py +1 -1
- lintro/cli.py +230 -14
- lintro/cli_utils/commands/__init__.py +8 -1
- lintro/cli_utils/commands/check.py +1 -0
- lintro/cli_utils/commands/config.py +325 -0
- lintro/cli_utils/commands/format.py +2 -2
- lintro/cli_utils/commands/init.py +361 -0
- lintro/cli_utils/commands/list_tools.py +180 -42
- lintro/cli_utils/commands/test.py +316 -0
- lintro/cli_utils/commands/versions.py +81 -0
- lintro/config/__init__.py +62 -0
- lintro/config/config_loader.py +420 -0
- lintro/config/lintro_config.py +189 -0
- lintro/config/tool_config_generator.py +403 -0
- lintro/enums/__init__.py +1 -0
- lintro/enums/darglint_strictness.py +10 -0
- lintro/enums/hadolint_enums.py +22 -0
- lintro/enums/tool_name.py +2 -0
- lintro/enums/tool_type.py +2 -0
- lintro/enums/yamllint_format.py +11 -0
- lintro/exceptions/__init__.py +1 -0
- lintro/formatters/__init__.py +1 -0
- lintro/formatters/core/__init__.py +1 -0
- lintro/formatters/core/output_style.py +11 -0
- lintro/formatters/core/table_descriptor.py +8 -0
- lintro/formatters/styles/csv.py +2 -0
- lintro/formatters/styles/grid.py +2 -0
- lintro/formatters/styles/html.py +2 -0
- lintro/formatters/styles/json.py +2 -0
- lintro/formatters/styles/markdown.py +2 -0
- lintro/formatters/styles/plain.py +2 -0
- lintro/formatters/tools/__init__.py +12 -0
- lintro/formatters/tools/black_formatter.py +27 -5
- lintro/formatters/tools/darglint_formatter.py +16 -1
- lintro/formatters/tools/eslint_formatter.py +108 -0
- lintro/formatters/tools/hadolint_formatter.py +13 -0
- lintro/formatters/tools/markdownlint_formatter.py +88 -0
- lintro/formatters/tools/prettier_formatter.py +15 -0
- lintro/formatters/tools/pytest_formatter.py +201 -0
- lintro/formatters/tools/ruff_formatter.py +26 -5
- lintro/formatters/tools/yamllint_formatter.py +14 -1
- lintro/models/__init__.py +1 -0
- lintro/models/core/__init__.py +1 -0
- lintro/models/core/tool_config.py +11 -7
- lintro/parsers/__init__.py +69 -9
- lintro/parsers/actionlint/actionlint_parser.py +1 -1
- lintro/parsers/bandit/__init__.py +6 -0
- lintro/parsers/bandit/bandit_issue.py +49 -0
- lintro/parsers/bandit/bandit_parser.py +99 -0
- lintro/parsers/black/black_issue.py +4 -0
- lintro/parsers/darglint/__init__.py +1 -0
- lintro/parsers/darglint/darglint_issue.py +11 -0
- lintro/parsers/eslint/__init__.py +6 -0
- lintro/parsers/eslint/eslint_issue.py +26 -0
- lintro/parsers/eslint/eslint_parser.py +63 -0
- lintro/parsers/markdownlint/__init__.py +6 -0
- lintro/parsers/markdownlint/markdownlint_issue.py +22 -0
- lintro/parsers/markdownlint/markdownlint_parser.py +113 -0
- lintro/parsers/prettier/__init__.py +1 -0
- lintro/parsers/prettier/prettier_issue.py +12 -0
- lintro/parsers/prettier/prettier_parser.py +1 -1
- lintro/parsers/pytest/__init__.py +21 -0
- lintro/parsers/pytest/pytest_issue.py +28 -0
- lintro/parsers/pytest/pytest_parser.py +483 -0
- lintro/parsers/ruff/ruff_parser.py +6 -2
- lintro/parsers/yamllint/__init__.py +1 -0
- lintro/tools/__init__.py +3 -1
- lintro/tools/core/__init__.py +1 -0
- lintro/tools/core/timeout_utils.py +112 -0
- lintro/tools/core/tool_base.py +286 -50
- lintro/tools/core/tool_manager.py +77 -24
- lintro/tools/core/version_requirements.py +482 -0
- lintro/tools/implementations/__init__.py +1 -0
- lintro/tools/implementations/pytest/pytest_command_builder.py +311 -0
- lintro/tools/implementations/pytest/pytest_config.py +200 -0
- lintro/tools/implementations/pytest/pytest_error_handler.py +128 -0
- lintro/tools/implementations/pytest/pytest_executor.py +122 -0
- lintro/tools/implementations/pytest/pytest_handlers.py +375 -0
- lintro/tools/implementations/pytest/pytest_option_validators.py +212 -0
- lintro/tools/implementations/pytest/pytest_output_processor.py +408 -0
- lintro/tools/implementations/pytest/pytest_result_processor.py +113 -0
- lintro/tools/implementations/pytest/pytest_utils.py +697 -0
- lintro/tools/implementations/tool_actionlint.py +106 -16
- lintro/tools/implementations/tool_bandit.py +34 -29
- lintro/tools/implementations/tool_black.py +236 -29
- lintro/tools/implementations/tool_darglint.py +183 -22
- lintro/tools/implementations/tool_eslint.py +374 -0
- lintro/tools/implementations/tool_hadolint.py +94 -25
- lintro/tools/implementations/tool_markdownlint.py +354 -0
- lintro/tools/implementations/tool_prettier.py +317 -24
- lintro/tools/implementations/tool_pytest.py +327 -0
- lintro/tools/implementations/tool_ruff.py +278 -84
- lintro/tools/implementations/tool_yamllint.py +448 -34
- lintro/tools/tool_enum.py +8 -0
- lintro/utils/__init__.py +1 -0
- lintro/utils/ascii_normalize_cli.py +5 -0
- lintro/utils/config.py +41 -18
- lintro/utils/console_logger.py +211 -25
- lintro/utils/path_utils.py +42 -0
- lintro/utils/tool_executor.py +339 -45
- lintro/utils/tool_utils.py +51 -24
- lintro/utils/unified_config.py +926 -0
- {lintro-0.6.2.dist-info → lintro-0.17.2.dist-info}/METADATA +172 -30
- lintro-0.17.2.dist-info/RECORD +134 -0
- lintro-0.6.2.dist-info/RECORD +0 -96
- {lintro-0.6.2.dist-info → lintro-0.17.2.dist-info}/WHEEL +0 -0
- {lintro-0.6.2.dist-info → lintro-0.17.2.dist-info}/entry_points.txt +0 -0
- {lintro-0.6.2.dist-info → lintro-0.17.2.dist-info}/licenses/LICENSE +0 -0
- {lintro-0.6.2.dist-info → lintro-0.17.2.dist-info}/top_level.txt +0 -0
lintro/parsers/pytest/pytest_parser.py
ADDED
@@ -0,0 +1,483 @@
+"""Parser for pytest output.
+
+This module provides functions to parse pytest output in various formats:
+- JSON output from pytest --json-report
+- Plain text output from pytest
+- JUnit XML output from pytest --junitxml
+"""
+
+import json
+import re
+from dataclasses import dataclass
+
+from defusedxml import ElementTree
+
+from lintro.parsers.pytest.pytest_issue import PytestIssue
+
+
+@dataclass
+class PytestSummary:
+    """Summary statistics from pytest execution.
+
+    Attributes:
+        total: Total number of tests run.
+        passed: Number of tests that passed.
+        failed: Number of tests that failed.
+        skipped: Number of tests that were skipped.
+        error: Number of tests that had errors (setup/teardown failures).
+        xfailed: Number of tests that were expected to fail and did fail.
+        xpassed: Number of tests that were expected to fail but passed.
+        duration: Total execution duration in seconds.
+    """
+
+    total: int = 0
+    passed: int = 0
+    failed: int = 0
+    skipped: int = 0
+    error: int = 0
+    xfailed: int = 0
+    xpassed: int = 0
+    duration: float = 0.0
+
+
+def extract_pytest_summary(output: str) -> PytestSummary:
+    """Extract test summary statistics from pytest output.
+
+    Parses the summary line from pytest output to extract:
+    - Number of passed tests
+    - Number of failed tests
+    - Number of skipped tests
+    - Number of error tests
+    - Execution duration
+
+    Args:
+        output: Raw output from pytest.
+
+    Returns:
+        PytestSummary: Extracted summary statistics.
+    """
+    summary = PytestSummary()
+
+    if not output:
+        return summary
+
+    # Strip ANSI color codes
+    ansi_re = re.compile(r"\x1b\[[0-9;]*m")
+    clean_output = ansi_re.sub("", output)
+
+    # Extract duration first (it's always at the end)
+    duration_match = re.search(r"in\s+([\d.]+)s", clean_output)
+    if duration_match:
+        summary.duration = float(duration_match.group(1))
+
+    # Extract counts independently since order can vary
+    # Patterns handle various formats like:
+    # - "511 passed in 18.53s"
+    # - "509 passed, 2 failed in 18.53s"
+    # - "7 failed, 505 passed, 1 warning in 17.06s"
+    # - "510 passed, 1 skipped in 18.53s"
+
+    passed_match = re.search(r"(\d+)\s+passed", clean_output)
+    if passed_match:
+        summary.passed = int(passed_match.group(1))
+
+    failed_match = re.search(r"(\d+)\s+failed", clean_output)
+    if failed_match:
+        summary.failed = int(failed_match.group(1))
+
+    skipped_match = re.search(r"(\d+)\s+skipped", clean_output)
+    if skipped_match:
+        summary.skipped = int(skipped_match.group(1))
+
+    error_match = re.search(r"(\d+)\s+errors?", clean_output)
+    if error_match:
+        summary.error = int(error_match.group(1))
+
+    xfailed_match = re.search(r"(\d+)\s+xfailed", clean_output)
+    if xfailed_match:
+        summary.xfailed = int(xfailed_match.group(1))
+
+    xpassed_match = re.search(r"(\d+)\s+xpassed", clean_output)
+    if xpassed_match:
+        summary.xpassed = int(xpassed_match.group(1))
+
+    # Calculate total as sum of all test outcomes
+    summary.total = (
+        summary.passed
+        + summary.failed
+        + summary.skipped
+        + summary.error
+        + summary.xfailed
+        + summary.xpassed
+    )
+
+    return summary
+
+
+def parse_pytest_json_output(output: str) -> list[PytestIssue]:
+    """Parse pytest JSON output into PytestIssue objects.
+
+    Args:
+        output: Raw output from pytest with --json-report.
+
+    Returns:
+        list[PytestIssue]: Parsed test failures, errors, and skips.
+    """
+    issues: list[PytestIssue] = []
+
+    if not output or output.strip() in ("{}", "[]"):
+        return issues
+
+    try:
+        data = json.loads(output)
+
+        # Handle different JSON report formats
+        if "tests" in data:
+            # pytest-json-report format
+            for test in data["tests"]:
+                if test.get("outcome") in ("failed", "error", "skipped"):
+                    issues.append(_parse_json_test_item(test))
+        elif isinstance(data, list):
+            # Alternative JSON format
+            for item in data:
+                if isinstance(item, dict) and item.get("outcome") in (
+                    "failed",
+                    "error",
+                    "skipped",
+                ):
+                    issues.append(_parse_json_test_item(item))
+
+    except (json.JSONDecodeError, TypeError, KeyError):
+        pass
+
+    return issues
+
+
+def _parse_json_test_item(test_item: dict) -> PytestIssue:
+    """Parse a single test item from JSON output.
+
+    Args:
+        test_item: Dictionary containing test information.
+
+    Returns:
+        PytestIssue: Parsed test issue.
+    """
+    file_path = test_item.get("file", "")
+    line = test_item.get("lineno", 0)
+    test_name = test_item.get("name", "")
+    message = test_item.get("call", {}).get("longrepr", "") or test_item.get(
+        "longrepr",
+        "",
+    )
+    status = test_item.get("outcome", "UNKNOWN")
+    duration = test_item.get("duration", 0.0)
+    node_id = test_item.get("nodeid", "")
+
+    return PytestIssue(
+        file=file_path,
+        line=line,
+        test_name=test_name,
+        message=message,
+        test_status=status.upper(),
+        duration=duration,
+        node_id=node_id,
+    )
+
+
+def parse_pytest_text_output(output: str) -> list[PytestIssue]:
+    """Parse pytest plain text output into PytestIssue objects.
+
+    Args:
+        output: Raw output from pytest.
+
+    Returns:
+        list[PytestIssue]: Parsed test failures, errors, and skips.
+    """
+    issues: list[PytestIssue] = []
+
+    if not output:
+        return issues
+
+    lines = output.splitlines()
+    current_file = ""
+    current_line = 0
+
+    # Patterns for different pytest output formats
+    file_pattern = re.compile(r"^(.+\.py)::(.+)$")
+    failure_pattern = re.compile(r"^FAILED\s+(.+\.py)::(.+)\s+-\s+(.+)$")
+    error_pattern = re.compile(r"^ERROR\s+(.+\.py)::(.+)\s+-\s+(.+)$")
+    skipped_pattern = re.compile(r"^(.+\.py)::([^\s]+)\s+SKIPPED\s+\((.+)\)\s+\[")
+    line_pattern = re.compile(r"^(.+\.py):(\d+):\s+(.+)$")
+
+    # Alternative patterns for different pytest output formats
+    # Use non-greedy matching for test name to stop at first space
+    failure_pattern_alt = re.compile(r"^FAILED\s+(.+\.py)::([^\s]+)\s+(.+)$")
+    error_pattern_alt = re.compile(r"^ERROR\s+(.+\.py)::([^\s]+)\s+(.+)$")
+    # Alternative skipped pattern without trailing bracket (for compact output)
+    skipped_pattern_alt = re.compile(r"^(.+\.py)::([^\s]+)\s+SKIPPED\s+\((.+)\)$")
+
+    # Strip ANSI color codes for stable parsing
+    ansi_re = re.compile(r"\x1b\[[0-9;]*m")
+
+    for line in lines:
+        # Strip ANSI color codes for stable parsing
+        line = ansi_re.sub("", line).strip()
+
+        # Match FAILED format
+        failure_match = failure_pattern.match(line)
+        if failure_match:
+            file_path = failure_match.group(1)
+            test_name = failure_match.group(2)
+            message = failure_match.group(3)
+            issues.append(
+                PytestIssue(
+                    file=file_path,
+                    line=0,
+                    test_name=test_name,
+                    message=message,
+                    test_status="FAILED",
+                ),
+            )
+            continue
+
+        # Match FAILED format (alternative)
+        failure_match_alt = failure_pattern_alt.match(line)
+        if failure_match_alt:
+            file_path = failure_match_alt.group(1)
+            test_name = failure_match_alt.group(2)
+            message = failure_match_alt.group(3)
+            issues.append(
+                PytestIssue(
+                    file=file_path,
+                    line=0,
+                    test_name=test_name,
+                    message=message,
+                    test_status="FAILED",
+                ),
+            )
+            continue
+
+        # Match ERROR format
+        error_match = error_pattern.match(line)
+        if error_match:
+            file_path = error_match.group(1)
+            test_name = error_match.group(2)
+            message = error_match.group(3)
+            issues.append(
+                PytestIssue(
+                    file=file_path,
+                    line=0,
+                    test_name=test_name,
+                    message=message,
+                    test_status="ERROR",
+                ),
+            )
+            continue
+
+        # Match ERROR format (alternative)
+        error_match_alt = error_pattern_alt.match(line)
+        if error_match_alt:
+            file_path = error_match_alt.group(1)
+            test_name = error_match_alt.group(2)
+            message = error_match_alt.group(3)
+            issues.append(
+                PytestIssue(
+                    file=file_path,
+                    line=0,
+                    test_name=test_name,
+                    message=message,
+                    test_status="ERROR",
+                ),
+            )
+            continue
+
+        # Match SKIPPED format
+        skipped_match = skipped_pattern.match(line)
+        if skipped_match:
+            file_path = skipped_match.group(1)
+            test_name = skipped_match.group(2)
+            message = skipped_match.group(3)
+            issues.append(
+                PytestIssue(
+                    file=file_path,
+                    line=0,
+                    test_name=test_name,
+                    message=message,
+                    test_status="SKIPPED",
+                ),
+            )
+            continue
+
+        # Match SKIPPED format (alternative)
+        skipped_match_alt = skipped_pattern_alt.match(line)
+        if skipped_match_alt:
+            file_path = skipped_match_alt.group(1)
+            test_name = skipped_match_alt.group(2)
+            message = skipped_match_alt.group(3)
+            issues.append(
+                PytestIssue(
+                    file=file_path,
+                    line=0,
+                    test_name=test_name,
+                    message=message,
+                    test_status="SKIPPED",
+                ),
+            )
+            continue
+
+        # Match file::test format
+        file_match = file_pattern.match(line)
+        if file_match:
+            current_file = file_match.group(1)
+            continue
+
+        # Match line number format
+        line_match = line_pattern.match(line)
+        if line_match:
+            current_file = line_match.group(1)
+            current_line = int(line_match.group(2))
+            message = line_match.group(3)
+            if "FAILED" in message or "ERROR" in message or "SKIPPED" in message:
+                # Extract just the error message without the status prefix
+                if message.startswith("FAILED - "):
+                    message = message[9:]  # Remove "FAILED - "
+                    status = "FAILED"
+                elif message.startswith("ERROR - "):
+                    message = message[8:]  # Remove "ERROR - "
+                    status = "ERROR"
+                elif message.startswith("SKIPPED - "):
+                    message = message[10:]  # Remove "SKIPPED - "
+                    status = "SKIPPED"
+                else:
+                    status = "UNKNOWN"
+
+                issues.append(
+                    PytestIssue(
+                        file=current_file,
+                        line=current_line,
+                        test_name="",
+                        message=message,
+                        test_status=status,
+                    ),
+                )
+
+    return issues
+
+
+def parse_pytest_junit_xml(output: str) -> list[PytestIssue]:
+    """Parse pytest JUnit XML output into PytestIssue objects.
+
+    Args:
+        output: Raw output from pytest with --junitxml.
+
+    Returns:
+        list[PytestIssue]: Parsed test failures, errors, and skips.
+    """
+    issues: list[PytestIssue] = []
+
+    if not output:
+        return issues
+
+    try:
+        root = ElementTree.fromstring(output)
+
+        # Handle different JUnit XML structures
+        for testcase in root.findall(".//testcase"):
+            file_path = testcase.get("file", "")
+            line = int(testcase.get("line", 0))
+            test_name = testcase.get("name", "")
+            duration = float(testcase.get("time", 0.0))
+            class_name = testcase.get("classname", "")
+            # If file attribute is missing, try to derive it from classname
+            if not file_path and class_name:
+                # Convert class name like
+                # "tests.scripts.test_script_environment.TestEnvironmentHandling"
+                # to file path like "tests/scripts/test_script_environment.py"
+                class_parts = class_name.split(".")
+                if len(class_parts) >= 2 and class_parts[0] == "tests":
+                    file_path = "/".join(class_parts[:-1]) + ".py"
+            node_id = f"{class_name}::{test_name}" if class_name else test_name
+
+            # Check for failure
+            failure = testcase.find("failure")
+            if failure is not None:
+                message = failure.text or failure.get("message", "")
+                issues.append(
+                    PytestIssue(
+                        file=file_path,
+                        line=line,
+                        test_name=test_name,
+                        message=message,
+                        test_status="FAILED",
+                        duration=duration,
+                        node_id=node_id,
+                    ),
+                )
+
+            # Check for error
+            error = testcase.find("error")
+            if error is not None:
+                message = error.text or error.get("message", "")
+                issues.append(
+                    PytestIssue(
+                        file=file_path,
+                        line=line,
+                        test_name=test_name,
+                        message=message,
+                        test_status="ERROR",
+                        duration=duration,
+                        node_id=node_id,
+                    ),
+                )
+
+            # Check for skip
+            skip = testcase.find("skipped")
+            if skip is not None:
+                message = skip.text or skip.get("message", "")
+                # Clean up skip message by removing file path prefix if present
+                # Format is typically: "/path/to/file.py:line: actual message"
+                if message and ":" in message:
+                    # Find the first colon after a file path pattern
+                    parts = message.split(":")
+                    if (
+                        len(parts) >= 3
+                        and parts[0].startswith("/")
+                        and parts[0].endswith(".py")
+                    ):
+                        # Remove file path and line number, keep only the actual reason
+                        message = ":".join(parts[2:]).lstrip()
+
+                issues.append(
+                    PytestIssue(
+                        file=file_path,
+                        line=line,
+                        test_name=test_name,
+                        message=message,
+                        test_status="SKIPPED",
+                        duration=duration,
+                        node_id=node_id,
+                    ),
+                )
+
+    except ElementTree.ParseError:
+        pass
+
+    return issues
+
+
+def parse_pytest_output(output: str, format: str = "text") -> list[PytestIssue]:
+    """Parse pytest output based on the specified format.
+
+    Args:
+        output: Raw output from pytest.
+        format: Output format ("json", "text", "junit").
+
+    Returns:
+        list[PytestIssue]: Parsed test failures and errors.
+    """
+    if format == "json":
+        return parse_pytest_json_output(output)
+    elif format == "junit":
+        return parse_pytest_junit_xml(output)
+    else:
+        return parse_pytest_text_output(output)
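Note: a minimal usage sketch of the parser module added above. The sample pytest output string is illustrative only (not taken from the package); the imports and signatures match the hunk.

from lintro.parsers.pytest.pytest_parser import (
    extract_pytest_summary,
    parse_pytest_output,
)

# Illustrative pytest output: one short-form failure line plus a summary line.
sample = (
    "FAILED tests/test_math.py::test_add - AssertionError: 2 != 3\n"
    "1 failed, 10 passed in 0.42s\n"
)

summary = extract_pytest_summary(sample)  # passed=10, failed=1, total=11, duration=0.42
issues = parse_pytest_output(sample, format="text")  # one PytestIssue with test_status="FAILED"
print(summary.failed, len(issues))  # -> 1 1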
lintro/parsers/ruff/ruff_parser.py
CHANGED
@@ -129,8 +129,12 @@ def parse_ruff_format_check_output(output: str) -> list[str]:
     if not output:
         return []
     files = []
-
-
+    import re
+
+    ansi_re = re.compile(r"\x1b\[[0-9;]*m")
+    for raw in output.splitlines():
+        # Strip ANSI color codes for stable parsing across environments
+        line = ansi_re.sub("", raw).strip()
         # Ruff format --check output: 'Would reformat: path/to/file.py' or
         # 'Would reformat path/to/file.py'
         if line.startswith("Would reformat: "):
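For context, the ANSI stripping added in this hunk behaves as follows. The colored input below is illustrative; the escape sequences stand in for ruff's colorized "Would reformat" lines.

import re

ansi_re = re.compile(r"\x1b\[[0-9;]*m")
colored = "\x1b[1mWould reformat: src/app.py\x1b[0m"
print(ansi_re.sub("", colored).strip())  # -> Would reformat: src/app.py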
lintro/parsers/yamllint/__init__.py
ADDED
@@ -0,0 +1 @@
+"""Parsing utilities and types for Yamllint output."""
lintro/tools/__init__.py
CHANGED
@@ -10,6 +10,7 @@ from lintro.tools.implementations.tool_black import BlackTool
 from lintro.tools.implementations.tool_darglint import DarglintTool
 from lintro.tools.implementations.tool_hadolint import HadolintTool
 from lintro.tools.implementations.tool_prettier import PrettierTool
+from lintro.tools.implementations.tool_pytest import PytestTool
 from lintro.tools.implementations.tool_ruff import RuffTool
 from lintro.tools.implementations.tool_yamllint import YamllintTool
 from lintro.tools.tool_enum import ToolEnum
@@ -21,7 +22,7 @@ tool_manager = ToolManager()
 AVAILABLE_TOOLS = {tool_enum: tool_enum.value for tool_enum in ToolEnum}


-for
+for _tool_enum, tool_class in AVAILABLE_TOOLS.items():
     tool_manager.register_tool(tool_class)

 # Consolidated exports
@@ -39,6 +40,7 @@ __all__ = [
     "DarglintTool",
     "HadolintTool",
     "PrettierTool",
+    "PytestTool",
     "RuffTool",
     "YamllintTool",
 ]
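After this change the pytest tool is registered through the same AVAILABLE_TOOLS loop as the existing tools and exported from the package root. A minimal check, assuming a local install of 0.17.2:

from lintro.tools import AVAILABLE_TOOLS, PytestTool, tool_manager

print(PytestTool)            # exported alongside RuffTool, YamllintTool, etc.
print(len(AVAILABLE_TOOLS))  # includes the pytest entry in this release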
lintro/tools/core/__init__.py
CHANGED
@@ -0,0 +1 @@
+"""Base classes and utilities for tool implementations."""
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
"""Shared timeout handling utilities for tool implementations.
|
|
2
|
+
|
|
3
|
+
This module provides standardized timeout handling across different tools,
|
|
4
|
+
ensuring consistent behavior and error messages for subprocess timeouts.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import subprocess # nosec B404 - used safely with shell disabled
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from loguru import logger
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def run_subprocess_with_timeout(
|
|
14
|
+
tool,
|
|
15
|
+
cmd: list[str],
|
|
16
|
+
timeout: int | None = None,
|
|
17
|
+
cwd: str | None = None,
|
|
18
|
+
tool_name: str | None = None,
|
|
19
|
+
) -> tuple[bool, str]:
|
|
20
|
+
"""Run a subprocess command with timeout handling.
|
|
21
|
+
|
|
22
|
+
This is a wrapper around tool._run_subprocess that provides consistent
|
|
23
|
+
timeout error handling and messaging across different tools.
|
|
24
|
+
|
|
25
|
+
Args:
|
|
26
|
+
tool: Tool instance with _run_subprocess method.
|
|
27
|
+
cmd: Command to run.
|
|
28
|
+
timeout: Timeout in seconds. If None, uses tool's default timeout.
|
|
29
|
+
cwd: Working directory for command execution.
|
|
30
|
+
tool_name: Name of the tool for error messages. If None, uses tool.name.
|
|
31
|
+
|
|
32
|
+
Returns:
|
|
33
|
+
tuple[bool, str]: (success, output) where success is True if command
|
|
34
|
+
succeeded without timeout, and output contains command output or
|
|
35
|
+
timeout error message.
|
|
36
|
+
|
|
37
|
+
Raises:
|
|
38
|
+
TimeoutExpired: If command times out (re-raised from subprocess).
|
|
39
|
+
"""
|
|
40
|
+
tool_name = tool_name or tool.name
|
|
41
|
+
|
|
42
|
+
try:
|
|
43
|
+
return tool._run_subprocess(cmd=cmd, timeout=timeout, cwd=cwd)
|
|
44
|
+
except subprocess.TimeoutExpired as e:
|
|
45
|
+
# Re-raise with more context for the calling tool
|
|
46
|
+
actual_timeout = timeout or tool.options.get("timeout", tool._default_timeout)
|
|
47
|
+
timeout_msg = (
|
|
48
|
+
f"{tool_name} execution timed out ({actual_timeout}s limit exceeded).\n\n"
|
|
49
|
+
"This may indicate:\n"
|
|
50
|
+
" - Large codebase taking too long to process\n"
|
|
51
|
+
" - Need to increase timeout via --tool-options timeout=N\n"
|
|
52
|
+
" - Command hanging due to external dependencies\n"
|
|
53
|
+
)
|
|
54
|
+
logger.warning(timeout_msg)
|
|
55
|
+
|
|
56
|
+
# Create a new TimeoutExpired with enhanced message
|
|
57
|
+
raise subprocess.TimeoutExpired(
|
|
58
|
+
cmd=cmd,
|
|
59
|
+
timeout=actual_timeout,
|
|
60
|
+
output=timeout_msg,
|
|
61
|
+
) from e
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def get_timeout_value(tool, default_timeout: int | None = None) -> int:
|
|
65
|
+
"""Get timeout value from tool options with fallback to default.
|
|
66
|
+
|
|
67
|
+
Args:
|
|
68
|
+
tool: Tool instance with options.
|
|
69
|
+
default_timeout: Default timeout if not specified in options.
|
|
70
|
+
|
|
71
|
+
Returns:
|
|
72
|
+
int: Timeout value in seconds.
|
|
73
|
+
"""
|
|
74
|
+
if default_timeout is None:
|
|
75
|
+
default_timeout = getattr(tool, "_default_timeout", 300)
|
|
76
|
+
|
|
77
|
+
return tool.options.get("timeout", default_timeout)
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def create_timeout_result(
|
|
81
|
+
tool,
|
|
82
|
+
timeout: int,
|
|
83
|
+
cmd: list[str] | None = None,
|
|
84
|
+
tool_name: str | None = None,
|
|
85
|
+
) -> dict[str, Any]:
|
|
86
|
+
"""Create a standardized timeout result dictionary.
|
|
87
|
+
|
|
88
|
+
Args:
|
|
89
|
+
tool: Tool instance.
|
|
90
|
+
timeout: Timeout value that was exceeded.
|
|
91
|
+
cmd: Optional command that timed out.
|
|
92
|
+
tool_name: Optional tool name override.
|
|
93
|
+
|
|
94
|
+
Returns:
|
|
95
|
+
dict: Result dictionary with timeout information.
|
|
96
|
+
"""
|
|
97
|
+
tool_name = tool_name or tool.name
|
|
98
|
+
|
|
99
|
+
return {
|
|
100
|
+
"success": False,
|
|
101
|
+
"output": (
|
|
102
|
+
f"{tool_name} execution timed out ({timeout}s limit exceeded).\n\n"
|
|
103
|
+
"This may indicate:\n"
|
|
104
|
+
" - Large codebase taking too long to process\n"
|
|
105
|
+
" - Need to increase timeout via --tool-options timeout=N\n"
|
|
106
|
+
" - Command hanging due to external dependencies\n"
|
|
107
|
+
),
|
|
108
|
+
"issues_count": 1, # Count timeout as execution failure
|
|
109
|
+
"issues": [],
|
|
110
|
+
"timed_out": True,
|
|
111
|
+
"timeout_seconds": timeout,
|
|
112
|
+
}
|