lintro-0.13.2-py3-none-any.whl → lintro-0.17.2-py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
- lintro/__init__.py +1 -1
- lintro/cli.py +226 -16
- lintro/cli_utils/commands/__init__.py +8 -1
- lintro/cli_utils/commands/check.py +1 -0
- lintro/cli_utils/commands/config.py +325 -0
- lintro/cli_utils/commands/init.py +361 -0
- lintro/cli_utils/commands/list_tools.py +180 -42
- lintro/cli_utils/commands/test.py +316 -0
- lintro/cli_utils/commands/versions.py +81 -0
- lintro/config/__init__.py +62 -0
- lintro/config/config_loader.py +420 -0
- lintro/config/lintro_config.py +189 -0
- lintro/config/tool_config_generator.py +403 -0
- lintro/enums/tool_name.py +2 -0
- lintro/enums/tool_type.py +2 -0
- lintro/formatters/tools/__init__.py +12 -0
- lintro/formatters/tools/eslint_formatter.py +108 -0
- lintro/formatters/tools/markdownlint_formatter.py +88 -0
- lintro/formatters/tools/pytest_formatter.py +201 -0
- lintro/parsers/__init__.py +69 -9
- lintro/parsers/bandit/__init__.py +6 -0
- lintro/parsers/bandit/bandit_issue.py +49 -0
- lintro/parsers/bandit/bandit_parser.py +99 -0
- lintro/parsers/black/black_issue.py +4 -0
- lintro/parsers/eslint/__init__.py +6 -0
- lintro/parsers/eslint/eslint_issue.py +26 -0
- lintro/parsers/eslint/eslint_parser.py +63 -0
- lintro/parsers/markdownlint/__init__.py +6 -0
- lintro/parsers/markdownlint/markdownlint_issue.py +22 -0
- lintro/parsers/markdownlint/markdownlint_parser.py +113 -0
- lintro/parsers/pytest/__init__.py +21 -0
- lintro/parsers/pytest/pytest_issue.py +28 -0
- lintro/parsers/pytest/pytest_parser.py +483 -0
- lintro/tools/__init__.py +2 -0
- lintro/tools/core/timeout_utils.py +112 -0
- lintro/tools/core/tool_base.py +255 -45
- lintro/tools/core/tool_manager.py +77 -24
- lintro/tools/core/version_requirements.py +482 -0
- lintro/tools/implementations/pytest/pytest_command_builder.py +311 -0
- lintro/tools/implementations/pytest/pytest_config.py +200 -0
- lintro/tools/implementations/pytest/pytest_error_handler.py +128 -0
- lintro/tools/implementations/pytest/pytest_executor.py +122 -0
- lintro/tools/implementations/pytest/pytest_handlers.py +375 -0
- lintro/tools/implementations/pytest/pytest_option_validators.py +212 -0
- lintro/tools/implementations/pytest/pytest_output_processor.py +408 -0
- lintro/tools/implementations/pytest/pytest_result_processor.py +113 -0
- lintro/tools/implementations/pytest/pytest_utils.py +697 -0
- lintro/tools/implementations/tool_actionlint.py +106 -16
- lintro/tools/implementations/tool_bandit.py +23 -7
- lintro/tools/implementations/tool_black.py +236 -29
- lintro/tools/implementations/tool_darglint.py +180 -21
- lintro/tools/implementations/tool_eslint.py +374 -0
- lintro/tools/implementations/tool_hadolint.py +94 -25
- lintro/tools/implementations/tool_markdownlint.py +354 -0
- lintro/tools/implementations/tool_prettier.py +313 -26
- lintro/tools/implementations/tool_pytest.py +327 -0
- lintro/tools/implementations/tool_ruff.py +247 -70
- lintro/tools/implementations/tool_yamllint.py +448 -34
- lintro/tools/tool_enum.py +6 -0
- lintro/utils/config.py +41 -18
- lintro/utils/console_logger.py +211 -25
- lintro/utils/path_utils.py +42 -0
- lintro/utils/tool_executor.py +336 -39
- lintro/utils/tool_utils.py +38 -2
- lintro/utils/unified_config.py +926 -0
- {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/METADATA +131 -29
- lintro-0.17.2.dist-info/RECORD +134 -0
- lintro-0.13.2.dist-info/RECORD +0 -96
- {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/WHEEL +0 -0
- {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/entry_points.txt +0 -0
- {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/licenses/LICENSE +0 -0
- {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/top_level.txt +0 -0
lintro/parsers/markdownlint/markdownlint_parser.py
ADDED
@@ -0,0 +1,113 @@
+"""Parser for markdownlint-cli2 output."""
+
+import re
+
+from lintro.parsers.markdownlint.markdownlint_issue import MarkdownlintIssue
+
+
+def parse_markdownlint_output(output: str) -> list[MarkdownlintIssue]:
+    """Parse markdownlint-cli2 output into a list of MarkdownlintIssue objects.
+
+    Markdownlint-cli2 default formatter outputs lines like:
+        file:line:column MD###/rule-name Message [Context: "..."]
+    or
+        file:line MD###/rule-name Message [Context: "..."]
+
+    Example outputs:
+        dir/about.md:1:1 MD021/no-multiple-space-closed-atx Multiple spaces
+            inside hashes on closed atx style heading [Context: "# About #"]
+        dir/about.md:4 MD032/blanks-around-lists Lists should be surrounded
+            by blank lines [Context: "1. List"]
+        viewme.md:3:10 MD009/no-trailing-spaces Trailing spaces
+            [Expected: 0 or 2; Actual: 1]
+
+    Args:
+        output: The raw output from markdownlint-cli2
+
+    Returns:
+        List of MarkdownlintIssue objects
+    """
+    issues: list[MarkdownlintIssue] = []
+
+    # Skip empty output
+    if not output.strip():
+        return issues
+
+    lines: list[str] = output.splitlines()
+
+    # Pattern for markdownlint-cli2 default formatter:
+    # file:line[:column] [error] MD###/rule-name Message [Context: "..."]
+    # Column is optional, "error" keyword is optional, and Context is optional
+    # Also handles variations like: file:line MD### Message
+    #   [Expected: ...; Actual: ...]
+    pattern: re.Pattern[str] = re.compile(
+        r"^([^:]+):(\d+)(?::(\d+))?\s+(?:error\s+)?(MD\d+)(?:/[^:\s]+)?(?::\s*)?"
+        r"(.+?)(?:\s+\[(?:Context|Expected|Actual):.*?\])?$",
+    )
+
+    i = 0
+    while i < len(lines):
+        line = lines[i]
+
+        # Skip empty lines
+        if not line.strip():
+            i += 1
+            continue
+
+        # Skip metadata lines (version, Finding, Linting, Summary)
+        stripped_line = line.strip()
+        if (
+            stripped_line.startswith("markdownlint-cli2")
+            or stripped_line.startswith("Finding:")
+            or stripped_line.startswith("Linting:")
+            or stripped_line.startswith("Summary:")
+        ):
+            i += 1
+            continue
+
+        # Try to match the pattern on the current line
+        match: re.Match[str] | None = pattern.match(stripped_line)
+        if match:
+            filename: str
+            line_num: str
+            column: str | None
+            code: str
+            message: str
+            filename, line_num, column, code, message = match.groups()
+
+            # Collect continuation lines (lines that start with whitespace)
+            # These are part of the multi-line message
+            i += 1
+            continuation_lines: list[str] = []
+            while i < len(lines):
+                next_line = lines[i]
+                # Continuation lines start with whitespace (indentation)
+                # Empty lines break the continuation
+                if not next_line.strip():
+                    break
+                if next_line[0].isspace():
+                    continuation_lines.append(next_line.strip())
+                    i += 1
+                else:
+                    # Next line doesn't start with whitespace, stop collecting
+                    break
+
+            # Combine main message with continuation lines
+            full_message = message.strip()
+            if continuation_lines:
+                full_message = " ".join([full_message] + continuation_lines)
+
+            issues.append(
+                MarkdownlintIssue(
+                    file=filename,
+                    line=int(line_num),
+                    column=int(column) if column else None,
+                    code=code,
+                    message=full_message,
+                ),
+            )
+        else:
+            # Line doesn't match pattern, skip it
+            i += 1
+
+    return issues
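For orientation, a minimal sketch of this parser in action, using one of the example lines quoted in its own docstring. The snippet is illustrative only and is not part of the package:

```python
from lintro.parsers.markdownlint.markdownlint_parser import parse_markdownlint_output

# One of the docstring's own example lines; the trailing [Expected: ...]
# suffix is stripped by the pattern's optional bracket group.
raw = 'viewme.md:3:10 MD009/no-trailing-spaces Trailing spaces [Expected: 0 or 2; Actual: 1]'

(issue,) = parse_markdownlint_output(raw)
print(issue.file, issue.line, issue.column, issue.code)  # viewme.md 3 10 MD009
print(issue.message)  # Trailing spaces
```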
lintro/parsers/pytest/__init__.py
ADDED
@@ -0,0 +1,21 @@
+"""Pytest parser module."""
+
+from lintro.parsers.pytest.pytest_issue import PytestIssue
+from lintro.parsers.pytest.pytest_parser import (
+    PytestSummary,
+    extract_pytest_summary,
+    parse_pytest_json_output,
+    parse_pytest_junit_xml,
+    parse_pytest_output,
+    parse_pytest_text_output,
+)
+
+__all__ = [
+    "PytestIssue",
+    "PytestSummary",
+    "extract_pytest_summary",
+    "parse_pytest_json_output",
+    "parse_pytest_junit_xml",
+    "parse_pytest_output",
+    "parse_pytest_text_output",
+]
lintro/parsers/pytest/pytest_issue.py
ADDED
@@ -0,0 +1,28 @@
+"""Models for pytest issues."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+
+@dataclass
+class PytestIssue:
+    """Represents a pytest test result (failure, error, or skip).
+
+    Attributes:
+        file: File path where the test issue occurred.
+        line: Line number where the issue occurred.
+        test_name: Name of the test.
+        message: Error message, failure description, or skip reason.
+        test_status: Status of the test (FAILED, ERROR, SKIPPED, etc.).
+        duration: Duration of the test in seconds.
+        node_id: Full node ID of the test.
+    """
+
+    file: str
+    line: int
+    test_name: str
+    message: str
+    test_status: str
+    duration: float | None = None
+    node_id: str | None = None
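A minimal sketch of what one of these records might look like for a failing test; all field values here are hypothetical:

```python
from lintro.parsers.pytest.pytest_issue import PytestIssue

issue = PytestIssue(
    file="tests/test_math.py",  # hypothetical path
    line=0,  # pytest's short-summary lines carry no line number, so parsers use 0
    test_name="test_divide",
    message="ZeroDivisionError: division by zero",
    test_status="FAILED",
    node_id="tests/test_math.py::test_divide",
)
```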
lintro/parsers/pytest/pytest_parser.py
ADDED
@@ -0,0 +1,483 @@
+"""Parser for pytest output.
+
+This module provides functions to parse pytest output in various formats:
+- JSON output from pytest --json-report
+- Plain text output from pytest
+- JUnit XML output from pytest --junitxml
+"""
+
+import json
+import re
+from dataclasses import dataclass
+
+from defusedxml import ElementTree
+
+from lintro.parsers.pytest.pytest_issue import PytestIssue
+
+
+@dataclass
+class PytestSummary:
+    """Summary statistics from pytest execution.
+
+    Attributes:
+        total: Total number of tests run.
+        passed: Number of tests that passed.
+        failed: Number of tests that failed.
+        skipped: Number of tests that were skipped.
+        error: Number of tests that had errors (setup/teardown failures).
+        xfailed: Number of tests that were expected to fail and did fail.
+        xpassed: Number of tests that were expected to fail but passed.
+        duration: Total execution duration in seconds.
+    """
+
+    total: int = 0
+    passed: int = 0
+    failed: int = 0
+    skipped: int = 0
+    error: int = 0
+    xfailed: int = 0
+    xpassed: int = 0
+    duration: float = 0.0
+
+
+def extract_pytest_summary(output: str) -> PytestSummary:
+    """Extract test summary statistics from pytest output.
+
+    Parses the summary line from pytest output to extract:
+    - Number of passed tests
+    - Number of failed tests
+    - Number of skipped tests
+    - Number of error tests
+    - Execution duration
+
+    Args:
+        output: Raw output from pytest.
+
+    Returns:
+        PytestSummary: Extracted summary statistics.
+    """
+    summary = PytestSummary()
+
+    if not output:
+        return summary
+
+    # Strip ANSI color codes
+    ansi_re = re.compile(r"\x1b\[[0-9;]*m")
+    clean_output = ansi_re.sub("", output)
+
+    # Extract duration first (it's always at the end)
+    duration_match = re.search(r"in\s+([\d.]+)s", clean_output)
+    if duration_match:
+        summary.duration = float(duration_match.group(1))
+
+    # Extract counts independently since order can vary
+    # Patterns handle various formats like:
+    # - "511 passed in 18.53s"
+    # - "509 passed, 2 failed in 18.53s"
+    # - "7 failed, 505 passed, 1 warning in 17.06s"
+    # - "510 passed, 1 skipped in 18.53s"
+
+    passed_match = re.search(r"(\d+)\s+passed", clean_output)
+    if passed_match:
+        summary.passed = int(passed_match.group(1))
+
+    failed_match = re.search(r"(\d+)\s+failed", clean_output)
+    if failed_match:
+        summary.failed = int(failed_match.group(1))
+
+    skipped_match = re.search(r"(\d+)\s+skipped", clean_output)
+    if skipped_match:
+        summary.skipped = int(skipped_match.group(1))
+
+    error_match = re.search(r"(\d+)\s+errors?", clean_output)
+    if error_match:
+        summary.error = int(error_match.group(1))
+
+    xfailed_match = re.search(r"(\d+)\s+xfailed", clean_output)
+    if xfailed_match:
+        summary.xfailed = int(xfailed_match.group(1))
+
+    xpassed_match = re.search(r"(\d+)\s+xpassed", clean_output)
+    if xpassed_match:
+        summary.xpassed = int(xpassed_match.group(1))
+
+    # Calculate total as sum of all test outcomes
+    summary.total = (
+        summary.passed
+        + summary.failed
+        + summary.skipped
+        + summary.error
+        + summary.xfailed
+        + summary.xpassed
+    )
+
+    return summary
+
+
+def parse_pytest_json_output(output: str) -> list[PytestIssue]:
+    """Parse pytest JSON output into PytestIssue objects.
+
+    Args:
+        output: Raw output from pytest with --json-report.
+
+    Returns:
+        list[PytestIssue]: Parsed test failures, errors, and skips.
+    """
+    issues: list[PytestIssue] = []
+
+    if not output or output.strip() in ("{}", "[]"):
+        return issues
+
+    try:
+        data = json.loads(output)
+
+        # Handle different JSON report formats
+        if "tests" in data:
+            # pytest-json-report format
+            for test in data["tests"]:
+                if test.get("outcome") in ("failed", "error", "skipped"):
+                    issues.append(_parse_json_test_item(test))
+        elif isinstance(data, list):
+            # Alternative JSON format
+            for item in data:
+                if isinstance(item, dict) and item.get("outcome") in (
+                    "failed",
+                    "error",
+                    "skipped",
+                ):
+                    issues.append(_parse_json_test_item(item))
+
+    except (json.JSONDecodeError, TypeError, KeyError):
+        pass
+
+    return issues
+
+
+def _parse_json_test_item(test_item: dict) -> PytestIssue:
+    """Parse a single test item from JSON output.
+
+    Args:
+        test_item: Dictionary containing test information.
+
+    Returns:
+        PytestIssue: Parsed test issue.
+    """
+    file_path = test_item.get("file", "")
+    line = test_item.get("lineno", 0)
+    test_name = test_item.get("name", "")
+    message = test_item.get("call", {}).get("longrepr", "") or test_item.get(
+        "longrepr",
+        "",
+    )
+    status = test_item.get("outcome", "UNKNOWN")
+    duration = test_item.get("duration", 0.0)
+    node_id = test_item.get("nodeid", "")
+
+    return PytestIssue(
+        file=file_path,
+        line=line,
+        test_name=test_name,
+        message=message,
+        test_status=status.upper(),
+        duration=duration,
+        node_id=node_id,
+    )
+
+
+def parse_pytest_text_output(output: str) -> list[PytestIssue]:
+    """Parse pytest plain text output into PytestIssue objects.
+
+    Args:
+        output: Raw output from pytest.
+
+    Returns:
+        list[PytestIssue]: Parsed test failures, errors, and skips.
+    """
+    issues: list[PytestIssue] = []
+
+    if not output:
+        return issues
+
+    lines = output.splitlines()
+    current_file = ""
+    current_line = 0
+
+    # Patterns for different pytest output formats
+    file_pattern = re.compile(r"^(.+\.py)::(.+)$")
+    failure_pattern = re.compile(r"^FAILED\s+(.+\.py)::(.+)\s+-\s+(.+)$")
+    error_pattern = re.compile(r"^ERROR\s+(.+\.py)::(.+)\s+-\s+(.+)$")
+    skipped_pattern = re.compile(r"^(.+\.py)::([^\s]+)\s+SKIPPED\s+\((.+)\)\s+\[")
+    line_pattern = re.compile(r"^(.+\.py):(\d+):\s+(.+)$")
+
+    # Alternative patterns for different pytest output formats
+    # Use non-greedy matching for test name to stop at first space
+    failure_pattern_alt = re.compile(r"^FAILED\s+(.+\.py)::([^\s]+)\s+(.+)$")
+    error_pattern_alt = re.compile(r"^ERROR\s+(.+\.py)::([^\s]+)\s+(.+)$")
+    # Alternative skipped pattern without trailing bracket (for compact output)
+    skipped_pattern_alt = re.compile(r"^(.+\.py)::([^\s]+)\s+SKIPPED\s+\((.+)\)$")
+
+    # Strip ANSI color codes for stable parsing
+    ansi_re = re.compile(r"\x1b\[[0-9;]*m")
+
+    for line in lines:
+        # Strip ANSI color codes for stable parsing
+        line = ansi_re.sub("", line).strip()
+
+        # Match FAILED format
+        failure_match = failure_pattern.match(line)
+        if failure_match:
+            file_path = failure_match.group(1)
+            test_name = failure_match.group(2)
+            message = failure_match.group(3)
+            issues.append(
+                PytestIssue(
+                    file=file_path,
+                    line=0,
+                    test_name=test_name,
+                    message=message,
+                    test_status="FAILED",
+                ),
+            )
+            continue
+
+        # Match FAILED format (alternative)
+        failure_match_alt = failure_pattern_alt.match(line)
+        if failure_match_alt:
+            file_path = failure_match_alt.group(1)
+            test_name = failure_match_alt.group(2)
+            message = failure_match_alt.group(3)
+            issues.append(
+                PytestIssue(
+                    file=file_path,
+                    line=0,
+                    test_name=test_name,
+                    message=message,
+                    test_status="FAILED",
+                ),
+            )
+            continue
+
+        # Match ERROR format
+        error_match = error_pattern.match(line)
+        if error_match:
+            file_path = error_match.group(1)
+            test_name = error_match.group(2)
+            message = error_match.group(3)
+            issues.append(
+                PytestIssue(
+                    file=file_path,
+                    line=0,
+                    test_name=test_name,
+                    message=message,
+                    test_status="ERROR",
+                ),
+            )
+            continue
+
+        # Match ERROR format (alternative)
+        error_match_alt = error_pattern_alt.match(line)
+        if error_match_alt:
+            file_path = error_match_alt.group(1)
+            test_name = error_match_alt.group(2)
+            message = error_match_alt.group(3)
+            issues.append(
+                PytestIssue(
+                    file=file_path,
+                    line=0,
+                    test_name=test_name,
+                    message=message,
+                    test_status="ERROR",
+                ),
+            )
+            continue
+
+        # Match SKIPPED format
+        skipped_match = skipped_pattern.match(line)
+        if skipped_match:
+            file_path = skipped_match.group(1)
+            test_name = skipped_match.group(2)
+            message = skipped_match.group(3)
+            issues.append(
+                PytestIssue(
+                    file=file_path,
+                    line=0,
+                    test_name=test_name,
+                    message=message,
+                    test_status="SKIPPED",
+                ),
+            )
+            continue
+
+        # Match SKIPPED format (alternative)
+        skipped_match_alt = skipped_pattern_alt.match(line)
+        if skipped_match_alt:
+            file_path = skipped_match_alt.group(1)
+            test_name = skipped_match_alt.group(2)
+            message = skipped_match_alt.group(3)
+            issues.append(
+                PytestIssue(
+                    file=file_path,
+                    line=0,
+                    test_name=test_name,
+                    message=message,
+                    test_status="SKIPPED",
+                ),
+            )
+            continue
+
+        # Match file::test format
+        file_match = file_pattern.match(line)
+        if file_match:
+            current_file = file_match.group(1)
+            continue
+
+        # Match line number format
+        line_match = line_pattern.match(line)
+        if line_match:
+            current_file = line_match.group(1)
+            current_line = int(line_match.group(2))
+            message = line_match.group(3)
+            if "FAILED" in message or "ERROR" in message or "SKIPPED" in message:
+                # Extract just the error message without the status prefix
+                if message.startswith("FAILED - "):
+                    message = message[9:]  # Remove "FAILED - "
+                    status = "FAILED"
+                elif message.startswith("ERROR - "):
+                    message = message[8:]  # Remove "ERROR - "
+                    status = "ERROR"
+                elif message.startswith("SKIPPED - "):
+                    message = message[10:]  # Remove "SKIPPED - "
+                    status = "SKIPPED"
+                else:
+                    status = "UNKNOWN"
+
+                issues.append(
+                    PytestIssue(
+                        file=current_file,
+                        line=current_line,
+                        test_name="",
+                        message=message,
+                        test_status=status,
+                    ),
+                )
+
+    return issues
+
+
+def parse_pytest_junit_xml(output: str) -> list[PytestIssue]:
+    """Parse pytest JUnit XML output into PytestIssue objects.
+
+    Args:
+        output: Raw output from pytest with --junitxml.
+
+    Returns:
+        list[PytestIssue]: Parsed test failures, errors, and skips.
+    """
+    issues: list[PytestIssue] = []
+
+    if not output:
+        return issues
+
+    try:
+        root = ElementTree.fromstring(output)
+
+        # Handle different JUnit XML structures
+        for testcase in root.findall(".//testcase"):
+            file_path = testcase.get("file", "")
+            line = int(testcase.get("line", 0))
+            test_name = testcase.get("name", "")
+            duration = float(testcase.get("time", 0.0))
+            class_name = testcase.get("classname", "")
+            # If file attribute is missing, try to derive it from classname
+            if not file_path and class_name:
+                # Convert class name like
+                # "tests.scripts.test_script_environment.TestEnvironmentHandling"
+                # to file path like "tests/scripts/test_script_environment.py"
+                class_parts = class_name.split(".")
+                if len(class_parts) >= 2 and class_parts[0] == "tests":
+                    file_path = "/".join(class_parts[:-1]) + ".py"
+            node_id = f"{class_name}::{test_name}" if class_name else test_name
+
+            # Check for failure
+            failure = testcase.find("failure")
+            if failure is not None:
+                message = failure.text or failure.get("message", "")
+                issues.append(
+                    PytestIssue(
+                        file=file_path,
+                        line=line,
+                        test_name=test_name,
+                        message=message,
+                        test_status="FAILED",
+                        duration=duration,
+                        node_id=node_id,
+                    ),
+                )
+
+            # Check for error
+            error = testcase.find("error")
+            if error is not None:
+                message = error.text or error.get("message", "")
+                issues.append(
+                    PytestIssue(
+                        file=file_path,
+                        line=line,
+                        test_name=test_name,
+                        message=message,
+                        test_status="ERROR",
+                        duration=duration,
+                        node_id=node_id,
+                    ),
+                )
+
+            # Check for skip
+            skip = testcase.find("skipped")
+            if skip is not None:
+                message = skip.text or skip.get("message", "")
+                # Clean up skip message by removing file path prefix if present
+                # Format is typically: "/path/to/file.py:line: actual message"
+                if message and ":" in message:
+                    # Find the first colon after a file path pattern
+                    parts = message.split(":")
+                    if (
+                        len(parts) >= 3
+                        and parts[0].startswith("/")
+                        and parts[0].endswith(".py")
+                    ):
+                        # Remove file path and line number, keep the reason
+                        message = ":".join(parts[2:]).lstrip()
+
+                issues.append(
+                    PytestIssue(
+                        file=file_path,
+                        line=line,
+                        test_name=test_name,
+                        message=message,
+                        test_status="SKIPPED",
+                        duration=duration,
+                        node_id=node_id,
+                    ),
+                )
+
+    except ElementTree.ParseError:
+        pass
+
+    return issues
+
+
+def parse_pytest_output(output: str, format: str = "text") -> list[PytestIssue]:
+    """Parse pytest output based on the specified format.
+
+    Args:
+        output: Raw output from pytest.
+        format: Output format ("json", "text", "junit").
+
+    Returns:
+        list[PytestIssue]: Parsed test failures and errors.
+    """
+    if format == "json":
+        return parse_pytest_json_output(output)
+    elif format == "junit":
+        return parse_pytest_junit_xml(output)
+    else:
+        return parse_pytest_text_output(output)
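A minimal usage sketch tying these pieces together. The summary line mirrors one of the formats listed in the comments inside extract_pytest_summary; the test names and messages are made up for illustration:

```python
from lintro.parsers.pytest.pytest_parser import (
    extract_pytest_summary,
    parse_pytest_output,
)

output = (
    "FAILED tests/test_example.py::test_addition - AssertionError: 1 != 2\n"
    "tests/test_example.py::test_optional SKIPPED (requires docker)\n"
    "7 failed, 505 passed, 1 warning in 17.06s\n"
)

summary = extract_pytest_summary(output)
print(summary.failed, summary.passed, summary.duration)  # 7 505 17.06
print(summary.total)  # 512 (sum of the counted outcomes)

# format="text" routes to parse_pytest_text_output; "json" and "junit"
# select the other two parsers.
for issue in parse_pytest_output(output, format="text"):
    print(issue.test_status, issue.file, issue.test_name, issue.message)
# FAILED tests/test_example.py test_addition AssertionError: 1 != 2
# SKIPPED tests/test_example.py test_optional requires docker
```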
lintro/tools/__init__.py
CHANGED
@@ -10,6 +10,7 @@ from lintro.tools.implementations.tool_black import BlackTool
 from lintro.tools.implementations.tool_darglint import DarglintTool
 from lintro.tools.implementations.tool_hadolint import HadolintTool
 from lintro.tools.implementations.tool_prettier import PrettierTool
+from lintro.tools.implementations.tool_pytest import PytestTool
 from lintro.tools.implementations.tool_ruff import RuffTool
 from lintro.tools.implementations.tool_yamllint import YamllintTool
 from lintro.tools.tool_enum import ToolEnum
@@ -39,6 +40,7 @@ __all__ = [
     "DarglintTool",
     "HadolintTool",
     "PrettierTool",
+    "PytestTool",
     "RuffTool",
     "YamllintTool",
 ]
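The effect of this change is that the new tool is importable from the package root alongside the existing ones; a minimal sketch (nothing beyond the import is shown here, since PytestTool's API lives in tool_pytest.py above):

```python
# PytestTool is re-exported through lintro.tools.__all__, next to the
# existing tools; it is the addition in this release.
from lintro.tools import PrettierTool, PytestTool, RuffTool
```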