lintro 0.13.2__py3-none-any.whl → 0.17.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lintro/__init__.py +1 -1
- lintro/cli.py +226 -16
- lintro/cli_utils/commands/__init__.py +8 -1
- lintro/cli_utils/commands/check.py +1 -0
- lintro/cli_utils/commands/config.py +325 -0
- lintro/cli_utils/commands/init.py +361 -0
- lintro/cli_utils/commands/list_tools.py +180 -42
- lintro/cli_utils/commands/test.py +316 -0
- lintro/cli_utils/commands/versions.py +81 -0
- lintro/config/__init__.py +62 -0
- lintro/config/config_loader.py +420 -0
- lintro/config/lintro_config.py +189 -0
- lintro/config/tool_config_generator.py +403 -0
- lintro/enums/tool_name.py +2 -0
- lintro/enums/tool_type.py +2 -0
- lintro/formatters/tools/__init__.py +12 -0
- lintro/formatters/tools/eslint_formatter.py +108 -0
- lintro/formatters/tools/markdownlint_formatter.py +88 -0
- lintro/formatters/tools/pytest_formatter.py +201 -0
- lintro/parsers/__init__.py +69 -9
- lintro/parsers/bandit/__init__.py +6 -0
- lintro/parsers/bandit/bandit_issue.py +49 -0
- lintro/parsers/bandit/bandit_parser.py +99 -0
- lintro/parsers/black/black_issue.py +4 -0
- lintro/parsers/eslint/__init__.py +6 -0
- lintro/parsers/eslint/eslint_issue.py +26 -0
- lintro/parsers/eslint/eslint_parser.py +63 -0
- lintro/parsers/markdownlint/__init__.py +6 -0
- lintro/parsers/markdownlint/markdownlint_issue.py +22 -0
- lintro/parsers/markdownlint/markdownlint_parser.py +113 -0
- lintro/parsers/pytest/__init__.py +21 -0
- lintro/parsers/pytest/pytest_issue.py +28 -0
- lintro/parsers/pytest/pytest_parser.py +483 -0
- lintro/tools/__init__.py +2 -0
- lintro/tools/core/timeout_utils.py +112 -0
- lintro/tools/core/tool_base.py +255 -45
- lintro/tools/core/tool_manager.py +77 -24
- lintro/tools/core/version_requirements.py +482 -0
- lintro/tools/implementations/pytest/pytest_command_builder.py +311 -0
- lintro/tools/implementations/pytest/pytest_config.py +200 -0
- lintro/tools/implementations/pytest/pytest_error_handler.py +128 -0
- lintro/tools/implementations/pytest/pytest_executor.py +122 -0
- lintro/tools/implementations/pytest/pytest_handlers.py +375 -0
- lintro/tools/implementations/pytest/pytest_option_validators.py +212 -0
- lintro/tools/implementations/pytest/pytest_output_processor.py +408 -0
- lintro/tools/implementations/pytest/pytest_result_processor.py +113 -0
- lintro/tools/implementations/pytest/pytest_utils.py +697 -0
- lintro/tools/implementations/tool_actionlint.py +106 -16
- lintro/tools/implementations/tool_bandit.py +23 -7
- lintro/tools/implementations/tool_black.py +236 -29
- lintro/tools/implementations/tool_darglint.py +180 -21
- lintro/tools/implementations/tool_eslint.py +374 -0
- lintro/tools/implementations/tool_hadolint.py +94 -25
- lintro/tools/implementations/tool_markdownlint.py +354 -0
- lintro/tools/implementations/tool_prettier.py +313 -26
- lintro/tools/implementations/tool_pytest.py +327 -0
- lintro/tools/implementations/tool_ruff.py +247 -70
- lintro/tools/implementations/tool_yamllint.py +448 -34
- lintro/tools/tool_enum.py +6 -0
- lintro/utils/config.py +41 -18
- lintro/utils/console_logger.py +211 -25
- lintro/utils/path_utils.py +42 -0
- lintro/utils/tool_executor.py +336 -39
- lintro/utils/tool_utils.py +38 -2
- lintro/utils/unified_config.py +926 -0
- {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/METADATA +131 -29
- lintro-0.17.2.dist-info/RECORD +134 -0
- lintro-0.13.2.dist-info/RECORD +0 -96
- {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/WHEEL +0 -0
- {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/entry_points.txt +0 -0
- {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/licenses/LICENSE +0 -0
- {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,311 @@
|
|
|
1
|
+
"""Command building functions for pytest tool.
|
|
2
|
+
|
|
3
|
+
This module contains command building logic extracted from PytestTool to improve
|
|
4
|
+
maintainability and reduce file size. Functions are organized by command section.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import os
|
|
8
|
+
|
|
9
|
+
from loguru import logger
|
|
10
|
+
|
|
11
|
+
from lintro.tools.implementations.pytest.pytest_utils import (
|
|
12
|
+
check_plugin_installed,
|
|
13
|
+
get_parallel_workers_from_preset,
|
|
14
|
+
)
|
|
15
|
+
|
|
16
|
+
# Constants for pytest configuration
|
|
17
|
+
PYTEST_TEST_MODE_ENV: str = "LINTRO_TEST_MODE"
|
|
18
|
+
PYTEST_TEST_MODE_VALUE: str = "1"
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def build_base_command(tool) -> list[str]:
    """Construct the initial pytest invocation.

    Args:
        tool: PytestTool instance providing executable resolution.

    Returns:
        list[str]: Command list beginning with the pytest executable.
    """
    base_cmd: list[str] = tool._get_executable_command(tool_name="pytest")
    return base_cmd
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def add_verbosity_options(cmd: list[str], options: dict) -> None:
    """Append verbosity, traceback, and noise-reduction flags to the command.

    Args:
        cmd: Command list to modify in place.
        options: Options dictionary.
    """
    # Verbose output defaults to the show_progress setting, so enabling
    # progress reporting implies per-test output.
    progress = options.get("show_progress", True)
    if options.get("verbose", progress) or progress:
        cmd.append("-v")

    # Traceback rendering style (short by default).
    cmd.extend(["--tb", options.get("tb", "short")])

    # Only stop early when the caller asked for it; the default (None) lets
    # the whole suite run instead of bailing on the first failures.
    max_failures = options.get("maxfail")
    if max_failures is not None:
        cmd.extend(["--maxfail", str(max_failures)])

    # Suppress the pytest header unless explicitly re-enabled.
    if options.get("no_header", True):
        cmd.append("--no-header")

    # Silence warnings unless explicitly re-enabled.
    if options.get("disable_warnings", True):
        cmd.append("--disable-warnings")
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def add_output_options(cmd: list[str], options: dict) -> str | None:
|
|
66
|
+
"""Add output format options (JSON, JUnit XML, HTML) to command.
|
|
67
|
+
|
|
68
|
+
Args:
|
|
69
|
+
cmd: Command list to modify.
|
|
70
|
+
options: Options dictionary.
|
|
71
|
+
|
|
72
|
+
Returns:
|
|
73
|
+
str | None: The junitxml path if auto-enabled, None otherwise.
|
|
74
|
+
"""
|
|
75
|
+
# Add output format options
|
|
76
|
+
if options.get("json_report", False):
|
|
77
|
+
cmd.append("--json-report")
|
|
78
|
+
cmd.append("--json-report-file=pytest-report.json")
|
|
79
|
+
|
|
80
|
+
# Track if junitxml was explicitly provided
|
|
81
|
+
junitxml_explicit = "junitxml" in options
|
|
82
|
+
junitxml_value = options.get("junitxml")
|
|
83
|
+
auto_junitxml_path: str | None = None
|
|
84
|
+
|
|
85
|
+
if junitxml_value:
|
|
86
|
+
# User provided a truthy value, use it
|
|
87
|
+
cmd.extend(["--junitxml", junitxml_value])
|
|
88
|
+
else:
|
|
89
|
+
# Auto-enable junitxml to capture all test results including skipped tests
|
|
90
|
+
# Only if user didn't explicitly disable it
|
|
91
|
+
# (junitxml_explicit True but falsy value)
|
|
92
|
+
auto_junitxml = options.get("auto_junitxml", True)
|
|
93
|
+
if not junitxml_explicit and auto_junitxml:
|
|
94
|
+
cmd.extend(["--junitxml", "report.xml"])
|
|
95
|
+
auto_junitxml_path = "report.xml"
|
|
96
|
+
logger.debug("Auto-enabled junitxml=report.xml to capture skipped tests")
|
|
97
|
+
|
|
98
|
+
# Add pytest-html HTML report if specified
|
|
99
|
+
html_report = options.get("html_report")
|
|
100
|
+
if html_report:
|
|
101
|
+
cmd.extend(["--html", html_report])
|
|
102
|
+
logger.debug(f"HTML report enabled: {html_report}")
|
|
103
|
+
|
|
104
|
+
return auto_junitxml_path
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
def add_parallel_options(cmd: list[str], options: dict) -> None:
    """Append pytest-xdist parallel-execution flags to the command.

    Args:
        cmd: Command list to modify in place.
        options: Options dictionary.
    """
    # A named preset takes precedence over an explicit worker count.
    worker_count = options.get("workers")
    preset = options.get("parallel_preset")
    if preset:
        worker_count = get_parallel_workers_from_preset(preset)
        logger.debug(
            f"Using parallel preset '{preset}' -> workers={worker_count}",
        )
    if worker_count:
        cmd.extend(["-n", str(worker_count)])
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def add_coverage_options(cmd: list[str], options: dict) -> None:
    """Add coverage options to command.

    Coverage collection (``--cov=.``) is enabled whenever any coverage-related
    option is present; per-format report flags are appended on top of that.

    Args:
        cmd: Command list to modify.
        options: Options dictionary.
    """
    # Add coverage threshold if specified
    coverage_threshold = options.get("coverage_threshold")
    if coverage_threshold is not None:
        cmd.extend(["--cov-fail-under", str(coverage_threshold)])

    # Coverage report options (require pytest-cov)
    coverage_html = options.get("coverage_html")
    coverage_xml = options.get("coverage_xml")
    coverage_report = options.get("coverage_report", False)

    # If coverage_report is True, generate both HTML and XML with defaults
    if coverage_report:
        coverage_html = coverage_html or "htmlcov"
        coverage_xml = coverage_xml or "coverage.xml"

    # Enable coverage collection if any coverage option is in play
    if coverage_html or coverage_xml or coverage_threshold is not None:
        # Default to measuring the current directory; callers can override.
        cmd.append("--cov=.")

        # HTML report: pytest-cov uses --cov-report=html for the default
        # "htmlcov" directory, or --cov-report html:<dir> for custom paths.
        if coverage_html:
            if coverage_html == "htmlcov":
                cmd.append("--cov-report=html")
            else:
                # Strip a trailing index.html so only the directory remains.
                # Use removesuffix (not str.replace) so paths that merely
                # contain "index.html" elsewhere are left intact.
                html_dir = coverage_html.removesuffix("index.html").removesuffix("/")
                if html_dir:
                    cmd.extend(["--cov-report", f"html:{html_dir}"])
                else:
                    cmd.append("--cov-report=html")
            logger.debug(f"Coverage HTML report enabled: {coverage_html}")

        # XML report: pytest-cov uses --cov-report=xml for the default
        # "coverage.xml", or --cov-report xml:<file> (without the .xml
        # extension) for custom paths.
        if coverage_xml:
            if coverage_xml == "coverage.xml":
                cmd.append("--cov-report=xml")
            else:
                # Strip only the trailing .xml extension. The previous
                # str.replace(".xml", "") removed every ".xml" occurrence,
                # corrupting names such as "my.xml.report.xml".
                xml_file = coverage_xml.removesuffix(".xml")
                if xml_file:
                    cmd.extend(["--cov-report", f"xml:{xml_file}"])
                else:
                    cmd.append("--cov-report=xml")
            logger.debug(f"Coverage XML report enabled: {coverage_xml}")
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
def add_test_mode_options(cmd: list[str]) -> None:
    """Append stricter pytest flags when running under lintro's test mode.

    Args:
        cmd: Command list to modify in place.
    """
    # Test-mode isolation: enforce strict marker and config validation so the
    # suite fails fast on configuration drift.
    if os.environ.get(PYTEST_TEST_MODE_ENV) == PYTEST_TEST_MODE_VALUE:
        cmd.append("--strict-markers")
        cmd.append("--strict-config")
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
def add_plugin_options(cmd: list[str], options: dict) -> None:
    """Append flags for optional pytest plugins (timeout, rerunfailures).

    Args:
        cmd: Command list to modify in place.
        options: Options dictionary.
    """
    # pytest-timeout: only pass timeout flags when the plugin is present,
    # otherwise pytest would reject the unknown arguments.
    timeout = options.get("timeout")
    if timeout is not None:
        if check_plugin_installed("pytest-timeout"):
            cmd.extend(["--timeout", str(timeout)])
            # Default to signal-based interruption when no method is given.
            method = options.get("timeout_method", "signal")
            cmd.extend(["--timeout-method", method])
            logger.debug(f"Timeout enabled: {timeout}s (method: {method})")
        else:
            logger.warning(
                "pytest-timeout plugin not installed; timeout option ignored. "
                "Install with: pip install pytest-timeout",
            )

    # pytest-rerunfailures: retry failed tests a configurable number of times,
    # optionally with a delay between attempts.
    reruns = options.get("reruns")
    if reruns is not None and reruns > 0:
        cmd.extend(["--reruns", str(reruns)])

        delay = options.get("reruns_delay")
        if delay is not None and delay > 0:
            cmd.extend(["--reruns-delay", str(delay)])
            logger.debug(f"Reruns enabled: {reruns} times with {delay}s delay")
        else:
            logger.debug(f"Reruns enabled: {reruns} times")
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
def add_ignore_options(cmd: list[str], tool) -> None:
    """Append ``--ignore`` flags derived from the tool's exclude patterns.

    Args:
        cmd: Command list to modify in place.
        tool: PytestTool instance supplying ``exclude_patterns``.
    """
    # pytest --ignore expects directory paths, not glob patterns, so trim the
    # common glob suffixes down to plain directory paths.
    for pattern in tool.exclude_patterns:
        if pattern.endswith("/*"):
            # "dir/*" -> "dir"
            target = pattern[:-2]
        elif pattern.endswith("/"):
            # "dir/" -> "dir"
            target = pattern[:-1]
        else:
            # Anything else is passed through unchanged; pytest accepts
            # plain directory names here.
            target = pattern
        cmd.extend(["--ignore", target])
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
def build_check_command(
    tool,
    files: list[str],
    fix: bool = False,
) -> tuple[list[str], str | None]:
    """Assemble the complete pytest command from the per-section builders.

    Args:
        tool: PytestTool instance.
        files: List of files to test.
        fix: Ignored for pytest (fixing does not apply to a test runner).

    Returns:
        tuple[list[str], str | None]: The command arguments and, when a JUnit
            XML report was auto-enabled, the path of that report.
    """
    command = build_base_command(tool)

    add_verbosity_options(command, tool.options)
    # Output options may auto-enable a JUnit XML report; keep its path so the
    # caller can read results back from it.
    auto_junitxml_path = add_output_options(command, tool.options)
    add_parallel_options(command, tool.options)
    add_coverage_options(command, tool.options)
    add_plugin_options(command, tool.options)
    add_test_mode_options(command)
    add_ignore_options(command, tool)

    # Target files always come last.
    command.extend(files)

    return command, auto_junitxml_path
|
|
@@ -0,0 +1,200 @@
|
|
|
1
|
+
"""Pytest configuration management.
|
|
2
|
+
|
|
3
|
+
This module contains the PytestConfiguration dataclass that encapsulates
|
|
4
|
+
all pytest-specific option management and validation logic.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from dataclasses import dataclass
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from lintro.tools.implementations.pytest.pytest_option_validators import (
|
|
11
|
+
validate_pytest_options,
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass
|
|
16
|
+
class PytestConfiguration:
|
|
17
|
+
"""Configuration class for pytest-specific options.
|
|
18
|
+
|
|
19
|
+
This dataclass encapsulates all pytest configuration options and provides
|
|
20
|
+
validation and management methods. It follows the project's preference for
|
|
21
|
+
dataclasses and proper data modeling.
|
|
22
|
+
|
|
23
|
+
Attributes:
|
|
24
|
+
verbose: Enable verbose output.
|
|
25
|
+
tb: Traceback format (short, long, auto, line, native).
|
|
26
|
+
maxfail: Stop after first N failures.
|
|
27
|
+
no_header: Disable header.
|
|
28
|
+
disable_warnings: Disable warnings.
|
|
29
|
+
json_report: Enable JSON report output.
|
|
30
|
+
junitxml: Path for JUnit XML output.
|
|
31
|
+
run_docker_tests: Enable Docker tests (default: False).
|
|
32
|
+
slow_test_threshold: Duration threshold in seconds for slow test warning
|
|
33
|
+
(default: 1.0).
|
|
34
|
+
total_time_warning: Total execution time threshold in seconds for warning
|
|
35
|
+
(default: 60.0).
|
|
36
|
+
workers: Number of parallel workers for pytest-xdist (auto, N, or None).
|
|
37
|
+
coverage_threshold: Minimum coverage percentage to require (0-100).
|
|
38
|
+
auto_junitxml: Auto-enable junitxml in CI environments (default: True).
|
|
39
|
+
detect_flaky: Enable flaky test detection (default: True).
|
|
40
|
+
flaky_min_runs: Minimum runs before detecting flaky tests (default: 3).
|
|
41
|
+
flaky_failure_rate: Minimum failure rate to consider flaky (default: 0.3).
|
|
42
|
+
html_report: Path for HTML report output (pytest-html plugin).
|
|
43
|
+
parallel_preset: Parallel execution preset (auto, small, medium, large).
|
|
44
|
+
list_plugins: List all installed pytest plugins.
|
|
45
|
+
check_plugins: Check if required plugins are installed.
|
|
46
|
+
required_plugins: Comma-separated list of required plugin names.
|
|
47
|
+
coverage_html: Path for HTML coverage report (requires pytest-cov).
|
|
48
|
+
coverage_xml: Path for XML coverage report (requires pytest-cov).
|
|
49
|
+
coverage_report: Generate both HTML and XML coverage reports.
|
|
50
|
+
collect_only: List tests without executing them.
|
|
51
|
+
list_fixtures: List all available fixtures.
|
|
52
|
+
fixture_info: Show detailed information about a specific fixture.
|
|
53
|
+
list_markers: List all available markers.
|
|
54
|
+
parametrize_help: Show help for parametrized tests.
|
|
55
|
+
show_progress: Show progress during test execution (default: True).
|
|
56
|
+
timeout: Timeout in seconds for individual tests (pytest-timeout plugin).
|
|
57
|
+
reruns: Number of times to retry failed tests (pytest-rerunfailures plugin).
|
|
58
|
+
reruns_delay: Delay in seconds between retries (pytest-rerunfailures plugin).
|
|
59
|
+
"""
|
|
60
|
+
|
|
61
|
+
verbose: bool | None = None
|
|
62
|
+
tb: str | None = None
|
|
63
|
+
maxfail: int | None = None
|
|
64
|
+
no_header: bool | None = None
|
|
65
|
+
disable_warnings: bool | None = None
|
|
66
|
+
json_report: bool | None = None
|
|
67
|
+
junitxml: str | None = None
|
|
68
|
+
run_docker_tests: bool | None = None
|
|
69
|
+
slow_test_threshold: float | None = None
|
|
70
|
+
total_time_warning: float | None = None
|
|
71
|
+
workers: str | None = None
|
|
72
|
+
coverage_threshold: float | None = None
|
|
73
|
+
auto_junitxml: bool | None = None
|
|
74
|
+
detect_flaky: bool | None = None
|
|
75
|
+
flaky_min_runs: int | None = None
|
|
76
|
+
flaky_failure_rate: float | None = None
|
|
77
|
+
html_report: str | None = None
|
|
78
|
+
parallel_preset: str | None = None
|
|
79
|
+
list_plugins: bool | None = None
|
|
80
|
+
check_plugins: bool | None = None
|
|
81
|
+
required_plugins: str | None = None
|
|
82
|
+
coverage_html: str | None = None
|
|
83
|
+
coverage_xml: str | None = None
|
|
84
|
+
coverage_report: bool | None = None
|
|
85
|
+
collect_only: bool | None = None
|
|
86
|
+
list_fixtures: bool | None = None
|
|
87
|
+
fixture_info: str | None = None
|
|
88
|
+
list_markers: bool | None = None
|
|
89
|
+
parametrize_help: bool | None = None
|
|
90
|
+
show_progress: bool | None = None
|
|
91
|
+
timeout: int | None = None
|
|
92
|
+
reruns: int | None = None
|
|
93
|
+
reruns_delay: int | None = None
|
|
94
|
+
|
|
95
|
+
def set_options(self, **kwargs: Any) -> None:
|
|
96
|
+
"""Set pytest-specific options with validation.
|
|
97
|
+
|
|
98
|
+
Args:
|
|
99
|
+
**kwargs: Option key-value pairs to set.
|
|
100
|
+
"""
|
|
101
|
+
# Extract only the options that belong to this configuration
|
|
102
|
+
config_fields = {field.name for field in self.__dataclass_fields__.values()}
|
|
103
|
+
|
|
104
|
+
# Validate all options using extracted validator
|
|
105
|
+
validate_pytest_options(
|
|
106
|
+
**{k: v for k, v in kwargs.items() if k in config_fields},
|
|
107
|
+
)
|
|
108
|
+
|
|
109
|
+
# Set default junitxml if auto_junitxml is enabled and junitxml not
|
|
110
|
+
# explicitly set
|
|
111
|
+
junitxml = kwargs.get("junitxml")
|
|
112
|
+
auto_junitxml = kwargs.get("auto_junitxml")
|
|
113
|
+
if junitxml is None and (auto_junitxml is None or auto_junitxml):
|
|
114
|
+
junitxml = "report.xml"
|
|
115
|
+
kwargs = kwargs.copy()
|
|
116
|
+
kwargs["junitxml"] = junitxml
|
|
117
|
+
|
|
118
|
+
# Update the dataclass fields
|
|
119
|
+
for key, value in kwargs.items():
|
|
120
|
+
if key in config_fields:
|
|
121
|
+
setattr(self, key, value)
|
|
122
|
+
|
|
123
|
+
def get_options_dict(self) -> dict[str, Any]:
|
|
124
|
+
"""Get a dictionary of all non-None options.
|
|
125
|
+
|
|
126
|
+
Returns:
|
|
127
|
+
Dict[str, Any]: Dictionary of option key-value pairs, excluding None values.
|
|
128
|
+
"""
|
|
129
|
+
options = {}
|
|
130
|
+
for field_name, _field_info in self.__dataclass_fields__.items():
|
|
131
|
+
value = getattr(self, field_name)
|
|
132
|
+
if value is not None:
|
|
133
|
+
options[field_name] = value
|
|
134
|
+
return options
|
|
135
|
+
|
|
136
|
+
def is_special_mode(self) -> bool:
|
|
137
|
+
"""Check if any special mode is enabled.
|
|
138
|
+
|
|
139
|
+
Special modes are modes that don't run tests but perform other operations
|
|
140
|
+
like listing plugins, fixtures, etc.
|
|
141
|
+
|
|
142
|
+
Returns:
|
|
143
|
+
bool: True if any special mode is enabled.
|
|
144
|
+
"""
|
|
145
|
+
special_modes = [
|
|
146
|
+
"list_plugins",
|
|
147
|
+
"check_plugins",
|
|
148
|
+
"collect_only",
|
|
149
|
+
"list_fixtures",
|
|
150
|
+
"list_markers",
|
|
151
|
+
"parametrize_help",
|
|
152
|
+
]
|
|
153
|
+
|
|
154
|
+
# Check boolean special modes
|
|
155
|
+
if any(getattr(self, mode, False) for mode in special_modes):
|
|
156
|
+
return True
|
|
157
|
+
|
|
158
|
+
# Check fixture_info (string value, not boolean)
|
|
159
|
+
return bool(getattr(self, "fixture_info", None))
|
|
160
|
+
|
|
161
|
+
def get_special_mode(self) -> str | None:
|
|
162
|
+
"""Get the active special mode, if any.
|
|
163
|
+
|
|
164
|
+
Returns:
|
|
165
|
+
str | None: Name of the active special mode, or None if no special mode.
|
|
166
|
+
"""
|
|
167
|
+
special_modes = [
|
|
168
|
+
("list_plugins", "list_plugins"),
|
|
169
|
+
("check_plugins", "check_plugins"),
|
|
170
|
+
("collect_only", "collect_only"),
|
|
171
|
+
("list_fixtures", "list_fixtures"),
|
|
172
|
+
("list_markers", "list_markers"),
|
|
173
|
+
("parametrize_help", "parametrize_help"),
|
|
174
|
+
]
|
|
175
|
+
|
|
176
|
+
for attr_name, mode_name in special_modes:
|
|
177
|
+
if getattr(self, attr_name, False):
|
|
178
|
+
return mode_name
|
|
179
|
+
|
|
180
|
+
# Check for fixture_info (string value, not boolean)
|
|
181
|
+
if getattr(self, "fixture_info", None):
|
|
182
|
+
return "fixture_info"
|
|
183
|
+
|
|
184
|
+
return None
|
|
185
|
+
|
|
186
|
+
def get_special_mode_value(self, mode: str) -> Any:
|
|
187
|
+
"""Get the value for a special mode.
|
|
188
|
+
|
|
189
|
+
Args:
|
|
190
|
+
mode: The special mode name.
|
|
191
|
+
|
|
192
|
+
Returns:
|
|
193
|
+
Any: The value associated with the special mode.
|
|
194
|
+
"""
|
|
195
|
+
if mode == "fixture_info":
|
|
196
|
+
return self.fixture_info
|
|
197
|
+
elif mode == "check_plugins":
|
|
198
|
+
return self.required_plugins
|
|
199
|
+
else:
|
|
200
|
+
return getattr(self, mode, False)
|
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
"""Pytest error handling.
|
|
2
|
+
|
|
3
|
+
This module contains the PytestErrorHandler class that handles various error
|
|
4
|
+
scenarios consistently and provides standardized error messages.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import subprocess # nosec B404 - used safely with shell disabled
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
|
|
10
|
+
from loguru import logger
|
|
11
|
+
|
|
12
|
+
from lintro.models.core.tool_result import ToolResult
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass
class PytestErrorHandler:
    """Handles pytest error scenarios consistently.

    This class encapsulates error handling logic for various pytest execution
    failures, providing standardized error messages and ToolResult objects.

    Attributes:
        tool_name: Name of the tool (e.g., "pytest").
    """

    # Identifier placed in the ``name`` field of every ToolResult built here.
    tool_name: str = "pytest"

    def handle_timeout_error(
        self,
        timeout_val: int,
        cmd: list[str],
        initial_count: int = 0,
    ) -> ToolResult:
        """Handle timeout errors consistently.

        Builds a user-facing message listing likely causes and remedies, logs
        it, and wraps it in a failed ToolResult.

        Args:
            timeout_val: The timeout value that was exceeded.
            cmd: Command that timed out.
            initial_count: Number of issues discovered before timeout.

        Returns:
            ToolResult: Standardized timeout error result.
        """
        # Format the command for display: show at most the first four tokens
        # and summarize the remainder.
        # NOTE(review): cmd[:4] equals cmd when len(cmd) < 4, so the
        # conditional below is redundant but harmless.
        cmd_str = " ".join(cmd[:4]) if len(cmd) >= 4 else " ".join(cmd)
        if len(cmd) > 4:
            cmd_str += f" ... ({len(cmd) - 4} more args)"

        error_msg = (
            f"❌ pytest execution timed out after {timeout_val}s\n\n"
            f"Command: {cmd_str}\n\n"
            "Possible causes:\n"
            " • Tests are taking too long to run\n"
            " • Some tests are hanging or blocked (e.g., waiting for I/O)\n"
            " • Test discovery is slow or stuck\n"
            " • Resource exhaustion (memory, file descriptors)\n\n"
            "Solutions:\n"
            " 1. Increase timeout: lintro test --tool-options timeout=600\n"
            " 2. Run fewer tests: lintro test tests/unit/ (vs full test suite)\n"
            " 3. Run in parallel: lintro test --tool-options workers=auto\n"
            " 4. Skip slow tests: lintro test -m 'not slow'\n"
            " 5. Debug directly: pytest -v --tb=short <test_file>\n"
        )
        logger.error(error_msg)
        return ToolResult(
            name=self.tool_name,
            success=False,
            issues=[],
            output=error_msg,
            issues_count=max(initial_count, 1),  # Count timeout as execution failure
        )

    def handle_execution_error(
        self,
        error: Exception,
        cmd: list[str],
    ) -> ToolResult:
        """Handle execution errors consistently.

        Maps the exception type to a targeted, actionable message and wraps
        it in a failed ToolResult.

        Args:
            error: The exception that occurred.
            cmd: Command that failed.

        Returns:
            ToolResult: Standardized error result.
        """
        if isinstance(error, FileNotFoundError):
            # The pytest binary itself is missing — point at installation steps.
            error_msg = (
                f"pytest executable not found: {error}\n\n"
                "Please ensure pytest is installed:\n"
                " - Install via pip: pip install pytest\n"
                " - Install via uv: uv add pytest\n"
                " - Or install as dev dependency: uv add --dev pytest\n\n"
                "After installation, verify pytest is available:\n"
                " pytest --version"
            )
        elif isinstance(error, subprocess.CalledProcessError):
            # pytest started but exited with an unexpected non-zero status.
            error_msg = (
                f"pytest execution failed with return code {error.returncode}\n\n"
                "Common causes:\n"
                " - Syntax errors in test files\n"
                " - Missing dependencies or imports\n"
                " - Configuration issues in pytest.ini or pyproject.toml\n"
                " - Permission errors accessing test files\n\n"
                "Try running pytest directly to see detailed error:\n"
                f" {' '.join(cmd[:3])} ..."
            )
        else:
            # Generic error handling with helpful context
            error_type = type(error).__name__
            error_msg = (
                f"Unexpected error running pytest: {error_type}: {error}\n\n"
                "Please report this issue if it persists. "
                "For troubleshooting:\n"
                " - Verify pytest is installed: pytest --version\n"
                " - Check test files for syntax errors\n"
                " - Review pytest configuration files\n"
                " - Run pytest directly to see full output"
            )

        logger.error(error_msg)
        return ToolResult(
            name=self.tool_name,
            success=False,
            issues=[],
            output=error_msg,
            # NOTE(review): TimeoutExpired appears to be routed to
            # handle_timeout_error instead of this method, so this branch
            # looks unreachable here — confirm whether CalledProcessError
            # should be counted as an issue instead.
            issues_count=1 if isinstance(error, subprocess.TimeoutExpired) else 0,
        )
|