lintro 0.13.2__py3-none-any.whl → 0.17.2__py3-none-any.whl

This diff covers publicly available package versions released to a supported registry and reflects the changes between those versions as published. It is provided for informational purposes only.
Files changed (72)
  1. lintro/__init__.py +1 -1
  2. lintro/cli.py +226 -16
  3. lintro/cli_utils/commands/__init__.py +8 -1
  4. lintro/cli_utils/commands/check.py +1 -0
  5. lintro/cli_utils/commands/config.py +325 -0
  6. lintro/cli_utils/commands/init.py +361 -0
  7. lintro/cli_utils/commands/list_tools.py +180 -42
  8. lintro/cli_utils/commands/test.py +316 -0
  9. lintro/cli_utils/commands/versions.py +81 -0
  10. lintro/config/__init__.py +62 -0
  11. lintro/config/config_loader.py +420 -0
  12. lintro/config/lintro_config.py +189 -0
  13. lintro/config/tool_config_generator.py +403 -0
  14. lintro/enums/tool_name.py +2 -0
  15. lintro/enums/tool_type.py +2 -0
  16. lintro/formatters/tools/__init__.py +12 -0
  17. lintro/formatters/tools/eslint_formatter.py +108 -0
  18. lintro/formatters/tools/markdownlint_formatter.py +88 -0
  19. lintro/formatters/tools/pytest_formatter.py +201 -0
  20. lintro/parsers/__init__.py +69 -9
  21. lintro/parsers/bandit/__init__.py +6 -0
  22. lintro/parsers/bandit/bandit_issue.py +49 -0
  23. lintro/parsers/bandit/bandit_parser.py +99 -0
  24. lintro/parsers/black/black_issue.py +4 -0
  25. lintro/parsers/eslint/__init__.py +6 -0
  26. lintro/parsers/eslint/eslint_issue.py +26 -0
  27. lintro/parsers/eslint/eslint_parser.py +63 -0
  28. lintro/parsers/markdownlint/__init__.py +6 -0
  29. lintro/parsers/markdownlint/markdownlint_issue.py +22 -0
  30. lintro/parsers/markdownlint/markdownlint_parser.py +113 -0
  31. lintro/parsers/pytest/__init__.py +21 -0
  32. lintro/parsers/pytest/pytest_issue.py +28 -0
  33. lintro/parsers/pytest/pytest_parser.py +483 -0
  34. lintro/tools/__init__.py +2 -0
  35. lintro/tools/core/timeout_utils.py +112 -0
  36. lintro/tools/core/tool_base.py +255 -45
  37. lintro/tools/core/tool_manager.py +77 -24
  38. lintro/tools/core/version_requirements.py +482 -0
  39. lintro/tools/implementations/pytest/pytest_command_builder.py +311 -0
  40. lintro/tools/implementations/pytest/pytest_config.py +200 -0
  41. lintro/tools/implementations/pytest/pytest_error_handler.py +128 -0
  42. lintro/tools/implementations/pytest/pytest_executor.py +122 -0
  43. lintro/tools/implementations/pytest/pytest_handlers.py +375 -0
  44. lintro/tools/implementations/pytest/pytest_option_validators.py +212 -0
  45. lintro/tools/implementations/pytest/pytest_output_processor.py +408 -0
  46. lintro/tools/implementations/pytest/pytest_result_processor.py +113 -0
  47. lintro/tools/implementations/pytest/pytest_utils.py +697 -0
  48. lintro/tools/implementations/tool_actionlint.py +106 -16
  49. lintro/tools/implementations/tool_bandit.py +23 -7
  50. lintro/tools/implementations/tool_black.py +236 -29
  51. lintro/tools/implementations/tool_darglint.py +180 -21
  52. lintro/tools/implementations/tool_eslint.py +374 -0
  53. lintro/tools/implementations/tool_hadolint.py +94 -25
  54. lintro/tools/implementations/tool_markdownlint.py +354 -0
  55. lintro/tools/implementations/tool_prettier.py +313 -26
  56. lintro/tools/implementations/tool_pytest.py +327 -0
  57. lintro/tools/implementations/tool_ruff.py +247 -70
  58. lintro/tools/implementations/tool_yamllint.py +448 -34
  59. lintro/tools/tool_enum.py +6 -0
  60. lintro/utils/config.py +41 -18
  61. lintro/utils/console_logger.py +211 -25
  62. lintro/utils/path_utils.py +42 -0
  63. lintro/utils/tool_executor.py +336 -39
  64. lintro/utils/tool_utils.py +38 -2
  65. lintro/utils/unified_config.py +926 -0
  66. {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/METADATA +131 -29
  67. lintro-0.17.2.dist-info/RECORD +134 -0
  68. lintro-0.13.2.dist-info/RECORD +0 -96
  69. {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/WHEEL +0 -0
  70. {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/entry_points.txt +0 -0
  71. {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/licenses/LICENSE +0 -0
  72. {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,122 @@
+"""Pytest execution logic.
+
+This module contains the PytestExecutor class that handles test execution,
+environment management, and subprocess operations.
+"""
+
+import os
+from dataclasses import dataclass
+
+from loguru import logger
+
+from lintro.tools.implementations.pytest.pytest_config import PytestConfiguration
+from lintro.tools.implementations.pytest.pytest_utils import collect_tests_once
+
+
+@dataclass
+class PytestExecutor:
+    """Handles pytest test execution and environment management.
+
+    This class encapsulates the logic for executing pytest tests, managing
+    Docker test environment variables, and handling subprocess operations.
+
+    Attributes:
+        config: PytestConfiguration instance with test execution options.
+        tool: Reference to the parent tool for subprocess execution.
+    """
+
+    config: PytestConfiguration
+    tool: object # Required: must be set by the parent tool
+
+    def prepare_test_execution(
+        self,
+        target_files: list[str],
+    ) -> tuple[int, int, str | None]:
+        """Prepare test execution by collecting tests and setting up environment.
+
+        Args:
+            target_files: Files or directories to test.
+
+        Raises:
+            ValueError: If tool reference is not set.
+
+        Returns:
+            Tuple[int, int, str | None]: Tuple of (total_available_tests,
+                docker_test_count, original_docker_env).
+        """
+        if self.tool is None:
+            raise ValueError("Tool reference not set on executor")
+
+        # Docker tests are disabled by default and must be explicitly enabled
+        run_docker_tests = self.config.run_docker_tests or False
+
+        # Store original environment state for cleanup
+        original_docker_env = os.environ.get("LINTRO_RUN_DOCKER_TESTS")
+
+        # Collect tests once and get both total count and docker test count
+        # This avoids duplicate pytest --collect-only calls
+        total_available_tests, docker_test_count = collect_tests_once(
+            self.tool,
+            target_files,
+        )
+
+        if run_docker_tests:
+            # Set environment variable to enable Docker tests
+            os.environ["LINTRO_RUN_DOCKER_TESTS"] = "1"
+            # Log that Docker tests are enabled (may take longer) in blue format
+            docker_msg = (
+                f"[LINTRO] Docker tests enabled ({docker_test_count} tests) - "
+                "this may take longer than usual."
+            )
+            logger.info(f"\033[36;1m{docker_msg}\033[0m")
+        else:
+            # Explicitly unset the environment variable to disable Docker tests
+            if "LINTRO_RUN_DOCKER_TESTS" in os.environ:
+                del os.environ["LINTRO_RUN_DOCKER_TESTS"]
+
+            if docker_test_count > 0:
+                # Log that Docker tests are disabled in blue format
+                docker_msg = (
+                    f"[LINTRO] Docker tests disabled "
+                    f"({docker_test_count} tests not collected). "
+                    "Use --enable-docker to include them."
+                )
+                logger.info(f"\033[36;1m{docker_msg}\033[0m")
+
+        return (total_available_tests, docker_test_count, original_docker_env)
+
+    def execute_tests(
+        self,
+        cmd: list[str],
+    ) -> tuple[bool, str, int]:
+        """Execute pytest tests and parse output.
+
+        Args:
+            cmd: Command to execute.
+
+        Raises:
+            ValueError: If tool reference is not set.
+
+        Returns:
+            Tuple[bool, str, int]: Tuple of (success, output, return_code).
+        """
+        if self.tool is None:
+            raise ValueError("Tool reference not set on executor")
+
+        success, output = self.tool._run_subprocess(cmd)
+        # Parse output with actual success status
+        # (pytest returns non-zero on failures)
+        return_code = 0 if success else 1
+        return (success, output, return_code)
+
+    def restore_environment(self, original_docker_env: str | None) -> None:
+        """Restore the original environment state.
+
+        Args:
+            original_docker_env: Original value of LINTRO_RUN_DOCKER_TESTS env var.
+        """
+        # Restore original environment state
+        if original_docker_env is not None:
+            os.environ["LINTRO_RUN_DOCKER_TESTS"] = original_docker_env
+        elif "LINTRO_RUN_DOCKER_TESTS" in os.environ:
+            del os.environ["LINTRO_RUN_DOCKER_TESTS"]
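
For orientation, a minimal usage sketch of the new executor, assuming a PytestConfiguration that accepts a run_docker_tests keyword and a parent tool exposing the subprocess hooks seen in this diff; the DummyTool class and the constructor arguments are illustrative, not taken from the package, and collect_tests_once may rely on further tool attributes:

    import subprocess

    from lintro.tools.implementations.pytest.pytest_config import PytestConfiguration
    from lintro.tools.implementations.pytest.pytest_executor import PytestExecutor

    class DummyTool:
        """Hypothetical parent tool exposing only the hooks used in this diff."""

        def _get_executable_command(self, tool_name: str) -> list[str]:
            return [tool_name]

        def _run_subprocess(self, cmd: list[str]) -> tuple[bool, str]:
            proc = subprocess.run(cmd, capture_output=True, text=True)
            return proc.returncode == 0, proc.stdout + proc.stderr

    config = PytestConfiguration(run_docker_tests=False)  # assumed keyword
    executor = PytestExecutor(config=config, tool=DummyTool())

    total, docker_count, original_env = executor.prepare_test_execution(["tests/"])
    try:
        success, output, code = executor.execute_tests(["pytest", "tests/", "-q"])
    finally:
        # Always restore LINTRO_RUN_DOCKER_TESTS to its pre-run state.
        executor.restore_environment(original_env)
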
@@ -0,0 +1,375 @@
+"""Handler functions for pytest tool special modes.
+
+This module contains handler functions extracted from PytestTool to improve
+maintainability and reduce file size. These handlers implement special modes
+like listing plugins, collecting tests, listing fixtures, etc.
+"""
+
+import re
+import shlex
+
+from lintro.models.core.tool_result import ToolResult
+from lintro.tools.implementations.pytest.pytest_utils import (
+    check_plugin_installed,
+    get_pytest_version_info,
+    list_installed_plugins,
+)
+
+
+def handle_list_plugins(tool) -> ToolResult:
+    """Handle list plugins mode.
+
+    Args:
+        tool: PytestTool instance.
+
+    Returns:
+        ToolResult: Results with plugin list.
+    """
+    plugins = list_installed_plugins()
+    version_info = get_pytest_version_info()
+
+    output_lines = [version_info, ""]
+    if plugins:
+        output_lines.append(f"Installed pytest plugins ({len(plugins)}):")
+        for plugin in plugins:
+            output_lines.append(f" - {plugin['name']} ({plugin['version']})")
+    else:
+        output_lines.append("No pytest plugins found.")
+
+    return ToolResult(
+        name=tool.name,
+        success=True,
+        issues=[],
+        output="\n".join(output_lines),
+        issues_count=0,
+    )
+
+
+def handle_check_plugins(
+    tool,
+    required_plugins: str | None,
+) -> ToolResult:
+    """Handle check plugins mode.
+
+    Args:
+        tool: PytestTool instance.
+        required_plugins: Comma-separated list of required plugin names.
+
+    Returns:
+        ToolResult: Results with plugin check status.
+    """
+    if not required_plugins:
+        return ToolResult(
+            name=tool.name,
+            success=False,
+            issues=[],
+            output=(
+                "Error: required_plugins must be specified " "when check_plugins=True"
+            ),
+            issues_count=0,
+        )
+
+    plugin_list = [p.strip() for p in required_plugins.split(",") if p.strip()]
+    missing_plugins: list[str] = []
+    installed_plugins: list[str] = []
+
+    for plugin in plugin_list:
+        if check_plugin_installed(plugin):
+            installed_plugins.append(plugin)
+        else:
+            missing_plugins.append(plugin)
+
+    output_lines = []
+    if installed_plugins:
+        output_lines.append(f"✓ Installed plugins ({len(installed_plugins)}):")
+        for plugin in installed_plugins:
+            output_lines.append(f" - {plugin}")
+
+    if missing_plugins:
+        output_lines.append(f"\n✗ Missing plugins ({len(missing_plugins)}):")
+        for plugin in missing_plugins:
+            output_lines.append(f" - {plugin}")
+        output_lines.append("\nInstall missing plugins with:")
+        quoted_plugins = " ".join(shlex.quote(plugin) for plugin in missing_plugins)
+        output_lines.append(f" pip install {quoted_plugins}")
+
+    success = len(missing_plugins) == 0
+
+    return ToolResult(
+        name=tool.name,
+        success=success,
+        issues=[],
+        output="\n".join(output_lines) if output_lines else "No plugins specified.",
+        issues_count=len(missing_plugins),
+    )
+
+
+def handle_collect_only(
+    tool,
+    target_files: list[str],
+) -> ToolResult:
+    """Handle collect-only mode.
+
+    Args:
+        tool: PytestTool instance.
+        target_files: Files or directories to collect tests from.
+
+    Returns:
+        ToolResult: Results with collected test list.
+    """
+    try:
+        collect_cmd = tool._get_executable_command(tool_name="pytest")
+        collect_cmd.append("--collect-only")
+        collect_cmd.extend(target_files)
+
+        success, output = tool._run_subprocess(collect_cmd)
+        if not success:
+            return ToolResult(
+                name=tool.name,
+                success=False,
+                issues=[],
+                output=output,
+                issues_count=0,
+            )
+
+        # Parse collected tests from output
+        test_list: list[str] = []
+        for line in output.splitlines():
+            line = line.strip()
+            # Match test collection lines
+            # (e.g., "<Function test_example>" or "test_file.py::test_function")
+            if "<Function" in line or "::" in line:
+                # Extract test identifier
+                if "::" in line:
+                    test_list.append(line.split("::")[-1].strip())
+                elif "<Function" in line:
+                    # Extract function name from <Function test_name>
+                    match = re.search(r"<Function\s+(\w+)>", line)
+                    if match:
+                        test_list.append(match.group(1))
+
+        output_lines = [f"Collected {len(test_list)} test(s):", ""]
+        for test in test_list:
+            output_lines.append(f" - {test}")
+
+        return ToolResult(
+            name=tool.name,
+            success=True,
+            issues=[],
+            output="\n".join(output_lines),
+            issues_count=0,
+        )
+    except Exception as e:
+        return ToolResult(
+            name=tool.name,
+            success=False,
+            issues=[],
+            output=f"Error collecting tests: {e}",
+            issues_count=0,
+        )
+
+
+def handle_list_fixtures(
+    tool,
+    target_files: list[str],
+) -> ToolResult:
+    """Handle list fixtures mode.
+
+    Args:
+        tool: PytestTool instance.
+        target_files: Files or directories to collect fixtures from.
+
+    Returns:
+        ToolResult: Results with fixture list.
+    """
+    try:
+        fixtures_cmd = tool._get_executable_command(tool_name="pytest")
+        fixtures_cmd.extend(["--fixtures", "-q"])
+        fixtures_cmd.extend(target_files)
+
+        success, output = tool._run_subprocess(fixtures_cmd)
+        if not success:
+            return ToolResult(
+                name=tool.name,
+                success=False,
+                issues=[],
+                output=output,
+                issues_count=0,
+            )
+
+        return ToolResult(
+            name=tool.name,
+            success=True,
+            issues=[],
+            output=output,
+            issues_count=0,
+        )
+    except Exception as e:
+        return ToolResult(
+            name=tool.name,
+            success=False,
+            issues=[],
+            output=f"Error listing fixtures: {e}",
+            issues_count=0,
+        )
+
+
+def handle_fixture_info(
+    tool,
+    fixture_name: str,
+    target_files: list[str],
+) -> ToolResult:
+    """Handle fixture info mode.
+
+    Args:
+        tool: PytestTool instance.
+        fixture_name: Name of fixture to get info for.
+        target_files: Files or directories to search.
+
+    Returns:
+        ToolResult: Results with fixture information.
+    """
+    try:
+        fixtures_cmd = tool._get_executable_command(tool_name="pytest")
+        fixtures_cmd.extend(["--fixtures", "-v"])
+        fixtures_cmd.extend(target_files)
+
+        success, output = tool._run_subprocess(fixtures_cmd)
+        if not success:
+            return ToolResult(
+                name=tool.name,
+                success=False,
+                issues=[],
+                output=output,
+                issues_count=0,
+            )
+
+        # Extract fixture info for the specific fixture
+        lines = output.splitlines()
+        fixture_info_lines: list[str] = []
+        in_fixture = False
+
+        for line in lines:
+            # Check if line starts with fixture name (pytest format)
+            stripped_line = line.strip()
+            if stripped_line.startswith(fixture_name) and (
+                len(stripped_line) == len(fixture_name)
+                or stripped_line[len(fixture_name)] in (" ", ":", "\n")
+            ):
+                in_fixture = True
+                fixture_info_lines.append(line)
+            elif in_fixture:
+                if line.strip() and not line.startswith(" "):
+                    # New fixture or section, stop
+                    break
+                fixture_info_lines.append(line)
+
+        if fixture_info_lines:
+            output_text = "\n".join(fixture_info_lines)
+        else:
+            output_text = f"Fixture '{fixture_name}' not found."
+
+        return ToolResult(
+            name=tool.name,
+            success=len(fixture_info_lines) > 0,
+            issues=[],
+            output=output_text,
+            issues_count=0,
+        )
+    except Exception as e:
+        return ToolResult(
+            name=tool.name,
+            success=False,
+            issues=[],
+            output=f"Error getting fixture info: {e}",
+            issues_count=0,
+        )
+
+
+def handle_list_markers(tool) -> ToolResult:
+    """Handle list markers mode.
+
+    Args:
+        tool: PytestTool instance.
+
+    Returns:
+        ToolResult: Results with marker list.
+    """
+    try:
+        markers_cmd = tool._get_executable_command(tool_name="pytest")
+        markers_cmd.extend(["--markers"])
+
+        success, output = tool._run_subprocess(markers_cmd)
+        if not success:
+            return ToolResult(
+                name=tool.name,
+                success=False,
+                issues=[],
+                output=output,
+                issues_count=0,
+            )
+
+        return ToolResult(
+            name=tool.name,
+            success=True,
+            issues=[],
+            output=output,
+            issues_count=0,
+        )
+    except Exception as e:
+        return ToolResult(
+            name=tool.name,
+            success=False,
+            issues=[],
+            output=f"Error listing markers: {e}",
+            issues_count=0,
+        )
+
+
+def handle_parametrize_help(tool) -> ToolResult:
+    """Handle parametrize help mode.
+
+    Args:
+        tool: PytestTool instance.
+
+    Returns:
+        ToolResult: Results with parametrization help.
+    """
+    help_text = """Pytest Parametrization Help
+
+Parametrization allows you to run the same test with different inputs.
+
+Basic Usage:
+-----------
+Use @pytest.mark.parametrize to provide multiple input values for a test function.
+The test will run once for each set of parameters.
+
+Example:
+    @pytest.mark.parametrize("input,expected", [(1, 2), (2, 4), (3, 6)])
+    def test_multiply(input, expected):
+        assert input * 2 == expected
+
+Multiple Parameters:
+--------------------
+You can parametrize multiple parameters at once by providing tuples of values.
+
+Using Fixtures with Parametrization:
+-------------------------------------
+Parametrized tests can use fixtures. The parametrization runs for each fixture
+instance, creating a cartesian product of parameters and fixtures.
+
+Multiple Parametrizations:
+--------------------------
+You can stack multiple @pytest.mark.parametrize decorators to create a cartesian
+product of all parameter combinations.
+
+For detailed examples and advanced usage, see:
+https://docs.pytest.org/en/stable/how-to/parametrize.html
+"""
+    return ToolResult(
+        name=tool.name,
+        success=True,
+        issues=[],
+        output=help_text,
+        issues_count=0,
+    )
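
Each handler takes the tool instance and returns a ToolResult carrying name, success, issues, output, and issues_count, so the special modes can be exercised in isolation; a rough sketch using a hypothetical stand-in tool that exposes only what the handlers call (the stand-in class is not part of the package):

    import subprocess

    from lintro.tools.implementations.pytest.pytest_handlers import (
        handle_check_plugins,
        handle_collect_only,
    )

    class FakePytestTool:
        """Hypothetical stand-in exposing only the hooks the handlers use."""

        name = "pytest"

        def _get_executable_command(self, tool_name: str) -> list[str]:
            return [tool_name]

        def _run_subprocess(self, cmd: list[str]) -> tuple[bool, str]:
            proc = subprocess.run(cmd, capture_output=True, text=True)
            return proc.returncode == 0, proc.stdout + proc.stderr

    tool = FakePytestTool()

    # success=False if any named plugin is missing; issues_count reports how many.
    result = handle_check_plugins(tool, required_plugins="pytest-cov,pytest-xdist")
    print(result.success, result.issues_count)

    # Lists collected tests without running them.
    result = handle_collect_only(tool, target_files=["tests/"])
    print(result.output)
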
@@ -0,0 +1,212 @@
+"""Option validation functions for pytest tool.
+
+This module contains validation logic extracted from PytestTool.set_options()
+to improve maintainability and reduce file size.
+"""
+
+from lintro.tools.implementations.pytest.pytest_utils import (
+    get_parallel_workers_from_preset,
+)
+
+
+def validate_pytest_options(
+    verbose: bool | None = None,
+    tb: str | None = None,
+    maxfail: int | None = None,
+    no_header: bool | None = None,
+    disable_warnings: bool | None = None,
+    json_report: bool | None = None,
+    junitxml: str | None = None,
+    run_docker_tests: bool | None = None,
+    slow_test_threshold: float | None = None,
+    total_time_warning: float | None = None,
+    workers: str | None = None,
+    coverage_threshold: float | None = None,
+    auto_junitxml: bool | None = None,
+    detect_flaky: bool | None = None,
+    flaky_min_runs: int | None = None,
+    flaky_failure_rate: float | None = None,
+    html_report: str | None = None,
+    parallel_preset: str | None = None,
+    list_plugins: bool | None = None,
+    check_plugins: bool | None = None,
+    required_plugins: str | None = None,
+    coverage_html: str | None = None,
+    coverage_xml: str | None = None,
+    coverage_report: bool | None = None,
+    collect_only: bool | None = None,
+    list_fixtures: bool | None = None,
+    fixture_info: str | None = None,
+    list_markers: bool | None = None,
+    parametrize_help: bool | None = None,
+    show_progress: bool | None = None,
+    timeout: int | None = None,
+    reruns: int | None = None,
+    reruns_delay: int | None = None,
+) -> None:
+    """Validate pytest-specific options.
+
+    Args:
+        verbose: Enable verbose output.
+        tb: Traceback format (short, long, auto, line, native).
+        maxfail: Stop after first N failures.
+        no_header: Disable header.
+        disable_warnings: Disable warnings.
+        json_report: Enable JSON report output.
+        junitxml: Path for JUnit XML output.
+        run_docker_tests: Enable Docker tests (default: False).
+        slow_test_threshold: Duration threshold in seconds for slow test warning
+            (default: 1.0).
+        total_time_warning: Total execution time threshold in seconds for warning
+            (default: 60.0).
+        workers: Number of parallel workers for pytest-xdist (auto, N, or None).
+        coverage_threshold: Minimum coverage percentage to require (0-100).
+        auto_junitxml: Auto-enable junitxml in CI environments (default: True).
+        detect_flaky: Enable flaky test detection (default: True).
+        flaky_min_runs: Minimum runs before detecting flaky tests (default: 3).
+        flaky_failure_rate: Minimum failure rate to consider flaky (default: 0.3).
+        html_report: Path for HTML report output (pytest-html plugin).
+        parallel_preset: Parallel execution preset (auto, small, medium, large).
+        list_plugins: List all installed pytest plugins.
+        check_plugins: Check if required plugins are installed.
+        required_plugins: Comma-separated list of required plugin names.
+        coverage_html: Path for HTML coverage report (requires pytest-cov).
+        coverage_xml: Path for XML coverage report (requires pytest-cov).
+        coverage_report: Generate both HTML and XML coverage reports.
+        collect_only: List tests without executing them.
+        list_fixtures: List all available fixtures.
+        fixture_info: Show detailed information about a specific fixture.
+        list_markers: List all available markers.
+        parametrize_help: Show help for parametrized tests.
+        show_progress: Show progress during test execution.
+        timeout: Timeout in seconds for individual tests (pytest-timeout plugin).
+        reruns: Number of times to retry failed tests (pytest-rerunfailures plugin).
+        reruns_delay: Delay in seconds between retries (pytest-rerunfailures plugin).
+
+    Raises:
+        ValueError: If an option value is invalid.
+    """
+    if verbose is not None and not isinstance(verbose, bool):
+        raise ValueError("verbose must be a boolean")
+
+    if tb is not None and tb not in ("short", "long", "auto", "line", "native"):
+        raise ValueError("tb must be one of: short, long, auto, line, native")
+
+    if maxfail is not None and (not isinstance(maxfail, int) or maxfail <= 0):
+        raise ValueError("maxfail must be a positive integer")
+
+    if no_header is not None and not isinstance(no_header, bool):
+        raise ValueError("no_header must be a boolean")
+
+    if disable_warnings is not None and not isinstance(disable_warnings, bool):
+        raise ValueError("disable_warnings must be a boolean")
+
+    if json_report is not None and not isinstance(json_report, bool):
+        raise ValueError("json_report must be a boolean")
+
+    if junitxml is not None and not isinstance(junitxml, str):
+        raise ValueError("junitxml must be a string")
+
+    if run_docker_tests is not None and not isinstance(run_docker_tests, bool):
+        raise ValueError("run_docker_tests must be a boolean")
+
+    if slow_test_threshold is not None and (
+        not isinstance(slow_test_threshold, (int, float)) or slow_test_threshold < 0
+    ):
+        raise ValueError("slow_test_threshold must be a non-negative number")
+
+    if total_time_warning is not None and (
+        not isinstance(total_time_warning, (int, float)) or total_time_warning < 0
+    ):
+        raise ValueError("total_time_warning must be a non-negative number")
+
+    if workers is not None and not isinstance(workers, str):
+        raise ValueError("workers must be a string (e.g., 'auto', '2', '4')")
+
+    if coverage_threshold is not None and not isinstance(
+        coverage_threshold,
+        (int, float),
+    ):
+        raise ValueError("coverage_threshold must be a number")
+    if coverage_threshold is not None and not (0 <= coverage_threshold <= 100):
+        raise ValueError("coverage_threshold must be between 0 and 100")
+
+    if auto_junitxml is not None and not isinstance(auto_junitxml, bool):
+        raise ValueError("auto_junitxml must be a boolean")
+
+    if detect_flaky is not None and not isinstance(detect_flaky, bool):
+        raise ValueError("detect_flaky must be a boolean")
+
+    if flaky_min_runs is not None and (
+        not isinstance(flaky_min_runs, int) or flaky_min_runs < 1
+    ):
+        raise ValueError("flaky_min_runs must be a positive integer")
+
+    if flaky_failure_rate is not None:
+        if not isinstance(flaky_failure_rate, (int, float)):
+            raise ValueError("flaky_failure_rate must be a number")
+        if not (0 <= flaky_failure_rate <= 1):
+            raise ValueError("flaky_failure_rate must be between 0 and 1")
+
+    if html_report is not None and not isinstance(html_report, str):
+        raise ValueError("html_report must be a string (path to HTML report)")
+
+    if parallel_preset is not None and not isinstance(parallel_preset, str):
+        raise ValueError("parallel_preset must be a string")
+    # Validate preset value
+    if parallel_preset is not None:
+        try:
+            get_parallel_workers_from_preset(parallel_preset)
+        except ValueError as e:
+            raise ValueError(f"Invalid parallel_preset: {e}") from e
+
+    # Validate plugin options
+    if list_plugins is not None and not isinstance(list_plugins, bool):
+        raise ValueError("list_plugins must be a boolean")
+
+    if check_plugins is not None and not isinstance(check_plugins, bool):
+        raise ValueError("check_plugins must be a boolean")
+
+    if required_plugins is not None and not isinstance(required_plugins, str):
+        raise ValueError("required_plugins must be a string")
+
+    # Validate coverage options
+    if coverage_html is not None and not isinstance(coverage_html, str):
+        raise ValueError("coverage_html must be a string")
+
+    if coverage_xml is not None and not isinstance(coverage_xml, str):
+        raise ValueError("coverage_xml must be a string")
+
+    if coverage_report is not None and not isinstance(coverage_report, bool):
+        raise ValueError("coverage_report must be a boolean")
+
+    # Validate discovery and inspection options
+    if collect_only is not None and not isinstance(collect_only, bool):
+        raise ValueError("collect_only must be a boolean")
+
+    if list_fixtures is not None and not isinstance(list_fixtures, bool):
+        raise ValueError("list_fixtures must be a boolean")
+
+    if fixture_info is not None and not isinstance(fixture_info, str):
+        raise ValueError("fixture_info must be a string")
+
+    if list_markers is not None and not isinstance(list_markers, bool):
+        raise ValueError("list_markers must be a boolean")
+
+    if parametrize_help is not None and not isinstance(parametrize_help, bool):
+        raise ValueError("parametrize_help must be a boolean")
+
+    if show_progress is not None and not isinstance(show_progress, bool):
+        raise ValueError("show_progress must be a boolean")
+
+    # Validate plugin-specific options
+    if timeout is not None and (not isinstance(timeout, int) or timeout <= 0):
+        raise ValueError("timeout must be a positive integer (seconds)")
+
+    if reruns is not None and (not isinstance(reruns, int) or reruns < 0):
+        raise ValueError("reruns must be a non-negative integer")
+
+    if reruns_delay is not None and (
+        not isinstance(reruns_delay, int) or reruns_delay < 0
+    ):
+        raise ValueError("reruns_delay must be a non-negative integer (seconds)")
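
Every option defaults to None and is only checked when supplied; the first invalid value raises ValueError. For example (illustrative values only):

    from lintro.tools.implementations.pytest.pytest_option_validators import (
        validate_pytest_options,
    )

    # Passes: all supplied values satisfy the type and range checks above.
    validate_pytest_options(verbose=True, tb="short", maxfail=3, coverage_threshold=85.0)

    # Raises: tb must be one of short, long, auto, line, native.
    try:
        validate_pytest_options(tb="full")
    except ValueError as exc:
        print(exc)
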