lintro-0.13.2-py3-none-any.whl → lintro-0.17.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. lintro/__init__.py +1 -1
  2. lintro/cli.py +226 -16
  3. lintro/cli_utils/commands/__init__.py +8 -1
  4. lintro/cli_utils/commands/check.py +1 -0
  5. lintro/cli_utils/commands/config.py +325 -0
  6. lintro/cli_utils/commands/init.py +361 -0
  7. lintro/cli_utils/commands/list_tools.py +180 -42
  8. lintro/cli_utils/commands/test.py +316 -0
  9. lintro/cli_utils/commands/versions.py +81 -0
  10. lintro/config/__init__.py +62 -0
  11. lintro/config/config_loader.py +420 -0
  12. lintro/config/lintro_config.py +189 -0
  13. lintro/config/tool_config_generator.py +403 -0
  14. lintro/enums/tool_name.py +2 -0
  15. lintro/enums/tool_type.py +2 -0
  16. lintro/formatters/tools/__init__.py +12 -0
  17. lintro/formatters/tools/eslint_formatter.py +108 -0
  18. lintro/formatters/tools/markdownlint_formatter.py +88 -0
  19. lintro/formatters/tools/pytest_formatter.py +201 -0
  20. lintro/parsers/__init__.py +69 -9
  21. lintro/parsers/bandit/__init__.py +6 -0
  22. lintro/parsers/bandit/bandit_issue.py +49 -0
  23. lintro/parsers/bandit/bandit_parser.py +99 -0
  24. lintro/parsers/black/black_issue.py +4 -0
  25. lintro/parsers/eslint/__init__.py +6 -0
  26. lintro/parsers/eslint/eslint_issue.py +26 -0
  27. lintro/parsers/eslint/eslint_parser.py +63 -0
  28. lintro/parsers/markdownlint/__init__.py +6 -0
  29. lintro/parsers/markdownlint/markdownlint_issue.py +22 -0
  30. lintro/parsers/markdownlint/markdownlint_parser.py +113 -0
  31. lintro/parsers/pytest/__init__.py +21 -0
  32. lintro/parsers/pytest/pytest_issue.py +28 -0
  33. lintro/parsers/pytest/pytest_parser.py +483 -0
  34. lintro/tools/__init__.py +2 -0
  35. lintro/tools/core/timeout_utils.py +112 -0
  36. lintro/tools/core/tool_base.py +255 -45
  37. lintro/tools/core/tool_manager.py +77 -24
  38. lintro/tools/core/version_requirements.py +482 -0
  39. lintro/tools/implementations/pytest/pytest_command_builder.py +311 -0
  40. lintro/tools/implementations/pytest/pytest_config.py +200 -0
  41. lintro/tools/implementations/pytest/pytest_error_handler.py +128 -0
  42. lintro/tools/implementations/pytest/pytest_executor.py +122 -0
  43. lintro/tools/implementations/pytest/pytest_handlers.py +375 -0
  44. lintro/tools/implementations/pytest/pytest_option_validators.py +212 -0
  45. lintro/tools/implementations/pytest/pytest_output_processor.py +408 -0
  46. lintro/tools/implementations/pytest/pytest_result_processor.py +113 -0
  47. lintro/tools/implementations/pytest/pytest_utils.py +697 -0
  48. lintro/tools/implementations/tool_actionlint.py +106 -16
  49. lintro/tools/implementations/tool_bandit.py +23 -7
  50. lintro/tools/implementations/tool_black.py +236 -29
  51. lintro/tools/implementations/tool_darglint.py +180 -21
  52. lintro/tools/implementations/tool_eslint.py +374 -0
  53. lintro/tools/implementations/tool_hadolint.py +94 -25
  54. lintro/tools/implementations/tool_markdownlint.py +354 -0
  55. lintro/tools/implementations/tool_prettier.py +313 -26
  56. lintro/tools/implementations/tool_pytest.py +327 -0
  57. lintro/tools/implementations/tool_ruff.py +247 -70
  58. lintro/tools/implementations/tool_yamllint.py +448 -34
  59. lintro/tools/tool_enum.py +6 -0
  60. lintro/utils/config.py +41 -18
  61. lintro/utils/console_logger.py +211 -25
  62. lintro/utils/path_utils.py +42 -0
  63. lintro/utils/tool_executor.py +336 -39
  64. lintro/utils/tool_utils.py +38 -2
  65. lintro/utils/unified_config.py +926 -0
  66. {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/METADATA +131 -29
  67. lintro-0.17.2.dist-info/RECORD +134 -0
  68. lintro-0.13.2.dist-info/RECORD +0 -96
  69. {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/WHEEL +0 -0
  70. {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/entry_points.txt +0 -0
  71. {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/licenses/LICENSE +0 -0
  72. {lintro-0.13.2.dist-info → lintro-0.17.2.dist-info}/top_level.txt +0 -0
--- /dev/null
+++ b/lintro/tools/implementations/pytest/pytest_utils.py
@@ -0,0 +1,697 @@
+ """Utility functions for pytest tool implementation.
+
+ This module contains helper functions extracted from tool_pytest.py to improve
+ maintainability and reduce file size. Functions are organized by category:
+ - JUnit XML processing
+ - Environment and system utilities
+ - Flaky test detection
+ - Configuration loading
+ - Plugin management
+ """
+
+ import json
+ import os
+ import shlex
+ import subprocess  # nosec B404 - used safely with shell disabled
+ from pathlib import Path
+
+ from loguru import logger
+
+ from lintro.parsers.pytest.pytest_issue import PytestIssue
+
+ # Constants for flaky test detection
+ PYTEST_FLAKY_CACHE_FILE: str = ".pytest_cache/lintro_flaky_tests.json"
+ PYTEST_FLAKY_MIN_RUNS: int = 3  # Minimum runs before detecting flaky tests
+ PYTEST_FLAKY_FAILURE_RATE: float = 0.3  # Consider flaky if fails >= 30% but < 100%
+
+
+ def extract_all_test_results_from_junit(junitxml_path: str) -> dict[str, str] | None:
+     """Extract all test results from JUnit XML file.
+
+     Args:
+         junitxml_path: Path to JUnit XML file.
+
+     Returns:
+         dict[str, str] | None: Dictionary mapping node_id to status
+             (PASSED/FAILED/ERROR/SKIPPED), or None if the file doesn't exist or
+             can't be parsed.
+     """
+     xml_path = Path(junitxml_path)
+     if not xml_path.exists():
+         return None
+
+     try:
+         from defusedxml import ElementTree
+
+         tree = ElementTree.parse(xml_path)
+         root = tree.getroot()
+
+         test_results: dict[str, str] = {}
+
+         for testcase in root.findall(".//testcase"):
+             file_path = testcase.get("file", "")
+             class_name = testcase.get("classname", "")
+             test_name = testcase.get("name", "")
+             if file_path:
+                 if class_name:
+                     node_id = f"{file_path}::{class_name}::{test_name}"
+                 else:
+                     node_id = f"{file_path}::{test_name}"
+             else:
+                 node_id = f"{class_name}::{test_name}" if class_name else test_name
+
+             # Determine status
+             if testcase.find("failure") is not None:
+                 status = "FAILED"
+             elif testcase.find("error") is not None:
+                 status = "ERROR"
+             elif testcase.find("skipped") is not None:
+                 status = "SKIPPED"
+             else:
+                 status = "PASSED"
+
+             test_results[node_id] = status
+
+         return test_results
+     except Exception as e:
+         logger.debug(f"Failed to parse JUnit XML for all tests: {e}")
+         return None
+
+
+ def get_cpu_count() -> int:
+     """Get the number of available CPU cores.
+
+     Returns:
+         int: Number of CPU cores, minimum 1.
+     """
+     try:
+         import multiprocessing
+
+         return max(1, multiprocessing.cpu_count())
+     except Exception:
+         return 1
+
+
+ def get_parallel_workers_from_preset(
+     preset: str,
+     test_count: int | None = None,
+ ) -> str:
+     """Convert parallel preset to worker count.
+
+     Args:
+         preset: Preset name (auto, small, medium, large) or number as string.
+         test_count: Optional test count for dynamic presets.
+
+     Returns:
+         str: Worker count string for pytest-xdist (-n flag).
+
+     Raises:
+         ValueError: If preset is invalid.
+     """
+     preset_lower = preset.lower()
+
+     if preset_lower == "auto":
+         return "auto"
+     elif preset_lower == "small":
+         return "2"
+     elif preset_lower == "medium":
+         return "4"
+     elif preset_lower == "large":
+         cpu_count = get_cpu_count()
+         # Use up to 8 workers for large suites, but not more than CPU count
+         return str(min(8, cpu_count))
+     elif preset_lower.isdigit():
+         # Already a number, return as-is
+         return preset
+     else:
+         raise ValueError(
+             f"Invalid parallel preset: {preset}. "
+             "Must be one of: auto, small, medium, large, or a number",
+         )
+
+
+ def is_ci_environment() -> bool:
+     """Detect if running in a CI/CD environment.
+
+     Checks for common CI environment variables:
+     - CI (generic CI indicator)
+     - GITHUB_ACTIONS (GitHub Actions)
+     - GITLAB_CI (GitLab CI)
+     - JENKINS_URL (Jenkins)
+     - CIRCLECI / CIRCLE_CI (CircleCI)
+     - TRAVIS (Travis CI)
+     - AZURE_HTTP_USER_AGENT (Azure DevOps)
+     - TEAMCITY_VERSION (TeamCity)
+     - BUILDKITE (Buildkite)
+     - DRONE (Drone CI)
+
+     Returns:
+         bool: True if running in CI environment, False otherwise.
+     """
+     ci_indicators = [
+         "CI",
+         "GITHUB_ACTIONS",
+         "GITLAB_CI",
+         "JENKINS_URL",
+         "CIRCLE_CI",
+         "CIRCLECI",
+         "TRAVIS",
+         "AZURE_HTTP_USER_AGENT",
+         "TEAMCITY_VERSION",
+         "BUILDKITE",
+         "DRONE",
+     ]
+     return any(os.environ.get(indicator) for indicator in ci_indicators)
+
+
+ def get_flaky_cache_path() -> Path:
+     """Get the path to the flaky test cache file.
+
+     Returns:
+         Path: Path to the cache file.
+     """
+     cache_dir = Path(".pytest_cache")
+     cache_dir.mkdir(exist_ok=True)
+     return cache_dir / "lintro_flaky_tests.json"
+
+
+ def load_flaky_test_history() -> dict[str, dict[str, int]]:
+     """Load flaky test history from cache file.
+
+     Returns:
+         dict[str, dict[str, int]]: Dictionary mapping test node_id to status counts.
+             Format: {node_id: {"passed": count, "failed": count, "error": count}}
+     """
+     cache_path = get_flaky_cache_path()
+     if not cache_path.exists():
+         return {}
+
+     try:
+         with open(cache_path, encoding="utf-8") as f:
+             return json.load(f)
+     except (json.JSONDecodeError, OSError) as e:
+         logger.debug(f"Failed to load flaky test history: {e}")
+         return {}
+
+
+ def save_flaky_test_history(history: dict[str, dict[str, int]]) -> None:
+     """Save flaky test history to cache file.
+
+     Args:
+         history: Dictionary mapping test node_id to status counts.
+     """
+     cache_path = get_flaky_cache_path()
+     try:
+         with open(cache_path, "w", encoding="utf-8") as f:
+             json.dump(history, f, indent=2)
+     except OSError as e:
+         logger.debug(f"Failed to save flaky test history: {e}")
+
+
+ def update_flaky_test_history(
+     issues: list[PytestIssue],
+     all_test_results: dict[str, str] | None = None,
+ ) -> dict[str, dict[str, int]]:
+     """Update flaky test history with current test results.
+
+     Args:
+         issues: List of parsed test issues (failures/errors).
+         all_test_results: Optional dictionary mapping node_id to status for all tests.
+             If None, only tracks failures from issues.
+
+     Returns:
+         dict[str, dict[str, int]]: Updated history dictionary.
+     """
+     history = load_flaky_test_history()
+
+     # If we have full test results (e.g., from JUnit XML), use those
+     if all_test_results:
+         for node_id, status in all_test_results.items():
+             if node_id not in history:
+                 history[node_id] = {"passed": 0, "failed": 0, "error": 0}
+
+             if status == "FAILED":
+                 history[node_id]["failed"] += 1
+             elif status == "ERROR":
+                 history[node_id]["error"] += 1
+             elif status == "PASSED":
+                 history[node_id]["passed"] += 1
+     else:
+         # Only track failures from issues (simpler but less accurate)
+         for issue in issues:
+             # Skip Mock objects in tests - only process real PytestIssue objects
+             if not isinstance(issue, PytestIssue):
+                 continue
+             if issue.node_id and isinstance(issue.node_id, str):
+                 if issue.node_id not in history:
+                     history[issue.node_id] = {"passed": 0, "failed": 0, "error": 0}
+
+                 if issue.test_status == "FAILED":
+                     history[issue.node_id]["failed"] += 1
+                 elif issue.test_status == "ERROR":
+                     history[issue.node_id]["error"] += 1
+
+     save_flaky_test_history(history)
+     return history
+
+
+ def detect_flaky_tests(
+     history: dict[str, dict[str, int]],
+     min_runs: int = PYTEST_FLAKY_MIN_RUNS,
+     failure_rate: float = PYTEST_FLAKY_FAILURE_RATE,
+ ) -> list[tuple[str, float]]:
+     """Detect flaky tests from history.
+
+     A test is considered flaky if:
+     - It has been run at least min_runs times
+     - It has failures but not 100% failure rate
+     - Failure rate >= failure_rate threshold
+
+     Args:
+         history: Test history dictionary.
+         min_runs: Minimum number of runs before considering flaky.
+         failure_rate: Minimum failure rate to consider flaky (0.0 to 1.0).
+
+     Returns:
+         list[tuple[str, float]]: List of (test_node_id, failure_rate) tuples.
+     """
+     flaky_tests: list[tuple[str, float]] = []
+
+     for node_id, counts in history.items():
+         total_runs = (
+             counts.get("passed", 0) + counts.get("failed", 0) + counts.get("error", 0)
+         )
+
+         if total_runs < min_runs:
+             continue
+
+         failed_count = counts.get("failed", 0) + counts.get("error", 0)
+         current_failure_rate = failed_count / total_runs
+
+         # Consider flaky if:
+         # 1. Has failures (failure_rate > 0)
+         # 2. Not always failing (failure_rate < 1.0)
+         # 3. Failure rate >= threshold
+         if 0 < current_failure_rate < 1.0 and current_failure_rate >= failure_rate:
+             flaky_tests.append((node_id, current_failure_rate))
+
+     # Sort by failure rate descending
+     flaky_tests.sort(key=lambda x: x[1], reverse=True)
+     return flaky_tests
+
+
+ # Module-level cache for pytest config to avoid repeated file parsing
+ _PYTEST_CONFIG_CACHE: dict[tuple[str, float, float], dict] = {}
+
+
+ def clear_pytest_config_cache() -> None:
+     """Clear the pytest config cache.
+
+     This function is primarily intended for testing to ensure
+     config files are re-read when needed.
+     """
+     _PYTEST_CONFIG_CACHE.clear()
+
+
+ def load_pytest_config() -> dict:
+     """Load pytest configuration from pyproject.toml or pytest.ini.
+
+     Priority order (highest to lowest):
+     1. pyproject.toml [tool.pytest.ini_options] (pytest convention)
+     2. pyproject.toml [tool.pytest] (backward compatibility)
+     3. pytest.ini [pytest]
+
+     This function uses caching to avoid repeatedly parsing config files
+     during the same process run. Cache is keyed by working directory and
+     file modification times to ensure freshness.
+
+     Returns:
+         dict: Pytest configuration dictionary.
+     """
+     cwd = os.getcwd()
+     pyproject_path = Path("pyproject.toml")
+     pytest_ini_path = Path("pytest.ini")
+
+     # Create cache key from working directory and file modification times
+     cache_key = (
+         cwd,
+         pyproject_path.stat().st_mtime if pyproject_path.exists() else 0.0,
+         pytest_ini_path.stat().st_mtime if pytest_ini_path.exists() else 0.0,
+     )
+
+     # Return cached result if available
+     if cache_key in _PYTEST_CONFIG_CACHE:
+         return _PYTEST_CONFIG_CACHE[cache_key].copy()
+
+     config: dict = {}
+
+     # Check pyproject.toml first
+     if pyproject_path.exists():
+         try:
+             import tomllib
+
+             with open(pyproject_path, "rb") as f:
+                 pyproject_data = tomllib.load(f)
+             if "tool" in pyproject_data and "pytest" in pyproject_data["tool"]:
+                 pytest_tool_data = pyproject_data["tool"]["pytest"]
+                 # Check for ini_options first (pytest convention)
+                 if (
+                     isinstance(pytest_tool_data, dict)
+                     and "ini_options" in pytest_tool_data
+                 ):
+                     config = pytest_tool_data["ini_options"]
+                 # Fall back to direct pytest config (backward compatibility)
+                 elif isinstance(pytest_tool_data, dict):
+                     config = pytest_tool_data
+         except Exception as e:
+             logger.warning(
+                 f"Failed to load pytest configuration from pyproject.toml: {e}",
+             )
+
+     # Check pytest.ini (lowest priority, updates existing config)
+     if pytest_ini_path.exists():
+         try:
+             import configparser
+
+             parser = configparser.ConfigParser()
+             parser.read(pytest_ini_path)
+             if "pytest" in parser:
+                 config.update(dict(parser["pytest"]))
+         except Exception as e:
+             logger.warning(f"Failed to load pytest configuration from pytest.ini: {e}")
+
+     # Cache the result
+     _PYTEST_CONFIG_CACHE[cache_key] = config.copy()
+     return config
+
+
+ def load_lintro_ignore() -> list[str]:
+     """Load ignore patterns from .lintro-ignore file.
+
+     Returns:
+         list[str]: List of ignore patterns.
+     """
+     from lintro.utils.path_utils import find_lintro_ignore
+
+     ignore_patterns: list[str] = []
+     ignore_file = find_lintro_ignore()
+
+     if ignore_file and ignore_file.exists():
+         try:
+             with open(ignore_file, encoding="utf-8") as f:
+                 for line in f:
+                     line = line.strip()
+                     if line and not line.startswith("#"):
+                         ignore_patterns.append(line)
+         except Exception as e:
+             logger.warning(f"Failed to load .lintro-ignore: {e}")
+
+     return ignore_patterns
+
+
+ def load_file_patterns_from_config(
+     pytest_config: dict,
+ ) -> list[str]:
+     """Load file patterns from pytest configuration.
+
+     Args:
+         pytest_config: Pytest configuration dictionary.
+
+     Returns:
+         list[str]: File patterns from config, or empty list if not configured.
+     """
+     if not pytest_config:
+         return []
+
+     # Get python_files from config
+     python_files = pytest_config.get("python_files")
+     if not python_files:
+         return []
+
+     # Handle both string and list formats
+     if isinstance(python_files, str):
+         # Split on whitespace and commas
+         patterns = [
+             p.strip() for p in python_files.replace(",", " ").split() if p.strip()
+         ]
+         return patterns
+     elif isinstance(python_files, list):
+         return python_files
+     else:
+         logger.warning(f"Unexpected python_files type: {type(python_files)}")
+         return []
+
+
+ def initialize_pytest_tool_config(tool) -> None:
+     """Initialize pytest tool configuration from config files.
+
+     Loads pytest config, file patterns, and default options.
+     Updates tool.config.file_patterns and tool.options.
+
+     Args:
+         tool: PytestTool instance to initialize.
+     """
+     # Load pytest configuration
+     pytest_config = load_pytest_config()
+
+     # Load file patterns from config if available
+     config_file_patterns = load_file_patterns_from_config(pytest_config)
+     if config_file_patterns:
+         # Override default patterns with config patterns
+         tool.config.file_patterns = config_file_patterns
+
+     # Set default options based on configuration
+     default_options = {
+         "verbose": True,
+         "tb": "short",  # Traceback format
+         "maxfail": None,  # Don't stop early - run all tests
+         "no_header": True,
+         "disable_warnings": True,
+     }
+
+     # Override with config file settings
+     if pytest_config and "addopts" in pytest_config:
+         # Parse addopts string with proper handling of quoted values
+         tokens = shlex.split(pytest_config["addopts"])
+         idx = 0
+         while idx < len(tokens):
+             token = tokens[idx]
+             idx += 1
+             if not token.startswith("-"):
+                 continue
+
+             key = token.lstrip("-")
+             value: object = True
+             if "=" in key:
+                 key, raw = key.split("=", 1)
+                 value = raw
+             elif idx < len(tokens) and not tokens[idx].startswith("-"):
+                 value = tokens[idx]
+                 idx += 1
+
+             option_key = key.replace("-", "_")
+             default_options[option_key] = value
+
+     tool.options.update(default_options)
+
+
+ def check_plugin_installed(plugin_name: str) -> bool:
+     """Check if a pytest plugin is installed.
+
+     Args:
+         plugin_name: Name of the plugin to check (e.g., 'pytest-cov').
+
+     Returns:
+         bool: True if plugin is installed, False otherwise.
+     """
+     import importlib.metadata
+
+     # Try to find the plugin package
+     try:
+         importlib.metadata.distribution(plugin_name)
+         return True
+     except importlib.metadata.PackageNotFoundError:
+         # Try alternative names (e.g., pytest-cov -> pytest_cov)
+         alt_name = plugin_name.replace("-", "_")
+         try:
+             importlib.metadata.distribution(alt_name)
+             return True
+         except importlib.metadata.PackageNotFoundError:
+             return False
+
+
+ def list_installed_plugins() -> list[dict[str, str]]:
+     """List all installed pytest plugins.
+
+     Returns:
+         list[dict[str, str]]: List of plugin information dictionaries with
+             'name' and 'version' keys.
+     """
+     plugins: list[dict[str, str]] = []
+
+     import importlib.metadata
+
+     # Get all installed packages
+     distributions = importlib.metadata.distributions()
+
+     # Filter for pytest plugins
+     for dist in distributions:
+         dist_name = dist.metadata.get("Name", "")
+         if dist_name.startswith("pytest-") or dist_name.startswith("pytest_"):
+             version = dist.metadata.get("Version", "unknown")
+             plugins.append({"name": dist_name, "version": version})
+
+     # Sort by name
+     plugins.sort(key=lambda x: x["name"])
+     return plugins
+
+
+ def get_pytest_version_info() -> str:
+     """Get pytest version and plugin information.
+
+     Returns:
+         str: Formatted string with pytest version and plugin list.
+     """
+     try:
+         cmd = ["pytest", "--version"]
+         result = subprocess.run(  # nosec B603 - pytest is a trusted executable
+             cmd,
+             capture_output=True,
+             text=True,
+             timeout=10,
+             check=False,
+         )
+         return result.stdout.strip()
+     except Exception:
+         return "pytest version information unavailable"
+
+
+ def collect_tests_once(
+     tool,
+     target_files: list[str],
+ ) -> tuple[int, int]:
+     """Collect tests once and return both total count and docker test count.
+
+     This function optimizes test collection by running pytest --collect-only
+     once and extracting both metrics from the same output, avoiding the
+     overhead of duplicate collection calls.
+
+     Args:
+         tool: PytestTool instance.
+         target_files: Files or directories to check.
+
+     Returns:
+         tuple[int, int]: Tuple of (total_test_count, docker_test_count).
+     """
+     import re
+
+     try:
+         # Use pytest --collect-only to list all tests
+         collect_cmd = tool._get_executable_command(tool_name="pytest")
+         collect_cmd.append("--collect-only")
+         collect_cmd.extend(target_files)
+
+         # Temporarily enable all tests to see total count
+         original_docker_env = os.environ.get("LINTRO_RUN_DOCKER_TESTS")
+         os.environ["LINTRO_RUN_DOCKER_TESTS"] = "1"
+
+         try:
+             success, output = tool._run_subprocess(collect_cmd)
+             if not success:
+                 return (0, 0)
+
+             # Extract the total count from collection output
+             # Format: "XXXX tests collected in Y.YYs" or "1 test collected"
+             total_count = 0
+             match = re.search(r"(\d+)\s+tests?\s+collected", output)
+             if match:
+                 total_count = int(match.group(1))
+
+             # Count docker tests from the same output
+             # Track when we're inside the docker directory and count Function items
+             docker_test_count = 0
+             in_docker_dir = False
+             depth = 0
+
+             for line in output.splitlines():
+                 # Stop counting when we hit coverage section
+                 if "coverage:" in line or "TOTAL" in line:
+                     break
+
+                 stripped = line.strip()
+
+                 # Check if we're entering the docker directory
+                 if "<Dir docker>" in line or "<Package docker>" in line:
+                     in_docker_dir = True
+                     depth = len(line) - len(stripped)  # Track indentation level
+                     continue
+
+                 # Check if we're leaving the docker directory
+                 # (next directory at same or higher level)
+                 if in_docker_dir and stripped.startswith("<"):
+                     current_depth = len(line) - len(stripped)
+                     if current_depth <= depth and not stripped.startswith(
+                         "<Function",
+                     ):
+                         # We've left the docker directory
+                         # (backed up to same or higher level)
+                         in_docker_dir = False
+                         continue
+
+                 # Count Function items while inside docker directory
+                 if in_docker_dir and "<Function" in line:
+                     docker_test_count += 1
+
+             return (total_count, docker_test_count)
+         finally:
+             # Restore original environment
+             if original_docker_env is not None:
+                 os.environ["LINTRO_RUN_DOCKER_TESTS"] = original_docker_env
+             elif "LINTRO_RUN_DOCKER_TESTS" in os.environ:
+                 del os.environ["LINTRO_RUN_DOCKER_TESTS"]
+     except Exception as e:
+         logger.debug(f"Failed to collect tests: {e}")
+         return (0, 0)
+
+
+ def get_total_test_count(
+     tool,
+     target_files: list[str],
+ ) -> int:
+     """Get total count of all available tests (including deselected ones).
+
+     Note: This function is kept for backward compatibility but delegates to
+     collect_tests_once() for efficiency. Consider using collect_tests_once()
+     directly if you also need docker test count.
+
+     Args:
+         tool: PytestTool instance.
+         target_files: Files or directories to check.
+
+     Returns:
+         int: Total number of tests that exist.
+     """
+     total_count, _ = collect_tests_once(tool, target_files)
+     return total_count
+
+
+ def count_docker_tests(
+     tool,
+     target_files: list[str],
+ ) -> int:
+     """Count docker tests that would be skipped.
+
+     Note: This function is kept for backward compatibility but delegates to
+     collect_tests_once() for efficiency. Consider using collect_tests_once()
+     directly if you also need total test count.
+
+     Args:
+         tool: PytestTool instance.
+         target_files: Files or directories to check.
+
+     Returns:
+         int: Number of docker tests found.
+     """
+     _, docker_count = collect_tests_once(tool, target_files)
+     return docker_count
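
The flaky-test helpers above form a small pipeline: extract_all_test_results_from_junit() turns a JUnit XML report into a node_id-to-status map, update_flaky_test_history() merges each run's outcomes into .pytest_cache/lintro_flaky_tests.json, and detect_flaky_tests() flags tests that fail intermittently. A minimal usage sketch follows; it is not part of the diff, and it assumes lintro 0.17.2 is installed and is run from a writable project root. The `runs` data is hypothetical, standing in for real JUnit parses:

    from lintro.tools.implementations.pytest import pytest_utils as pu

    # Hypothetical outcomes for three consecutive runs; in practice each dict
    # would come from pu.extract_all_test_results_from_junit("report.xml").
    runs = [
        {"tests/test_api.py::test_retry": "PASSED"},
        {"tests/test_api.py::test_retry": "FAILED"},
        {"tests/test_api.py::test_retry": "PASSED"},
    ]

    history = {}
    for results in runs:
        # Each call loads the on-disk cache (if any), merges this run's
        # outcomes into it, and writes it back.
        history = pu.update_flaky_test_history(issues=[], all_test_results=results)

    for node_id, rate in pu.detect_flaky_tests(history):
        print(f"{node_id}: fails {rate:.0%} of runs")

Starting from an empty cache, three recorded runs with one failure give a 1/3 failure rate, which meets the default thresholds (at least PYTEST_FLAKY_MIN_RUNS=3 runs, failure rate >= 0.3 but below 100%), so the test is reported as flaky.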