lintro 0.6.2__py3-none-any.whl → 0.17.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. lintro/__init__.py +1 -1
  2. lintro/cli.py +230 -14
  3. lintro/cli_utils/commands/__init__.py +8 -1
  4. lintro/cli_utils/commands/check.py +1 -0
  5. lintro/cli_utils/commands/config.py +325 -0
  6. lintro/cli_utils/commands/format.py +2 -2
  7. lintro/cli_utils/commands/init.py +361 -0
  8. lintro/cli_utils/commands/list_tools.py +180 -42
  9. lintro/cli_utils/commands/test.py +316 -0
  10. lintro/cli_utils/commands/versions.py +81 -0
  11. lintro/config/__init__.py +62 -0
  12. lintro/config/config_loader.py +420 -0
  13. lintro/config/lintro_config.py +189 -0
  14. lintro/config/tool_config_generator.py +403 -0
  15. lintro/enums/__init__.py +1 -0
  16. lintro/enums/darglint_strictness.py +10 -0
  17. lintro/enums/hadolint_enums.py +22 -0
  18. lintro/enums/tool_name.py +2 -0
  19. lintro/enums/tool_type.py +2 -0
  20. lintro/enums/yamllint_format.py +11 -0
  21. lintro/exceptions/__init__.py +1 -0
  22. lintro/formatters/__init__.py +1 -0
  23. lintro/formatters/core/__init__.py +1 -0
  24. lintro/formatters/core/output_style.py +11 -0
  25. lintro/formatters/core/table_descriptor.py +8 -0
  26. lintro/formatters/styles/csv.py +2 -0
  27. lintro/formatters/styles/grid.py +2 -0
  28. lintro/formatters/styles/html.py +2 -0
  29. lintro/formatters/styles/json.py +2 -0
  30. lintro/formatters/styles/markdown.py +2 -0
  31. lintro/formatters/styles/plain.py +2 -0
  32. lintro/formatters/tools/__init__.py +12 -0
  33. lintro/formatters/tools/black_formatter.py +27 -5
  34. lintro/formatters/tools/darglint_formatter.py +16 -1
  35. lintro/formatters/tools/eslint_formatter.py +108 -0
  36. lintro/formatters/tools/hadolint_formatter.py +13 -0
  37. lintro/formatters/tools/markdownlint_formatter.py +88 -0
  38. lintro/formatters/tools/prettier_formatter.py +15 -0
  39. lintro/formatters/tools/pytest_formatter.py +201 -0
  40. lintro/formatters/tools/ruff_formatter.py +26 -5
  41. lintro/formatters/tools/yamllint_formatter.py +14 -1
  42. lintro/models/__init__.py +1 -0
  43. lintro/models/core/__init__.py +1 -0
  44. lintro/models/core/tool_config.py +11 -7
  45. lintro/parsers/__init__.py +69 -9
  46. lintro/parsers/actionlint/actionlint_parser.py +1 -1
  47. lintro/parsers/bandit/__init__.py +6 -0
  48. lintro/parsers/bandit/bandit_issue.py +49 -0
  49. lintro/parsers/bandit/bandit_parser.py +99 -0
  50. lintro/parsers/black/black_issue.py +4 -0
  51. lintro/parsers/darglint/__init__.py +1 -0
  52. lintro/parsers/darglint/darglint_issue.py +11 -0
  53. lintro/parsers/eslint/__init__.py +6 -0
  54. lintro/parsers/eslint/eslint_issue.py +26 -0
  55. lintro/parsers/eslint/eslint_parser.py +63 -0
  56. lintro/parsers/markdownlint/__init__.py +6 -0
  57. lintro/parsers/markdownlint/markdownlint_issue.py +22 -0
  58. lintro/parsers/markdownlint/markdownlint_parser.py +113 -0
  59. lintro/parsers/prettier/__init__.py +1 -0
  60. lintro/parsers/prettier/prettier_issue.py +12 -0
  61. lintro/parsers/prettier/prettier_parser.py +1 -1
  62. lintro/parsers/pytest/__init__.py +21 -0
  63. lintro/parsers/pytest/pytest_issue.py +28 -0
  64. lintro/parsers/pytest/pytest_parser.py +483 -0
  65. lintro/parsers/ruff/ruff_parser.py +6 -2
  66. lintro/parsers/yamllint/__init__.py +1 -0
  67. lintro/tools/__init__.py +3 -1
  68. lintro/tools/core/__init__.py +1 -0
  69. lintro/tools/core/timeout_utils.py +112 -0
  70. lintro/tools/core/tool_base.py +286 -50
  71. lintro/tools/core/tool_manager.py +77 -24
  72. lintro/tools/core/version_requirements.py +482 -0
  73. lintro/tools/implementations/__init__.py +1 -0
  74. lintro/tools/implementations/pytest/pytest_command_builder.py +311 -0
  75. lintro/tools/implementations/pytest/pytest_config.py +200 -0
  76. lintro/tools/implementations/pytest/pytest_error_handler.py +128 -0
  77. lintro/tools/implementations/pytest/pytest_executor.py +122 -0
  78. lintro/tools/implementations/pytest/pytest_handlers.py +375 -0
  79. lintro/tools/implementations/pytest/pytest_option_validators.py +212 -0
  80. lintro/tools/implementations/pytest/pytest_output_processor.py +408 -0
  81. lintro/tools/implementations/pytest/pytest_result_processor.py +113 -0
  82. lintro/tools/implementations/pytest/pytest_utils.py +697 -0
  83. lintro/tools/implementations/tool_actionlint.py +106 -16
  84. lintro/tools/implementations/tool_bandit.py +34 -29
  85. lintro/tools/implementations/tool_black.py +236 -29
  86. lintro/tools/implementations/tool_darglint.py +183 -22
  87. lintro/tools/implementations/tool_eslint.py +374 -0
  88. lintro/tools/implementations/tool_hadolint.py +94 -25
  89. lintro/tools/implementations/tool_markdownlint.py +354 -0
  90. lintro/tools/implementations/tool_prettier.py +317 -24
  91. lintro/tools/implementations/tool_pytest.py +327 -0
  92. lintro/tools/implementations/tool_ruff.py +278 -84
  93. lintro/tools/implementations/tool_yamllint.py +448 -34
  94. lintro/tools/tool_enum.py +8 -0
  95. lintro/utils/__init__.py +1 -0
  96. lintro/utils/ascii_normalize_cli.py +5 -0
  97. lintro/utils/config.py +41 -18
  98. lintro/utils/console_logger.py +211 -25
  99. lintro/utils/path_utils.py +42 -0
  100. lintro/utils/tool_executor.py +339 -45
  101. lintro/utils/tool_utils.py +51 -24
  102. lintro/utils/unified_config.py +926 -0
  103. {lintro-0.6.2.dist-info → lintro-0.17.2.dist-info}/METADATA +172 -30
  104. lintro-0.17.2.dist-info/RECORD +134 -0
  105. lintro-0.6.2.dist-info/RECORD +0 -96
  106. {lintro-0.6.2.dist-info → lintro-0.17.2.dist-info}/WHEEL +0 -0
  107. {lintro-0.6.2.dist-info → lintro-0.17.2.dist-info}/entry_points.txt +0 -0
  108. {lintro-0.6.2.dist-info → lintro-0.17.2.dist-info}/licenses/LICENSE +0 -0
  109. {lintro-0.6.2.dist-info → lintro-0.17.2.dist-info}/top_level.txt +0 -0
lintro/tools/implementations/pytest/pytest_output_processor.py (new file)
@@ -0,0 +1,408 @@
+ """Output processing functions for pytest tool.
+
+ This module contains output parsing, summary extraction, performance warnings,
+ and flaky test detection logic extracted from PytestTool to improve
+ maintainability and reduce file size.
+ """
+
+ import json
+ from pathlib import Path
+
+ from loguru import logger
+
+ from lintro.parsers.pytest.pytest_issue import PytestIssue
+ from lintro.parsers.pytest.pytest_parser import (
+     extract_pytest_summary,
+     parse_pytest_output,
+ )
+ from lintro.tools.implementations.pytest.pytest_utils import (
+     detect_flaky_tests,
+     extract_all_test_results_from_junit,
+     is_ci_environment,
+     update_flaky_test_history,
+ )
+
+ # Constants for pytest configuration
+ PYTEST_SLOW_TEST_THRESHOLD: float = 1.0  # Warn if any test takes > 1 second
+ PYTEST_TOTAL_TIME_WARNING: float = 60.0  # Warn if total execution time > 60 seconds
+ PYTEST_FLAKY_MIN_RUNS: int = 3  # Minimum runs before detecting flaky tests
+ PYTEST_FLAKY_FAILURE_RATE: float = 0.3  # Consider flaky if fails >= 30% but < 100%
+
+
+ def parse_pytest_output_with_fallback(
+     output: str,
+     return_code: int,
+     options: dict,
+     subprocess_start_time: float | None = None,
+ ) -> list[PytestIssue]:
+     """Parse pytest output into issues with format detection and fallback.
+
+     Prioritizes JSON format when available, then JUnit XML, then falls back to text.
+     Validates parsed output structure to ensure reliability.
+     Always tries to parse JUnit XML file if available to capture skipped tests.
+
+     Args:
+         output: Raw output from pytest.
+         return_code: Return code from pytest.
+         options: Options dictionary.
+         subprocess_start_time: Optional Unix timestamp when subprocess started.
+             If provided, only JUnit XML files modified after this time will be read.
+
+     Returns:
+         list[PytestIssue]: Parsed test failures, errors, and skips.
+     """
+     issues = []
+
+     # Try to parse JUnit XML file if it exists and was explicitly requested
+     # This captures all test results including skips when using JUnit XML format
+     # But only if the output we're parsing is not already JUnit XML
+     # AND we're not in JSON mode (prioritize JSON over JUnit XML)
+     # Check this BEFORE early return to ensure JUnit XML parsing happens even
+     # when output is empty (e.g., quiet mode or redirected output)
+     junitxml_path = None
+     if (
+         options.get("junitxml")
+         and (not output or not output.strip().startswith("<?xml"))
+         and not options.get("json_report", False)
+     ):
+         junitxml_path = options.get("junitxml")
+
+     # Early return only if output is empty AND no JUnit XML file to parse
+     if not output and not (junitxml_path and Path(junitxml_path).exists()):
+         return []
+
+     if junitxml_path and Path(junitxml_path).exists():
+         # Only read the file if it was modified after subprocess started
+         # This prevents reading stale files from previous test runs
+         junitxml_file = Path(junitxml_path)
+         file_mtime = junitxml_file.stat().st_mtime
+         should_read = True
+
+         if subprocess_start_time is not None and file_mtime < subprocess_start_time:
+             logger.debug(
+                 f"Skipping stale JUnit XML file {junitxml_path} "
+                 f"(modified before subprocess started)",
+             )
+             should_read = False
+
+         if should_read:
+             try:
+                 with open(junitxml_path, encoding="utf-8") as f:
+                     junit_content = f.read()
+                 junit_issues = parse_pytest_output(junit_content, format="junit")
+                 if junit_issues:
+                     issues.extend(junit_issues)
+                     logger.debug(
+                         f"Parsed {len(junit_issues)} issues from JUnit XML file",
+                     )
+             except OSError as e:
+                 logger.debug(f"Failed to read JUnit XML file {junitxml_path}: {e}")
+
+     # If we already have issues from JUnit XML, return them
+     # Otherwise, fall back to parsing the output
+     if issues:
+         return issues
+
+     # Try to detect output format automatically
+     # Priority: JSON > JUnit XML > Text
+     output_format = "text"
+
+     # Check for JSON format (pytest-json-report)
+     if options.get("json_report", False):
+         output_format = "json"
+     elif options.get("junitxml"):
+         output_format = "junit"
+     else:
+         # Auto-detect format from output content
+         # Check for JSON report file reference or JSON content
+         if "pytest-report.json" in output or (
+             output.strip().startswith("{") and "test_reports" in output
+         ):
+             output_format = "json"
+         # Check for JUnit XML structure
+         elif output.strip().startswith("<?xml") and "<testsuite" in output:
+             output_format = "junit"
+         # Default to text parsing
+         else:
+             output_format = "text"
+
+     # Parse based on detected format
+     issues = parse_pytest_output(output, format=output_format)
+
+     # Validate parsed output structure
+     if not isinstance(issues, list):
+         logger.warning(
+             f"Parser returned unexpected type: {type(issues)}, "
+             "falling back to text parsing",
+         )
+         issues = []
+     else:
+         # Validate that all items are PytestIssue instances
+         validated_issues = []
+         for issue in issues:
+             if isinstance(issue, PytestIssue):
+                 validated_issues.append(issue)
+             else:
+                 logger.warning(
+                     f"Skipping invalid issue type: {type(issue)}",
+                 )
+         issues = validated_issues
+
+     # If no issues found but return code indicates failure, try text parsing
+     if not issues and return_code != 0 and output_format != "text":
+         logger.debug(
+             f"No issues parsed from {output_format} format, "
+             "trying text parsing fallback",
+         )
+         fallback_issues = parse_pytest_output(output, format="text")
+         if fallback_issues:
+             logger.info(
+                 f"Fallback text parsing found {len(fallback_issues)} issues",
+             )
+             issues = fallback_issues
+
+     return issues
+
+
+ def process_test_summary(
+     output: str,
+     issues: list[PytestIssue],
+     total_available_tests: int,
+     docker_test_count: int,
+     run_docker_tests: bool,
+ ) -> dict:
+     """Process test summary and calculate skipped tests.
+
+     Args:
+         output: Raw output from pytest.
+         issues: Parsed test issues.
+         total_available_tests: Total number of available tests.
+         docker_test_count: Number of docker tests.
+         run_docker_tests: Whether docker tests were enabled.
+
+     Returns:
+         dict: Summary data dictionary.
+     """
+     # Extract summary statistics
+     summary = extract_pytest_summary(output)
+
+     # Filter to only failed/error issues for display
+     failed_issues = [
+         issue for issue in issues if issue.test_status in ("FAILED", "ERROR")
+     ]
+
+     # Use actual failed issues count, not summary count
+     # (in case parsing is inconsistent)
+     actual_failures = len(failed_issues)
+
+     # Calculate docker skipped tests
+     # If docker tests are disabled and we have some,
+     # they should show as skipped in the output
+     docker_skipped = 0
+     if not run_docker_tests and docker_test_count > 0:
+         # When Docker tests are disabled, they are deselected by pytest
+         # so they won't appear in summary.skipped
+         docker_skipped = docker_test_count
+
+     # Calculate actual skipped tests (tests that exist but weren't run)
+     # This includes deselected tests that pytest doesn't report in summary
+     # Note: summary.error is already counted in actual_failures, so don't double-count
+     # Include xfailed and xpassed in collected count as they are tests that ran
+     collected_tests = (
+         summary.passed
+         + actual_failures
+         + summary.skipped
+         + summary.xfailed
+         + summary.xpassed
+     )
+     actual_skipped = max(0, total_available_tests - collected_tests)
+
+     logger.debug(f"Total available tests: {total_available_tests}")
+     logger.debug(f"Collected tests: {collected_tests}")
+     logger.debug(
+         f"Summary: passed={summary.passed}, "
+         f"failed={actual_failures}, "
+         f"skipped={summary.skipped}, "
+         f"error={summary.error}",
+     )
+     logger.debug(f"Actual skipped: {actual_skipped}")
+     logger.debug(f"Docker skipped: {docker_skipped}")
+
+     # Use the larger of summary.skipped or actual_skipped
+     # (summary.skipped is runtime skips, actual_skipped includes deselected)
+     # But ensure docker_skipped is included in the total
+     total_skipped = max(summary.skipped, actual_skipped)
+
+     # Ensure docker_skipped is included in the total skipped count
+     # This makes Docker tests show as skipped when --enable-docker is not used
+     if docker_skipped > 0 and total_skipped < docker_skipped:
+         total_skipped = docker_skipped
+
+     summary_data = {
+         "passed": summary.passed,
+         # Use actual parsed failures, not regex summary
+         "failed": actual_failures,
+         "skipped": total_skipped,
+         "error": summary.error,
+         "docker_skipped": docker_skipped,
+         "duration": summary.duration,
+         "total": total_available_tests,
+     }
+
+     return summary_data
+
+
+ def detect_and_log_slow_tests(
+     issues: list[PytestIssue],
+     options: dict,
+ ) -> list[tuple[str, float]]:
+     """Detect slow tests and log warnings.
+
+     Args:
+         issues: List of parsed test issues.
+         options: Options dictionary.
+
+     Returns:
+         list[tuple[str, float]]: List of (test_name, duration) tuples for slow tests.
+     """
+     slow_tests: list[tuple[str, float]] = []
+     # Check all issues (including passed tests) for slow tests
+     if issues:
+         # Find slow tests (individual test duration > threshold)
+         slow_threshold = options.get(
+             "slow_test_threshold",
+             PYTEST_SLOW_TEST_THRESHOLD,
+         )
+         for issue in issues:
+             if (
+                 issue.duration
+                 and isinstance(issue.duration, (int, float))
+                 and issue.duration > slow_threshold
+             ):
+                 slow_tests.append((issue.test_name, issue.duration))
+
+     # Log slow test files
+     if slow_tests:
+         # Sort by duration descending
+         slow_tests.sort(key=lambda x: x[1], reverse=True)
+         slow_threshold = options.get(
+             "slow_test_threshold",
+             PYTEST_SLOW_TEST_THRESHOLD,
+         )
+         slow_msg = f"🐌 Found {len(slow_tests)} slow test(s) (> {slow_threshold}s):"
+         logger.info(slow_msg)
+         for test_name, duration in slow_tests[:10]:  # Show top 10 slowest
+             logger.info(f" - {test_name}: {duration:.2f}s")
+         if len(slow_tests) > 10:
+             logger.info(f" ... and {len(slow_tests) - 10} more")
+
+     return slow_tests
+
+
+ def check_total_time_warning(
+     summary_duration: float,
+     options: dict,
+ ) -> None:
+     """Check and warn if total execution time exceeds threshold.
+
+     Args:
+         summary_duration: Total test execution duration.
+         options: Options dictionary.
+     """
+     total_time_warning = options.get(
+         "total_time_warning",
+         PYTEST_TOTAL_TIME_WARNING,
+     )
+     if summary_duration > total_time_warning:
+         warning_msg = (
+             f"⚠️ Tests took {summary_duration:.1f}s to run "
+             f"(threshold: {total_time_warning}s). "
+             "Consider optimizing slow tests or using pytest-xdist "
+             "for parallel execution."
+         )
+         logger.warning(warning_msg)
+
+
+ def detect_and_log_flaky_tests(
+     issues: list[PytestIssue],
+     options: dict,
+ ) -> list[tuple[str, float]]:
+     """Detect flaky tests and log warnings.
+
+     Args:
+         issues: List of parsed test issues.
+         options: Options dictionary.
+
+     Returns:
+         list[tuple[str, float]]: List of (node_id, failure_rate) tuples for flaky tests.
+     """
+     enable_flaky_detection = options.get("detect_flaky", True)
+     flaky_tests: list[tuple[str, float]] = []
+     if enable_flaky_detection:
+         # Try to get all test results from JUnit XML if available
+         all_test_results: dict[str, str] | None = None
+         junitxml_path = options.get("junitxml") or (
+             "report.xml" if is_ci_environment() else None
+         )
+         if junitxml_path and Path(junitxml_path).exists():
+             all_test_results = extract_all_test_results_from_junit(
+                 junitxml_path,
+             )
+
+         # Update flaky test history
+         history = update_flaky_test_history(issues, all_test_results)
+
+         # Detect flaky tests
+         min_runs = options.get("flaky_min_runs", PYTEST_FLAKY_MIN_RUNS)
+         failure_rate = options.get(
+             "flaky_failure_rate",
+             PYTEST_FLAKY_FAILURE_RATE,
+         )
+         flaky_tests = detect_flaky_tests(history, min_runs, failure_rate)
+
+         # Report flaky tests
+         if flaky_tests:
+             flaky_msg = f"⚠️ Found {len(flaky_tests)} potentially flaky test(s):"
+             logger.warning(flaky_msg)
+             for node_id, rate in flaky_tests[:10]:  # Show top 10 flakiest
+                 logger.warning(
+                     f" - {node_id}: {rate:.0%} failure rate "
+                     f"({history[node_id]['failed'] + history[node_id]['error']}"
+                     f" failures in {sum(history[node_id].values())} runs)",
+                 )
+             if len(flaky_tests) > 10:
+                 logger.warning(f" ... and {len(flaky_tests) - 10} more")
+
+     return flaky_tests
+
+
+ def build_output_with_failures(
+     summary_data: dict,
+     all_issues: list[PytestIssue],
+ ) -> str:
+     """Build output string with summary and test details.
+
+     Args:
+         summary_data: Summary data dictionary.
+         all_issues: List of all test issues (failures, errors, skips).
+
+     Returns:
+         str: Formatted output string.
+     """
+     # Build output with summary and test details
+     output_lines = [json.dumps(summary_data)]
+
+     # Format issues as tables (failures and skipped tests)
+     if all_issues:
+         # Import the pytest formatter to format issues as tables
+         from lintro.formatters.tools.pytest_formatter import (
+             format_pytest_issues,
+         )
+
+         # Format issues as tables (includes both failures and skipped tests)
+         issues_tables = format_pytest_issues(all_issues, format="grid")
+         if issues_tables.strip():
+             output_lines.append("")  # Blank line before tables
+             output_lines.append(issues_tables)
+
+     return "\n".join(output_lines)
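For orientation (not part of the diff): a minimal sketch of how a caller such as PytestTool might wire these helpers together. The function names, module path, and option keys are taken from the hunk above; the sample pytest output, exit code, counts, and threshold values are placeholders chosen for illustration.

```python
import time

from lintro.tools.implementations.pytest.pytest_output_processor import (
    build_output_with_failures,
    parse_pytest_output_with_fallback,
    process_test_summary,
)

start_time = time.time()                    # taken before launching the pytest subprocess
raw_output = "1 failed, 3 passed in 0.42s"  # captured pytest stdout (placeholder)
return_code = 1                             # pytest exit code (placeholder)
options = {
    "junitxml": None,            # set to a report path to prefer JUnit XML parsing
    "json_report": False,        # True would prioritize pytest-json-report output
    "slow_test_threshold": 2.0,  # overrides PYTEST_SLOW_TEST_THRESHOLD
}

# Parse with format auto-detection and text fallback
issues = parse_pytest_output_with_fallback(
    output=raw_output,
    return_code=return_code,
    options=options,
    subprocess_start_time=start_time,
)

# Reconcile the parsed issues with pytest's own summary line
summary_data = process_test_summary(
    output=raw_output,
    issues=issues,
    total_available_tests=4,
    docker_test_count=0,
    run_docker_tests=False,
)

# First line is the JSON summary, followed by grid tables for any issues
print(build_output_with_failures(summary_data, issues))
```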
lintro/tools/implementations/pytest/pytest_result_processor.py (new file)
@@ -0,0 +1,113 @@
+ """Pytest result processing.
+
+ This module contains the PytestResultProcessor class that handles test result
+ processing, summary generation, and ToolResult building.
+ """
+
+ from dataclasses import dataclass
+
+ from lintro.models.core.tool_result import ToolResult
+ from lintro.tools.implementations.pytest.pytest_config import PytestConfiguration
+ from lintro.tools.implementations.pytest.pytest_output_processor import (
+     build_output_with_failures,
+     check_total_time_warning,
+     detect_and_log_flaky_tests,
+     detect_and_log_slow_tests,
+     process_test_summary,
+ )
+
+
+ @dataclass
+ class PytestResultProcessor:
+     """Handles pytest result processing and ToolResult building.
+
+     This class encapsulates the logic for processing test results, generating
+     summaries, and building ToolResult objects from pytest execution data.
+
+     Attributes:
+         config: PytestConfiguration instance with result processing options.
+         tool_name: Name of the tool (e.g., "pytest").
+     """
+
+     config: PytestConfiguration
+     tool_name: str = "pytest"
+
+     def process_test_results(
+         self,
+         output: str,
+         return_code: int,
+         issues: list,
+         total_available_tests: int,
+         docker_test_count: int,
+         run_docker_tests: bool,
+     ) -> tuple[dict, list]:
+         """Process test results and generate summary.
+
+         Args:
+             output: Raw output from pytest.
+             return_code: Return code from pytest.
+             issues: Parsed test issues.
+             total_available_tests: Total number of available tests.
+             docker_test_count: Number of docker tests.
+             run_docker_tests: Whether docker tests were enabled.
+
+         Returns:
+             Tuple[Dict, List]: Tuple of (summary_data, all_issues).
+         """
+         # Process summary
+         summary_data = process_test_summary(
+             output=output,
+             issues=issues,
+             total_available_tests=total_available_tests,
+             docker_test_count=docker_test_count,
+             run_docker_tests=run_docker_tests,
+         )
+
+         # Performance warnings (uses all issues including passed for duration info)
+         detect_and_log_slow_tests(issues, self.config.get_options_dict())
+         check_total_time_warning(
+             summary_data["duration"],
+             self.config.get_options_dict(),
+         )
+
+         # Flaky test detection
+         detect_and_log_flaky_tests(issues, self.config.get_options_dict())
+
+         # Return all issues - filtering for ToolResult.issues happens in build_result
+         return (summary_data, issues)
+
+     def build_result(
+         self,
+         success: bool,
+         summary_data: dict,
+         all_issues: list,
+     ) -> ToolResult:
+         """Build final ToolResult from processed data.
+
+         Args:
+             success: Whether tests passed.
+             summary_data: Summary data dictionary.
+             all_issues: List of all test issues (failures, errors, skips).
+
+         Returns:
+             ToolResult: Final result object.
+         """
+         # Filter to only failed/error issues for the ToolResult.issues field
+         failed_issues = [
+             issue for issue in all_issues if issue.test_status in ("FAILED", "ERROR")
+         ]
+
+         output_text = build_output_with_failures(summary_data, all_issues)
+
+         result = ToolResult(
+             name=self.tool_name,
+             success=success,
+             issues=failed_issues,
+             output=output_text,
+             issues_count=len(failed_issues),
+         )
+
+         # Store summary data for display in Execution Summary table
+         result.pytest_summary = summary_data
+
+         return result
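Also for orientation only: a sketch of how PytestResultProcessor might be driven. The class, method signatures, module paths, and the config.get_options_dict() call are taken from the hunks above; the PytestConfiguration construction and the input values are assumptions, since pytest_config.py is not shown in this diff.

```python
from lintro.tools.implementations.pytest.pytest_config import PytestConfiguration
from lintro.tools.implementations.pytest.pytest_output_processor import (
    parse_pytest_output_with_fallback,
)
from lintro.tools.implementations.pytest.pytest_result_processor import (
    PytestResultProcessor,
)

# Assumed: a default-constructible configuration; real arguments live in pytest_config.py
config = PytestConfiguration()
processor = PytestResultProcessor(config=config)

raw_output = "2 passed in 0.10s"  # placeholder pytest stdout
return_code = 0

# Reuse the output-processor helper to obtain parsed issues
issues = parse_pytest_output_with_fallback(
    output=raw_output,
    return_code=return_code,
    options=config.get_options_dict(),
)

# Summarize, emit performance/flakiness warnings, then build the ToolResult
summary_data, all_issues = processor.process_test_results(
    output=raw_output,
    return_code=return_code,
    issues=issues,
    total_available_tests=2,
    docker_test_count=0,
    run_docker_tests=False,
)
result = processor.build_result(
    success=(return_code == 0),
    summary_data=summary_data,
    all_issues=all_issues,
)
print(result.output)  # JSON summary line plus any issue tables
```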