crackerjack 0.30.3__py3-none-any.whl → 0.31.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crackerjack might be problematic. Click here for more details.
- crackerjack/CLAUDE.md +1005 -0
- crackerjack/RULES.md +380 -0
- crackerjack/__init__.py +42 -13
- crackerjack/__main__.py +225 -299
- crackerjack/agents/__init__.py +41 -0
- crackerjack/agents/architect_agent.py +281 -0
- crackerjack/agents/base.py +169 -0
- crackerjack/agents/coordinator.py +512 -0
- crackerjack/agents/documentation_agent.py +498 -0
- crackerjack/agents/dry_agent.py +388 -0
- crackerjack/agents/formatting_agent.py +245 -0
- crackerjack/agents/import_optimization_agent.py +281 -0
- crackerjack/agents/performance_agent.py +669 -0
- crackerjack/agents/proactive_agent.py +104 -0
- crackerjack/agents/refactoring_agent.py +788 -0
- crackerjack/agents/security_agent.py +529 -0
- crackerjack/agents/test_creation_agent.py +652 -0
- crackerjack/agents/test_specialist_agent.py +486 -0
- crackerjack/agents/tracker.py +212 -0
- crackerjack/api.py +560 -0
- crackerjack/cli/__init__.py +24 -0
- crackerjack/cli/facade.py +104 -0
- crackerjack/cli/handlers.py +267 -0
- crackerjack/cli/interactive.py +471 -0
- crackerjack/cli/options.py +401 -0
- crackerjack/cli/utils.py +18 -0
- crackerjack/code_cleaner.py +618 -928
- crackerjack/config/__init__.py +19 -0
- crackerjack/config/hooks.py +218 -0
- crackerjack/core/__init__.py +0 -0
- crackerjack/core/async_workflow_orchestrator.py +406 -0
- crackerjack/core/autofix_coordinator.py +200 -0
- crackerjack/core/container.py +104 -0
- crackerjack/core/enhanced_container.py +542 -0
- crackerjack/core/performance.py +243 -0
- crackerjack/core/phase_coordinator.py +561 -0
- crackerjack/core/proactive_workflow.py +316 -0
- crackerjack/core/session_coordinator.py +289 -0
- crackerjack/core/workflow_orchestrator.py +640 -0
- crackerjack/dynamic_config.py +94 -103
- crackerjack/errors.py +263 -41
- crackerjack/executors/__init__.py +11 -0
- crackerjack/executors/async_hook_executor.py +431 -0
- crackerjack/executors/cached_hook_executor.py +242 -0
- crackerjack/executors/hook_executor.py +345 -0
- crackerjack/executors/individual_hook_executor.py +669 -0
- crackerjack/intelligence/__init__.py +44 -0
- crackerjack/intelligence/adaptive_learning.py +751 -0
- crackerjack/intelligence/agent_orchestrator.py +551 -0
- crackerjack/intelligence/agent_registry.py +414 -0
- crackerjack/intelligence/agent_selector.py +502 -0
- crackerjack/intelligence/integration.py +290 -0
- crackerjack/interactive.py +576 -315
- crackerjack/managers/__init__.py +11 -0
- crackerjack/managers/async_hook_manager.py +135 -0
- crackerjack/managers/hook_manager.py +137 -0
- crackerjack/managers/publish_manager.py +411 -0
- crackerjack/managers/test_command_builder.py +151 -0
- crackerjack/managers/test_executor.py +435 -0
- crackerjack/managers/test_manager.py +258 -0
- crackerjack/managers/test_manager_backup.py +1124 -0
- crackerjack/managers/test_progress.py +144 -0
- crackerjack/mcp/__init__.py +0 -0
- crackerjack/mcp/cache.py +336 -0
- crackerjack/mcp/client_runner.py +104 -0
- crackerjack/mcp/context.py +615 -0
- crackerjack/mcp/dashboard.py +636 -0
- crackerjack/mcp/enhanced_progress_monitor.py +479 -0
- crackerjack/mcp/file_monitor.py +336 -0
- crackerjack/mcp/progress_components.py +569 -0
- crackerjack/mcp/progress_monitor.py +949 -0
- crackerjack/mcp/rate_limiter.py +332 -0
- crackerjack/mcp/server.py +22 -0
- crackerjack/mcp/server_core.py +244 -0
- crackerjack/mcp/service_watchdog.py +501 -0
- crackerjack/mcp/state.py +395 -0
- crackerjack/mcp/task_manager.py +257 -0
- crackerjack/mcp/tools/__init__.py +17 -0
- crackerjack/mcp/tools/core_tools.py +249 -0
- crackerjack/mcp/tools/error_analyzer.py +308 -0
- crackerjack/mcp/tools/execution_tools.py +370 -0
- crackerjack/mcp/tools/execution_tools_backup.py +1097 -0
- crackerjack/mcp/tools/intelligence_tool_registry.py +80 -0
- crackerjack/mcp/tools/intelligence_tools.py +314 -0
- crackerjack/mcp/tools/monitoring_tools.py +502 -0
- crackerjack/mcp/tools/proactive_tools.py +384 -0
- crackerjack/mcp/tools/progress_tools.py +141 -0
- crackerjack/mcp/tools/utility_tools.py +341 -0
- crackerjack/mcp/tools/workflow_executor.py +360 -0
- crackerjack/mcp/websocket/__init__.py +14 -0
- crackerjack/mcp/websocket/app.py +39 -0
- crackerjack/mcp/websocket/endpoints.py +559 -0
- crackerjack/mcp/websocket/jobs.py +253 -0
- crackerjack/mcp/websocket/server.py +116 -0
- crackerjack/mcp/websocket/websocket_handler.py +78 -0
- crackerjack/mcp/websocket_server.py +10 -0
- crackerjack/models/__init__.py +31 -0
- crackerjack/models/config.py +93 -0
- crackerjack/models/config_adapter.py +230 -0
- crackerjack/models/protocols.py +118 -0
- crackerjack/models/task.py +154 -0
- crackerjack/monitoring/ai_agent_watchdog.py +450 -0
- crackerjack/monitoring/regression_prevention.py +638 -0
- crackerjack/orchestration/__init__.py +0 -0
- crackerjack/orchestration/advanced_orchestrator.py +970 -0
- crackerjack/orchestration/execution_strategies.py +341 -0
- crackerjack/orchestration/test_progress_streamer.py +636 -0
- crackerjack/plugins/__init__.py +15 -0
- crackerjack/plugins/base.py +200 -0
- crackerjack/plugins/hooks.py +246 -0
- crackerjack/plugins/loader.py +335 -0
- crackerjack/plugins/managers.py +259 -0
- crackerjack/py313.py +8 -3
- crackerjack/services/__init__.py +22 -0
- crackerjack/services/cache.py +314 -0
- crackerjack/services/config.py +347 -0
- crackerjack/services/config_integrity.py +99 -0
- crackerjack/services/contextual_ai_assistant.py +516 -0
- crackerjack/services/coverage_ratchet.py +347 -0
- crackerjack/services/debug.py +736 -0
- crackerjack/services/dependency_monitor.py +617 -0
- crackerjack/services/enhanced_filesystem.py +439 -0
- crackerjack/services/file_hasher.py +151 -0
- crackerjack/services/filesystem.py +395 -0
- crackerjack/services/git.py +165 -0
- crackerjack/services/health_metrics.py +611 -0
- crackerjack/services/initialization.py +847 -0
- crackerjack/services/log_manager.py +286 -0
- crackerjack/services/logging.py +174 -0
- crackerjack/services/metrics.py +578 -0
- crackerjack/services/pattern_cache.py +362 -0
- crackerjack/services/pattern_detector.py +515 -0
- crackerjack/services/performance_benchmarks.py +653 -0
- crackerjack/services/security.py +163 -0
- crackerjack/services/server_manager.py +234 -0
- crackerjack/services/smart_scheduling.py +144 -0
- crackerjack/services/tool_version_service.py +61 -0
- crackerjack/services/unified_config.py +437 -0
- crackerjack/services/version_checker.py +248 -0
- crackerjack/slash_commands/__init__.py +14 -0
- crackerjack/slash_commands/init.md +122 -0
- crackerjack/slash_commands/run.md +163 -0
- crackerjack/slash_commands/status.md +127 -0
- crackerjack-0.31.4.dist-info/METADATA +742 -0
- crackerjack-0.31.4.dist-info/RECORD +148 -0
- crackerjack-0.31.4.dist-info/entry_points.txt +2 -0
- crackerjack/.gitignore +0 -34
- crackerjack/.libcst.codemod.yaml +0 -18
- crackerjack/.pdm.toml +0 -1
- crackerjack/crackerjack.py +0 -3805
- crackerjack/pyproject.toml +0 -286
- crackerjack-0.30.3.dist-info/METADATA +0 -1290
- crackerjack-0.30.3.dist-info/RECORD +0 -16
- {crackerjack-0.30.3.dist-info → crackerjack-0.31.4.dist-info}/WHEEL +0 -0
- {crackerjack-0.30.3.dist-info → crackerjack-0.31.4.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,636 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import re
|
|
3
|
+
import subprocess
|
|
4
|
+
import time
|
|
5
|
+
import typing as t
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
from rich.console import Console
|
|
10
|
+
|
|
11
|
+
from crackerjack.models.protocols import OptionsProtocol
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass
|
|
15
|
+
class TestProgress:
|
|
16
|
+
test_id: str
|
|
17
|
+
test_file: str
|
|
18
|
+
test_class: str | None = None
|
|
19
|
+
test_method: str | None = None
|
|
20
|
+
status: str = "pending"
|
|
21
|
+
start_time: float | None = None
|
|
22
|
+
end_time: float | None = None
|
|
23
|
+
duration: float | None = None
|
|
24
|
+
output_lines: list[str] | None = None
|
|
25
|
+
error_message: str | None = None
|
|
26
|
+
failure_traceback: str | None = None
|
|
27
|
+
assertions_count: int = 0
|
|
28
|
+
errors_found: int = 0
|
|
29
|
+
warnings_found: int = 0
|
|
30
|
+
error_details: list[dict[str, t.Any]] | None = None
|
|
31
|
+
|
|
32
|
+
def __post_init__(self) -> None:
|
|
33
|
+
if self.output_lines is None:
|
|
34
|
+
self.output_lines = []
|
|
35
|
+
if self.error_details is None:
|
|
36
|
+
self.error_details = []
|
|
37
|
+
if self.end_time and self.start_time:
|
|
38
|
+
self.duration = self.end_time - self.start_time
|
|
39
|
+
|
|
40
|
+
def to_dict(self) -> dict[str, t.Any]:
|
|
41
|
+
return {
|
|
42
|
+
"test_id": self.test_id,
|
|
43
|
+
"test_file": self.test_file,
|
|
44
|
+
"test_class": self.test_class,
|
|
45
|
+
"test_method": self.test_method,
|
|
46
|
+
"status": self.status,
|
|
47
|
+
"start_time": self.start_time,
|
|
48
|
+
"end_time": self.end_time,
|
|
49
|
+
"duration": self.duration,
|
|
50
|
+
"output_lines": self.output_lines[-5:] if self.output_lines else [],
|
|
51
|
+
"error_message": self.error_message,
|
|
52
|
+
"failure_traceback": self.failure_traceback[:500]
|
|
53
|
+
if self.failure_traceback
|
|
54
|
+
else None,
|
|
55
|
+
"assertions_count": self.assertions_count,
|
|
56
|
+
"errors_found": self.errors_found,
|
|
57
|
+
"warnings_found": self.warnings_found,
|
|
58
|
+
"error_details": self.error_details,
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
@dataclass
|
|
63
|
+
class TestSuiteProgress:
|
|
64
|
+
total_tests: int = 0
|
|
65
|
+
completed_tests: int = 0
|
|
66
|
+
passed_tests: int = 0
|
|
67
|
+
failed_tests: int = 0
|
|
68
|
+
skipped_tests: int = 0
|
|
69
|
+
error_tests: int = 0
|
|
70
|
+
start_time: float | None = None
|
|
71
|
+
end_time: float | None = None
|
|
72
|
+
duration: float | None = None
|
|
73
|
+
coverage_percentage: float | None = None
|
|
74
|
+
current_test: str | None = None
|
|
75
|
+
|
|
76
|
+
@property
|
|
77
|
+
def progress_percentage(self) -> float:
|
|
78
|
+
if self.total_tests == 0:
|
|
79
|
+
return 0.0
|
|
80
|
+
return (self.completed_tests / self.total_tests) * 100
|
|
81
|
+
|
|
82
|
+
@property
|
|
83
|
+
def success_rate(self) -> float:
|
|
84
|
+
if self.completed_tests == 0:
|
|
85
|
+
return 0.0
|
|
86
|
+
return (self.passed_tests / self.completed_tests) * 100
|
|
87
|
+
|
|
88
|
+
def to_dict(self) -> dict[str, t.Any]:
|
|
89
|
+
return {
|
|
90
|
+
"total_tests": self.total_tests,
|
|
91
|
+
"completed_tests": self.completed_tests,
|
|
92
|
+
"passed_tests": self.passed_tests,
|
|
93
|
+
"failed_tests": self.failed_tests,
|
|
94
|
+
"skipped_tests": self.skipped_tests,
|
|
95
|
+
"error_tests": self.error_tests,
|
|
96
|
+
"start_time": self.start_time,
|
|
97
|
+
"end_time": self.end_time,
|
|
98
|
+
"duration": self.duration,
|
|
99
|
+
"coverage_percentage": self.coverage_percentage,
|
|
100
|
+
"current_test": self.current_test,
|
|
101
|
+
"progress_percentage": self.progress_percentage,
|
|
102
|
+
"success_rate": self.success_rate,
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
class PytestOutputParser:
    """Parse pytest's verbose console output into structured progress data.

    The parser is line-oriented: feed it captured output lines and it
    returns per-test ``TestProgress`` records plus an aggregated
    ``TestSuiteProgress`` snapshot.
    """

    # Legacy patterns retained for backward compatibility with external
    # callers; the parsing methods below use DETAILED_TEST_PATTERN.
    TEST_START_PATTERN = re.compile(
        r"^(.+?)::(.*)::(.*)(?:PASSED|FAILED|SKIPPED|ERROR)",
    )
    TEST_RESULT_PATTERN = re.compile(
        r"^(.+?)(?:PASSED|FAILED|SKIPPED|ERROR)(?:\s+\[.*?\])?\s*$",
    )
    TEST_COLLECTION_PATTERN = re.compile(r"collected (\d+) items?")
    TEST_SESSION_START = re.compile(r"test session starts")
    COVERAGE_PATTERN = re.compile(r"TOTAL\s+\d+\s+\d+\s+(\d+)%")

    # BUG FIX: the status word must be a *capturing* group.  The previous
    # pattern wrapped it in (?:...), yielding only 4 groups while
    # _process_test_result_line unpacked 5 values from match.groups(),
    # raising ValueError on every matching line.  Requiring \s+ before the
    # status also keeps the trailing space out of the captured test name.
    DETAILED_TEST_PATTERN = re.compile(
        r"^(.+?\.py)::(.*?)\s+(PASSED|FAILED|SKIPPED|ERROR)(?:\s+\[(\d+)%\])?\s*(?:\[(.*?)\])?\s*$",
    )

    def __init__(self) -> None:
        # Incremental-parsing state; part of the public attribute surface
        # even though the batch methods below do not use it.
        self.current_test: str | None = None
        self.test_traceback_buffer: list[str] = []
        self.in_traceback = False

    def parse_pytest_output(self, output_lines: list[str]) -> dict[str, t.Any]:
        """Parse captured pytest output.

        Returns a dict with "tests" (list of TestProgress),
        "suite_progress" (TestSuiteProgress) and "test_count".
        """
        tests: dict[str, TestProgress] = {}
        suite_info = TestSuiteProgress()

        for line in output_lines:
            line = line.strip()
            if not line:
                continue

            # Each helper ignores lines that don't match its pattern.
            self._process_test_collection_line(line, suite_info)
            self._process_test_result_line(line, tests, suite_info)
            self._process_coverage_line(line, suite_info)
            self._process_current_test_line(line, suite_info)

        return {
            "tests": list(tests.values()),
            "suite_progress": suite_info,
            "test_count": len(tests),
        }

    def _process_test_collection_line(
        self,
        line: str,
        suite_info: TestSuiteProgress,
    ) -> None:
        """Record the total test count from "collected N items"."""
        if match := self.TEST_COLLECTION_PATTERN.search(line):
            suite_info.total_tests = int(match.group(1))

    def _process_test_result_line(
        self,
        line: str,
        tests: dict[str, TestProgress],
        suite_info: TestSuiteProgress,
    ) -> None:
        """Create/update a TestProgress entry from a verbose result line."""
        if match := self.DETAILED_TEST_PATTERN.match(line):
            file_path, test_name, status, _progress, _timing = match.groups()
            test_id = f"{file_path}::{test_name}"

            if test_id not in tests:
                tests[test_id] = self._create_test_progress(
                    file_path,
                    test_name,
                    test_id,
                )

            self._update_test_progress(tests[test_id], status)
            self._update_suite_counts(suite_info, status)

    def _create_test_progress(
        self,
        file_path: str,
        test_name: str,
        test_id: str,
    ) -> TestProgress:
        """Build a TestProgress, splitting "Class::method" names if present."""
        test_file = Path(file_path).name
        test_parts = test_name.split("::")
        test_class = test_parts[0] if len(test_parts) > 1 else None
        test_method = test_parts[-1]

        return TestProgress(
            test_id=test_id,
            test_file=test_file,
            test_class=test_class,
            test_method=test_method,
        )

    def _update_test_progress(self, test_progress: TestProgress, status: str) -> None:
        """Stamp the final (lower-cased) status and end time on a test."""
        test_progress.status = status.lower()
        test_progress.end_time = time.time()

    def _update_suite_counts(self, suite_info: TestSuiteProgress, status: str) -> None:
        """Bump the completed counter plus the per-status counter."""
        suite_info.completed_tests += 1
        if status == "PASSED":
            suite_info.passed_tests += 1
        elif status == "FAILED":
            suite_info.failed_tests += 1
        elif status == "SKIPPED":
            suite_info.skipped_tests += 1
        elif status == "ERROR":
            suite_info.error_tests += 1

    def _process_coverage_line(self, line: str, suite_info: TestSuiteProgress) -> None:
        """Extract the coverage percentage from coverage's TOTAL row."""
        if match := self.COVERAGE_PATTERN.search(line):
            suite_info.coverage_percentage = float(match.group(1))

    def _process_current_test_line(
        self,
        line: str,
        suite_info: TestSuiteProgress,
    ) -> None:
        """Track the most recently reported test id for live display."""
        if "::" in line and any(
            status in line for status in ("PASSED", "FAILED", "SKIPPED", "ERROR")
        ):
            suite_info.current_test = line.split()[0] if line.split() else None

    def parse_test_failure_details(self, output_lines: list[str]) -> dict[str, str]:
        """Map failing-test headers to their traceback text.

        Scans the FAILURES/ERRORS section of pytest output; keys are the
        header lines with surrounding underscores stripped.
        """
        failures = {}
        current_test = None
        current_traceback = []
        in_failure_section = False

        for line in output_lines:
            if self._is_failure_section_start(line):
                in_failure_section = True
                continue

            if not in_failure_section:
                continue

            if self._is_test_header(line):
                # A new header closes the previous failure block.
                self._save_current_failure(failures, current_test, current_traceback)
                current_test = line.strip("_")
                current_traceback = []
                continue

            if self._should_add_to_traceback(current_test, line):
                current_traceback.append(line)

        # Flush the trailing failure block, if any.
        self._save_current_failure(failures, current_test, current_traceback)
        return failures

    def _is_failure_section_start(self, line: str) -> bool:
        """True for pytest's FAILURES/ERRORS section banner."""
        return "FAILURES" in line or "ERRORS" in line

    def _is_test_header(self, line: str) -> bool:
        """True for an underscore-framed per-test failure header."""
        return line.startswith("_") and "::" in line

    def _should_add_to_traceback(self, current_test: str | None, line: str) -> bool:
        """Collect non-empty lines only while inside a failure block."""
        return current_test is not None and bool(line.strip())

    def _save_current_failure(
        self,
        failures: dict[str, str],
        current_test: str | None,
        current_traceback: list[str],
    ) -> None:
        """Store the accumulated traceback for the current test, if any."""
        if current_test and current_traceback:
            failures[current_test] = "\n".join(current_traceback)
|
|
264
|
+
|
|
265
|
+
|
|
266
|
+
class TestProgressStreamer:
    """Run pytest as an async subprocess and stream per-line progress.

    Output is parsed live to update a ``TestSuiteProgress`` and echoed to
    the console with status-based colouring; optional callbacks receive
    suite-level updates as lines arrive.
    """

    def __init__(self, console: Console, pkg_path: Path) -> None:
        self.console = console
        self.pkg_path = pkg_path
        self.parser = PytestOutputParser()
        # Optional observers, invoked from the stream-reading coroutines.
        self.progress_callback: t.Callable[[TestSuiteProgress], None] | None = None
        self.test_callback: t.Callable[[TestProgress], None] | None = None

    def set_progress_callback(
        self,
        callback: t.Callable[[TestSuiteProgress], None],
    ) -> None:
        """Register a callback fired after each parsed output line."""
        self.progress_callback = callback

    def set_test_callback(self, callback: t.Callable[[TestProgress], None]) -> None:
        """Register a callback for individual-test updates."""
        self.test_callback = callback

    async def run_tests_with_streaming(
        self,
        options: OptionsProtocol,
        execution_mode: str = "full_suite",
    ) -> dict[str, t.Any]:
        """Execute the test suite with live output and return a result dict.

        Never raises: execution errors are converted into a failure payload
        by _handle_test_execution_error.
        """
        start_time = time.time()
        suite_progress = TestSuiteProgress(start_time=start_time)

        self.console.print(
            "\n[bold bright_green]🧪 RUNNING TESTS WITH STREAMING PROGRESS[/bold bright_green]",
        )

        cmd = self.build_pytest_command(options, execution_mode)

        try:
            return await self._execute_tests_and_process_results(cmd, suite_progress)
        except Exception as e:
            return self._handle_test_execution_error(e, suite_progress)

    async def _execute_tests_and_process_results(
        self,
        cmd: list[str],
        suite_progress: TestSuiteProgress,
    ) -> dict[str, t.Any]:
        """Run pytest, parse its full output, and assemble the result payload."""
        result = await self._run_pytest_with_streaming(cmd, suite_progress)

        # Split once and reuse for both parsing passes.
        output_lines = result.stdout.split("\n")
        parsed_results = self.parser.parse_pytest_output(output_lines)
        failure_details = self.parser.parse_test_failure_details(output_lines)

        self._finalize_suite_progress(suite_progress)
        self._attach_failure_details(parsed_results["tests"], failure_details)
        self._print_test_summary(suite_progress, parsed_results["tests"])

        return self._build_success_result(result, suite_progress, parsed_results)

    def _finalize_suite_progress(self, suite_progress: TestSuiteProgress) -> None:
        """Stamp the end time and compute total wall-clock duration."""
        suite_progress.end_time = time.time()
        suite_progress.duration = suite_progress.end_time - (
            suite_progress.start_time or 0
        )

    def _attach_failure_details(
        self,
        tests: list[TestProgress],
        failure_details: dict[str, str],
    ) -> None:
        """Copy parsed tracebacks onto the matching TestProgress records.

        NOTE(review): failure_details keys come from stripped pytest
        failure headers; they only match when a header happens to equal the
        "file::name" test_id — verify against real pytest output.
        """
        for test in tests:
            if test.test_id in failure_details:
                test.failure_traceback = failure_details[test.test_id]

    def _build_success_result(
        self,
        result: subprocess.CompletedProcess[str],
        suite_progress: TestSuiteProgress,
        parsed_results: dict[str, t.Any],
    ) -> dict[str, t.Any]:
        """Build the dict returned to callers after a completed run."""
        # Loop variable named "test" (not "t") to avoid shadowing the
        # typing alias imported as t.
        failed = [
            test for test in parsed_results["tests"] if test.status == "failed"
        ]
        return {
            "success": result.returncode == 0,
            "suite_progress": suite_progress,
            "individual_tests": parsed_results["tests"],
            "failed_tests": failed,
            "total_duration": suite_progress.duration,
            "coverage_percentage": suite_progress.coverage_percentage,
        }

    def _handle_test_execution_error(
        self,
        error: Exception,
        suite_progress: TestSuiteProgress,
    ) -> dict[str, t.Any]:
        """Convert an execution exception into a failure result payload."""
        self.console.print(f"[red]❌ Test execution failed: {error}[/red]")
        suite_progress.end_time = time.time()
        suite_progress.duration = suite_progress.end_time - (
            suite_progress.start_time or 0
        )

        return {
            "success": False,
            "suite_progress": suite_progress,
            "individual_tests": [],
            "failed_tests": [],
            "error": str(error),
        }

    def build_pytest_command(
        self,
        options: OptionsProtocol,
        execution_mode: str,
    ) -> list[str]:
        """Assemble the pytest invocation from options and execution mode.

        Always runs verbose with short tracebacks so the streaming parser
        sees per-test result lines.
        """
        cmd = ["uv", "run", "pytest"]

        cmd.extend(["-v", "--tb=short"])

        if hasattr(options, "coverage") and options.coverage:
            cmd.extend(["--cov=crackerjack", "--cov-report=term-missing"])

        if execution_mode == "individual_with_progress":
            cmd.extend(["--no-header"])
        elif execution_mode == "selective":
            pass  # selective mode adds no extra flags
        else:
            cmd.extend(["-q"])

        if hasattr(options, "test_timeout"):
            cmd.extend([f"--timeout={options.test_timeout}"])

        if hasattr(options, "test_workers") and options.test_workers > 1:
            cmd.extend(["-n", str(options.test_workers)])

        return cmd

    async def _run_pytest_with_streaming(
        self,
        cmd: list[str],
        suite_progress: TestSuiteProgress,
    ) -> subprocess.CompletedProcess[str]:
        """Spawn pytest, stream both pipes, and return a CompletedProcess."""
        self.console.print(f"[dim]Running: {' '.join(cmd)}[/dim]")

        process = await self._create_subprocess(cmd)
        stdout_lines: list[str] = []
        stderr_lines: list[str] = []

        try:
            await self._process_streams(
                process,
                stdout_lines,
                stderr_lines,
                suite_progress,
            )
        except Exception:
            await self._cleanup_process_and_tasks(process, [])
            raise

        return self._build_completed_process(cmd, process, stdout_lines, stderr_lines)

    async def _create_subprocess(self, cmd: list[str]) -> asyncio.subprocess.Process:
        """Start pytest in the package directory with both pipes captured."""
        return await asyncio.create_subprocess_exec(
            *cmd,
            cwd=self.pkg_path,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )

    async def _process_streams(
        self,
        process: asyncio.subprocess.Process,
        stdout_lines: list[str],
        stderr_lines: list[str],
        suite_progress: TestSuiteProgress,
    ) -> None:
        """Concurrently drain stdout/stderr, then wait for process exit.

        BUG FIX: the reader tasks must be awaited *before* process.wait().
        Waiting for exit first can deadlock when the child fills the OS
        pipe buffer while nothing is reading it (the asyncio subprocess
        docs warn about exactly this pattern).
        """
        tasks = [
            asyncio.create_task(
                self._read_stream(process.stdout, stdout_lines, suite_progress),
            ),
            asyncio.create_task(
                self._read_stream(process.stderr, stderr_lines, suite_progress),
            ),
        ]

        try:
            await asyncio.gather(*tasks, return_exceptions=True)
            await process.wait()
        except Exception:
            await self._cleanup_process_and_tasks(process, tasks)
            raise

    async def _read_stream(
        self,
        stream: asyncio.StreamReader | None,
        output_list: list[str],
        suite_progress: TestSuiteProgress,
    ) -> None:
        """Read a pipe line-by-line until EOF, collecting and echoing lines."""
        if not stream:
            return

        while True:
            try:
                line = await stream.readline()
                if not line:
                    break  # EOF

                line_str = self._process_stream_line(line)
                output_list.append(line_str)

                self._handle_line_output(line_str, suite_progress)

            except Exception:
                # A broken pipe or decode failure ends this stream; the
                # other stream and the process wait continue independently.
                break

    def _process_stream_line(self, line: bytes | str) -> str:
        """Decode (if needed) and strip the trailing newline."""
        return (line.decode() if isinstance(line, bytes) else line).rstrip()

    def _handle_line_output(
        self,
        line_str: str,
        suite_progress: TestSuiteProgress,
    ) -> None:
        """Update live counters, echo the line, and notify the observer."""
        self._parse_line_for_progress(line_str, suite_progress)

        if line_str.strip():
            self._print_test_line(line_str)

        if self.progress_callback:
            self.progress_callback(suite_progress)

    async def _cleanup_process_and_tasks(
        self,
        process: asyncio.subprocess.Process,
        tasks: list[asyncio.Task[t.Any]],
    ) -> None:
        """Kill the child (if still running) and cancel reader tasks."""
        # BUG FIX: kill() raises ProcessLookupError when the child has
        # already exited; only signal while it is still alive.
        if process.returncode is None:
            process.kill()
        for task in tasks:
            task.cancel()

    def _build_completed_process(
        self,
        cmd: list[str],
        process: asyncio.subprocess.Process,
        stdout_lines: list[str],
        stderr_lines: list[str],
    ) -> subprocess.CompletedProcess[str]:
        """Package the collected output as a subprocess.CompletedProcess."""
        return subprocess.CompletedProcess(
            args=cmd,
            returncode=process.returncode or 0,
            stdout="\n".join(stdout_lines),
            stderr="\n".join(stderr_lines),
        )

    def _parse_line_for_progress(
        self,
        line: str,
        suite_progress: TestSuiteProgress,
    ) -> None:
        """Update live suite counters from a single output line.

        This is a heuristic substring scan for responsiveness; the
        authoritative counts come from the full-output parse afterwards.
        """
        if "::" in line and any(
            status in line for status in ("PASSED", "FAILED", "SKIPPED", "ERROR")
        ):
            parts = line.split()
            if parts:
                suite_progress.current_test = parts[0]

        if match := self.parser.TEST_COLLECTION_PATTERN.search(line):
            suite_progress.total_tests = int(match.group(1))

        if "PASSED" in line:
            suite_progress.passed_tests += 1
            suite_progress.completed_tests += 1
        elif "FAILED" in line:
            suite_progress.failed_tests += 1
            suite_progress.completed_tests += 1
        elif "SKIPPED" in line:
            suite_progress.skipped_tests += 1
            suite_progress.completed_tests += 1
        elif "ERROR" in line:
            suite_progress.error_tests += 1
            suite_progress.completed_tests += 1

    def _print_test_line(self, line: str) -> None:
        """Echo one output line with colour keyed on its status word."""
        if "PASSED" in line:
            self.console.print(f"[green]{line}[/green]")
        elif "FAILED" in line:
            self.console.print(f"[red]{line}[/red]")
        elif "SKIPPED" in line:
            self.console.print(f"[yellow]{line}[/yellow]")
        elif "ERROR" in line:
            self.console.print(f"[bright_red]{line}[/bright_red]")
        elif line.startswith("="):
            self.console.print(f"[bold cyan]{line}[/bold cyan]")
        else:
            self.console.print(f"[dim]{line}[/dim]")

    def _print_test_summary(
        self,
        suite_progress: TestSuiteProgress,
        tests: list[TestProgress],
    ) -> None:
        """Print the end-of-run summary panel."""
        self._print_summary_header()
        self._print_test_counts(suite_progress)
        self._print_timing_stats(suite_progress)
        self._print_coverage_stats(suite_progress)
        self._print_failed_test_details(tests)
        self._print_summary_footer()

    def _print_summary_header(self) -> None:
        self.console.print("\n" + "=" * 80)
        self.console.print(
            "[bold bright_green]🧪 TEST EXECUTION SUMMARY[/bold bright_green]",
        )
        self.console.print("=" * 80)

    def _print_test_counts(self, suite_progress: TestSuiteProgress) -> None:
        """Print totals; zero-count failure categories are omitted."""
        self.console.print(f"[bold]Total Tests: [/bold] {suite_progress.total_tests}")
        self.console.print(f"[green]✅ Passed: [/green] {suite_progress.passed_tests}")

        if suite_progress.failed_tests > 0:
            self.console.print(f"[red]❌ Failed: [/red] {suite_progress.failed_tests}")

        if suite_progress.skipped_tests > 0:
            self.console.print(
                f"[yellow]⏭️ Skipped: [/yellow] {suite_progress.skipped_tests}",
            )

        if suite_progress.error_tests > 0:
            self.console.print(
                f"[bright_red]💥 Errors: [/bright_red] {suite_progress.error_tests}",
            )

    def _print_timing_stats(self, suite_progress: TestSuiteProgress) -> None:
        """Print duration, average per test, and success rate."""
        if not suite_progress.duration:
            return

        self.console.print(f"[bold]⏱️ Duration: [/bold] {suite_progress.duration:.1f}s")

        if suite_progress.total_tests > 0:
            avg_time = suite_progress.duration / suite_progress.total_tests
            self.console.print(f"[dim]Average per test: {avg_time:.2f}s[/dim]")

        self.console.print(
            f"[bold]📈 Success Rate: [/bold] {suite_progress.success_rate:.1f}%",
        )

    def _print_coverage_stats(self, suite_progress: TestSuiteProgress) -> None:
        if suite_progress.coverage_percentage is not None:
            self.console.print(
                f"[bold]📊 Coverage: [/bold] {suite_progress.coverage_percentage:.1f}%",
            )

    def _print_failed_test_details(self, tests: list[TestProgress]) -> None:
        """List up to five failed tests with a short error preview."""
        failed_tests = [test for test in tests if test.status == "failed"]
        if not failed_tests:
            return

        self.console.print(
            f"\n[bold red]Failed Tests ({len(failed_tests)}): [/bold red]",
        )
        for test in failed_tests[:5]:
            self.console.print(f" ❌ {test.test_id}")
            if test.error_message:
                error_preview = self._format_error_preview(test.error_message)
                self.console.print(f" [dim]{error_preview}[/dim]")

        if len(failed_tests) > 5:
            self.console.print(f" [dim]... and {len(failed_tests) - 5} more[/dim]")

    def _format_error_preview(self, error_message: str) -> str:
        """Truncate long error messages to 100 characters with an ellipsis."""
        return (
            error_message[:100] + "..." if len(error_message) > 100 else error_message
        )

    def _print_summary_footer(self) -> None:
        self.console.print("=" * 80)
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
from .base import PluginBase, PluginMetadata, PluginRegistry
from .hooks import CustomHookPlugin, HookPluginBase
from .loader import PluginDiscovery, PluginLoader
from .managers import PluginManager

# Public API of the plugins package, listed alphabetically.
__all__ = [
    "CustomHookPlugin",
    "HookPluginBase",
    "PluginBase",
    "PluginDiscovery",
    "PluginLoader",
    "PluginManager",
    "PluginMetadata",
    "PluginRegistry",
]
|