crackerjack 0.31.4__py3-none-any.whl → 0.31.7__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
Potentially problematic release: this version of crackerjack has been flagged as possibly problematic.
- crackerjack/__main__.py +2 -0
- crackerjack/agents/base.py +1 -0
- crackerjack/agents/test_creation_agent.py +5 -0
- crackerjack/cli/options.py +8 -0
- crackerjack/core/phase_coordinator.py +25 -1
- crackerjack/core/workflow_orchestrator.py +264 -78
- crackerjack/dynamic_config.py +3 -3
- crackerjack/managers/publish_manager.py +22 -0
- crackerjack/managers/test_executor.py +13 -5
- crackerjack/managers/test_progress.py +31 -61
- crackerjack/mcp/context.py +6 -0
- crackerjack/mcp/tools/execution_tools.py +4 -2
- crackerjack/mcp/tools/progress_tools.py +95 -19
- crackerjack/mcp/tools/workflow_executor.py +224 -19
- crackerjack/orchestration/coverage_improvement.py +223 -0
- crackerjack/services/config.py +12 -1
- crackerjack/services/coverage_ratchet.py +9 -0
- crackerjack/services/filesystem.py +26 -0
- crackerjack/services/git.py +12 -1
- crackerjack/services/initialization.py +38 -12
- {crackerjack-0.31.4.dist-info → crackerjack-0.31.7.dist-info}/METADATA +1 -1
- {crackerjack-0.31.4.dist-info → crackerjack-0.31.7.dist-info}/RECORD +25 -24
- {crackerjack-0.31.4.dist-info → crackerjack-0.31.7.dist-info}/WHEEL +0 -0
- {crackerjack-0.31.4.dist-info → crackerjack-0.31.7.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.31.4.dist-info → crackerjack-0.31.7.dist-info}/licenses/LICENSE +0 -0
crackerjack/managers/test_progress.py CHANGED

@@ -8,9 +8,6 @@ import threading
 import time
 import typing as t
 
-from rich.align import Align
-from rich.table import Table
-
 
 class TestProgress:
     """Tracks test execution progress with thread-safe updates."""
@@ -58,87 +55,60 @@ class TestProgress:
         if hasattr(self, key):
             setattr(self, key, value)
 
-    def format_progress(self) ->
+    def format_progress(self) -> str:
         """Format progress display for Rich output."""
         if self.is_collecting:
-            return
-            return
+            return self._format_collection_progress()
+        return self._format_execution_progress()
 
-    def _format_collection_progress(self) ->
+    def _format_collection_progress(self) -> str:
         """Format test collection progress display."""
-
-        table.add_column()
-        table.add_column(justify="center")
-
-        # Collection status
-        table.add_row(
-            "[yellow]📋[/yellow] Test Collection",
-            f"[dim]{self.collection_status}[/dim]",
-        )
+        status_parts = [self.collection_status]
 
-        # Files discovered
         if self.files_discovered > 0:
-
-            "[cyan]📁[/cyan] Files Found",
-            f"[green]{self.files_discovered}[/green] test files",
-            )
+            status_parts.append(f"{self.files_discovered} test files")
 
-        # Elapsed time
         elapsed = self.elapsed_time
         if elapsed > 1:
-
+            status_parts.append(f"{elapsed:.1f}s")
 
-        return
+        return " | ".join(status_parts)
 
-    def _format_execution_progress(self) ->
+    def _format_execution_progress(self) -> str:
         """Format test execution progress display."""
-
-        table.add_column()
-        table.add_column(justify="center")
+        parts = []
 
-        #
+        # Test progress
        if self.total_tests > 0:
             progress_pct = (self.completed / self.total_tests) * 100
-
-            remaining_blocks = 20 - completed_blocks
-            progress_bar = "█" * completed_blocks + "░" * remaining_blocks
-
-            table.add_row(
-                "[yellow]⚡[/yellow] Progress",
-                f"[green]{progress_bar}[/green] {progress_pct:.1f}%",
-            )
-
-        # Test counts
-        table.add_row("[green]✅[/green] Passed", f"[green]{self.passed}[/green]")
+            parts.append(f"{self.completed}/{self.total_tests} ({progress_pct:.1f}%)")
 
+        # Status counts
+        status_parts = []
+        if self.passed > 0:
+            status_parts.append(f"✅ {self.passed}")
         if self.failed > 0:
-
-
+            status_parts.append(f"❌ {self.failed}")
         if self.skipped > 0:
-
-            "[yellow]⏭️[/yellow] Skipped", f"[yellow]{self.skipped}[/yellow]"
-        )
-
+            status_parts.append(f"⏭ {self.skipped}")
         if self.errors > 0:
-
+            status_parts.append(f"💥 {self.errors}")
 
-
+        if status_parts:
+            parts.append(" ".join(status_parts))
+
+        # Current test (truncated)
         if self.current_test and not self.is_complete:
-
-
-
-
-            else f"[dim]{self.current_test}[/dim]",
+            test_name = (
+                self.current_test[:30] + "..."
+                if len(self.current_test) > 30
+                else self.current_test
             )
+            parts.append(f"Running: {test_name}")
 
-        # Timing
+        # Timing
         elapsed = self.elapsed_time
         if elapsed > 1:
-
-
-            # ETA
-            eta = self.eta_seconds
-            if eta and eta > 1 and not self.is_complete:
-                table.add_row("[blue]📅[/blue] ETA", f"[dim]{eta:.1f}s[/dim]")
+            parts.append(f"{elapsed:.1f}s")
 
-        return
+        return " | ".join(parts)
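The net effect of this refactor is that TestProgress no longer builds Rich Table renderables and instead returns a compact pipe-separated string. A minimal sketch of the new behavior follows; the stub class and its field values are hypothetical, not taken from the package:

import typing as t

# Hypothetical stub carrying only the fields the new formatter reads.
class ProgressStub:
    completed, total_tests = 42, 120
    passed, failed, skipped, errors = 40, 2, 0, 0
    current_test = "tests/test_workflow_orchestrator.py::test_retry"
    is_complete = False

    def format_execution_progress(self) -> str:
        parts = [f"{self.completed}/{self.total_tests} "
                 f"({self.completed / self.total_tests * 100:.1f}%)"]
        status = [
            f"{icon} {count}"
            for icon, count in (("✅", self.passed), ("❌", self.failed),
                                ("⏭", self.skipped), ("💥", self.errors))
            if count > 0
        ]
        if status:
            parts.append(" ".join(status))
        if self.current_test and not self.is_complete:
            name = self.current_test
            parts.append(f"Running: {name[:30] + '...' if len(name) > 30 else name}")
        return " | ".join(parts)

print(ProgressStub().format_execution_progress())
# 42/120 (35.0%) | ✅ 40 ❌ 2 | Running: tests/test_workflow_orchestrat...

Dropping the Rich Align/Table dependency presumably keeps per-update formatting cheap and makes the output embeddable in plain-text logs and JSON progress records.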
crackerjack/mcp/context.py CHANGED

@@ -521,6 +521,12 @@ class MCPServerContext:
     ) -> None:
         await self.batched_saver.schedule_save(save_id, save_func)
 
+    def get_current_time(self) -> str:
+        """Get current timestamp as string for progress tracking."""
+        import datetime
+
+        return datetime.datetime.now().isoformat()
+
     def get_context_stats(self) -> dict[str, t.Any]:
         return {
             "initialized": self._initialized,
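The new get_current_time helper is a thin wrapper over the standard library, so the timestamps embedded in progress records are plain ISO 8601 strings:

import datetime

# Same expression the new MCPServerContext.get_current_time() evaluates:
print(datetime.datetime.now().isoformat())
# e.g. 2025-01-07T14:03:27.123456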
crackerjack/mcp/tools/execution_tools.py CHANGED

@@ -140,12 +140,14 @@ async def _validate_context_and_rate_limit(context: t.Any) -> str | None:
     from contextlib import suppress
 
     with suppress(Exception):
-        allowed = await context.rate_limiter.
+        allowed, details = await context.rate_limiter.check_request_allowed(
+            "execute_crackerjack"
+        )
         if not allowed:
             return json.dumps(
                 {
                     "status": "error",
-                    "message": "Rate limit exceeded. Please wait before retrying.",
+                    "message": f"Rate limit exceeded: {details}. Please wait before retrying.",
                 }
             )
 
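Only the call site is visible in this diff, so the rate limiter's internals are unknown; what the new code assumes is a check_request_allowed() coroutine returning an (allowed, details) pair. A sliding-window sketch of such an interface, entirely illustrative and not crackerjack's implementation:

import time


class WindowRateLimiter:
    """Illustrative limiter matching the (allowed, details) call shape above."""

    def __init__(self, max_requests: int = 5, window_seconds: float = 60.0) -> None:
        self.max_requests = max_requests
        self.window_seconds = window_seconds
        self._history: dict[str, list[float]] = {}

    async def check_request_allowed(self, action: str) -> tuple[bool, str]:
        now = time.monotonic()
        # Keep only timestamps still inside the sliding window.
        recent = [t for t in self._history.get(action, [])
                  if now - t < self.window_seconds]
        if len(recent) >= self.max_requests:
            return False, (f"{len(recent)} '{action}' requests "
                           f"in the last {self.window_seconds:.0f}s")
        recent.append(now)
        self._history[action] = recent
        return True, "ok"

Returning the details string lets the caller surface the reason in its error message, as the updated JSON response above does.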
crackerjack/mcp/tools/progress_tools.py CHANGED

@@ -25,9 +25,78 @@ def _create_progress_file(job_id: str) -> Path:
     return progress_dir / f"job-{job_id}.json"
 
 
+def _clamp_progress(value: int) -> int:
+    return min(100, max(0, value))
+
+
+def _get_timestamp() -> str:
+    context = get_context()
+    return context.get_current_time() if context else ""
+
+
+def _build_dict_format_progress(
+    job_id: str,
+    progress_data: dict[str, t.Any],
+    iteration: int,
+    max_iterations: int,
+    overall_progress: int,
+    current_stage: str,
+    stage_progress: int,
+    message: str,
+) -> dict[str, t.Any]:
+    return {
+        "job_id": job_id,
+        "status": progress_data.get("status", "running"),
+        "iteration": progress_data.get("iteration", iteration),
+        "max_iterations": progress_data.get("max_iterations", max_iterations),
+        "overall_progress": _clamp_progress(
+            progress_data.get("overall_progress", overall_progress)
+        ),
+        "current_stage": progress_data.get("type", current_stage),
+        "stage_progress": _clamp_progress(
+            progress_data.get("stage_progress", stage_progress)
+        ),
+        "message": progress_data.get("message", message),
+        "timestamp": _get_timestamp(),
+    }
+
+
+def _build_legacy_format_progress(
+    job_id: str,
+    progress_data: str | None,
+    iteration: int,
+    max_iterations: int,
+    overall_progress: int,
+    current_stage: str,
+    stage_progress: int,
+    message: str,
+) -> dict[str, t.Any]:
+    status = progress_data if isinstance(progress_data, str) else "running"
+    return {
+        "job_id": job_id,
+        "status": status,
+        "iteration": iteration,
+        "max_iterations": max_iterations,
+        "overall_progress": _clamp_progress(overall_progress),
+        "current_stage": current_stage,
+        "stage_progress": _clamp_progress(stage_progress),
+        "message": message,
+        "timestamp": _get_timestamp(),
+    }
+
+
+def _notify_websocket(final_progress_data: dict[str, t.Any]) -> None:
+    context = get_context()
+    if context and hasattr(context, "websocket_progress_queue"):
+        with contextlib.suppress(Exception):
+            context.websocket_progress_queue.put_nowait(final_progress_data)
+
+
 def _update_progress(
     job_id: str,
-
+    progress_data: dict[str, t.Any] | str = None,
+    context: t.Any = None,
+    # Legacy parameters for backward compatibility
     iteration: int = 1,
     max_iterations: int = 10,
     overall_progress: int = 0,
@@ -38,24 +107,31 @@ def _update_progress(
     try:
         progress_file = _create_progress_file(job_id)
 
-        progress_data
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if isinstance(progress_data, dict):
+            final_progress_data = _build_dict_format_progress(
+                job_id,
+                progress_data,
+                iteration,
+                max_iterations,
+                overall_progress,
+                current_stage,
+                stage_progress,
+                message,
+            )
+        else:
+            final_progress_data = _build_legacy_format_progress(
+                job_id,
+                progress_data,
+                iteration,
+                max_iterations,
+                overall_progress,
+                current_stage,
+                stage_progress,
+                message,
+            )
+
+        progress_file.write_text(json.dumps(final_progress_data, indent=2))
+        _notify_websocket(final_progress_data)
 
     except Exception as e:
         context = get_context()
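The refactor splits _update_progress into two pure record builders plus a websocket notifier, so the dict path and the legacy string path now produce the same JSON shape. A condensed, self-contained illustration of the dict path's precedence and clamping rules (the payload values here are made up):

from typing import Any

def _clamp_progress(value: int) -> int:
    # Same helper as in the diff: percentages are forced into 0-100.
    return min(100, max(0, value))

# Explicit payload keys win; legacy keyword values act as fallbacks,
# and the payload's "type" becomes the record's "current_stage".
payload: dict[str, Any] = {"type": "tests", "overall_progress": 150}
record = {
    "status": payload.get("status", "running"),
    "current_stage": payload.get("type", "initialization"),
    "overall_progress": _clamp_progress(payload.get("overall_progress", 0)),
}
print(record)
# {'status': 'running', 'current_stage': 'tests', 'overall_progress': 100}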
crackerjack/mcp/tools/workflow_executor.py CHANGED

@@ -24,10 +24,15 @@ async def execute_crackerjack_workflow(
     try:
         return await _execute_crackerjack_sync(job_id, args, kwargs, get_context())
     except Exception as e:
+        # Add full traceback for debugging
+        import traceback
+
+        error_details = traceback.format_exc()
         return {
             "job_id": job_id,
             "status": "failed",
             "error": f"Execution failed: {e}",
+            "traceback": error_details,
             "timestamp": time.time(),
         }
 
@@ -64,7 +69,7 @@ async def _initialize_execution(
     context: t.Any,
 ) -> dict[str, t.Any]:
     """Initialize execution environment and validate parameters."""
-
+    _update_progress(
         job_id,
         {
             "type": "initialization",
@@ -86,7 +91,7 @@
         "job_id": job_id,
     }
 
-
+    _update_progress(
         job_id,
         {
             "type": "initialization",
@@ -111,7 +116,7 @@ async def _setup_orchestrator(
     context: t.Any,
 ) -> dict[str, t.Any]:
     """Set up the appropriate orchestrator based on configuration."""
-
+    _update_progress(
         job_id,
         {
             "type": "setup",
@@ -121,7 +126,9 @@
         context,
     )
 
-    use_advanced = kwargs.get(
+    use_advanced = kwargs.get(
+        "advanced_orchestration", False
+    )  # Temporarily disable advanced orchestration
 
     try:
         if use_advanced:
@@ -161,7 +168,6 @@ async def _create_advanced_orchestrator(
 
     orchestrator = AsyncWorkflowOrchestrator(
         pkg_path=Path(working_dir),
-        container=container,
     )
 
     return orchestrator
@@ -233,7 +239,7 @@ async def _run_workflow_iterations(
     max_iterations = kwargs.get("max_iterations", 10)
 
     for iteration in range(max_iterations):
-
+        _update_progress(
             job_id,
             {
                 "type": "iteration",
@@ -250,7 +256,15 @@
         )
 
         if success:
-
+            # Attempt coverage improvement after successful execution (if enabled)
+            coverage_result = None
+            if kwargs.get("boost_coverage", False):  # Temporarily disabled
+                coverage_result = await _attempt_coverage_improvement(
+                    job_id, orchestrator, context
+                )
+            return _create_success_result(
+                job_id, iteration + 1, context, coverage_result
+            )
 
         # Handle retry logic
         if iteration < max_iterations - 1:
@@ -266,14 +280,54 @@ def _create_workflow_options(kwargs: dict[str, t.Any]) -> t.Any:
     """Create workflow options from kwargs."""
     from types import SimpleNamespace
 
-    # Create options object with
+    # Create options object with all required attributes from OptionsProtocol
     options = SimpleNamespace()
-
-
+
+    # Core execution options
+    options.commit = kwargs.get("commit", False)
     options.interactive = kwargs.get("interactive", False)
+    options.no_config_updates = kwargs.get("no_config_updates", False)
+    options.verbose = kwargs.get("verbose", True)
+    options.clean = kwargs.get("clean", False)
+    options.test = kwargs.get("test_mode", True)
     options.benchmark = kwargs.get("benchmark", False)
     options.skip_hooks = kwargs.get("skip_hooks", False)
-    options.
+    options.ai_agent = kwargs.get("ai_agent", True)
+    options.async_mode = kwargs.get("async_mode", True)
+
+    # Test options
+    options.test_workers = kwargs.get("test_workers", 0)
+    options.test_timeout = kwargs.get("test_timeout", 0)
+
+    # Publishing options
+    options.publish = kwargs.get("publish")
+    options.bump = kwargs.get("bump")
+    options.all = kwargs.get("all")
+    options.create_pr = kwargs.get("create_pr", False)
+    options.no_git_tags = kwargs.get("no_git_tags", False)
+    options.skip_version_check = kwargs.get("skip_version_check", False)
+    options.cleanup_pypi = kwargs.get("cleanup_pypi", False)
+    options.keep_releases = kwargs.get("keep_releases", 10)
+
+    # Server options
+    options.start_mcp_server = kwargs.get("start_mcp_server", False)
+
+    # Hook options
+    options.update_precommit = kwargs.get("update_precommit", False)
+    options.experimental_hooks = kwargs.get("experimental_hooks", False)
+    options.enable_pyrefly = kwargs.get("enable_pyrefly", False)
+    options.enable_ty = kwargs.get("enable_ty", False)
+
+    # Cleanup options
+    options.cleanup = kwargs.get("cleanup")
+
+    # Coverage and progress
+    options.coverage = kwargs.get("coverage", False)
+    options.track_progress = kwargs.get("track_progress", False)
+
+    # Speed options
+    options.fast = kwargs.get("fast", False)
+    options.comp = kwargs.get("comp", False)
 
     return options
 
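Building the options object as a SimpleNamespace rather than a dataclass means consumers typed against OptionsProtocol only need the attributes to exist. A tiny sketch of the pattern, including the test_mode to test key rename visible above (the values are hypothetical):

from types import SimpleNamespace

kwargs = {"commit": True, "test_mode": False}
options = SimpleNamespace()
options.commit = kwargs.get("commit", False)
options.test = kwargs.get("test_mode", True)  # kwarg name differs from attribute
print(options.commit, options.test)  # True False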
@@ -286,17 +340,52 @@ async def _execute_single_iteration(
     context: t.Any,
 ) -> bool:
     """Execute a single workflow iteration."""
-
-
-
-
+    try:
+        # Check for orchestrator workflow methods
+        if hasattr(orchestrator, "run_complete_workflow"):
+            # Standard WorkflowOrchestrator method is async
+            result = orchestrator.run_complete_workflow(options)
+            if result is None:
+                raise ValueError(
+                    "Method run_complete_workflow returned None instead of awaitable"
+                )
+            return await result
+        elif hasattr(orchestrator, "run_complete_workflow_async"):
+            result = orchestrator.run_complete_workflow_async(options)
+            if result is None:
+                raise ValueError(
+                    "Method run_complete_workflow_async returned None instead of awaitable"
+                )
+            return await result
+        elif hasattr(orchestrator, "execute_workflow"):
+            result = orchestrator.execute_workflow(options)
+            if result is None:
+                raise ValueError(
+                    "Method execute_workflow returned None instead of awaitable"
+                )
+            return await result
+        elif hasattr(orchestrator, "run"):
+            # Fallback for synchronous orchestrators
+            return orchestrator.run(options)
+        else:
+            raise ValueError(
+                f"Orchestrator {type(orchestrator)} has no recognized workflow execution method"
+            )
+    except Exception as e:
+        # Add detailed error info for debugging
+        raise RuntimeError(
+            f"Error in _execute_single_iteration (iteration {iteration}): {e}"
+        ) from e
 
 
 def _create_success_result(
-    job_id: str,
+    job_id: str,
+    iterations: int,
+    context: t.Any,
+    coverage_result: dict[str, t.Any] | None = None,
 ) -> dict[str, t.Any]:
     """Create success result with completion data."""
-
+    result = {
         "job_id": job_id,
         "status": "completed",
         "iterations": iterations,
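The dispatch in _execute_single_iteration is a capability probe: try the richest async entry point first, verify the call actually returned an awaitable before awaiting it, and fall back to a synchronous run. The same pattern condenses to a loop; this standalone sketch, stub orchestrator and all, is illustrative only:

import asyncio
from typing import Any


async def dispatch_workflow(orchestrator: Any, options: Any) -> bool:
    # Probe entry points from most to least preferred, as the diff does.
    for name in ("run_complete_workflow", "run_complete_workflow_async",
                 "execute_workflow"):
        method = getattr(orchestrator, name, None)
        if method is not None:
            result = method(options)
            if result is None:  # guard against a stub returning nothing
                raise ValueError(f"{name} returned None instead of an awaitable")
            return await result
    if hasattr(orchestrator, "run"):
        return orchestrator.run(options)  # synchronous fallback
    raise ValueError(f"{type(orchestrator)} has no recognized workflow method")


class StubOrchestrator:
    def run_complete_workflow(self, options: Any) -> Any:
        async def work() -> bool:
            return True
        return work()  # returns a coroutine for the caller to await


print(asyncio.run(dispatch_workflow(StubOrchestrator(), object())))  # True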
@@ -305,10 +394,15 @@ def _create_success_result(
         "success": True,
     }
 
+    if coverage_result:
+        result["coverage_improvement"] = coverage_result
+
+    return result
+
 
 async def _handle_iteration_retry(job_id: str, iteration: int, context: t.Any) -> None:
     """Handle retry logic between iterations."""
-
+    _update_progress(
         job_id,
         {
             "type": "iteration",
@@ -327,7 +421,7 @@ async def _handle_iteration_error(
     job_id: str, iteration: int, error: Exception, context: t.Any
 ) -> dict[str, t.Any]:
     """Handle errors during iteration execution."""
-
+    _update_progress(
         job_id,
         {
             "type": "error",
@@ -347,6 +441,117 @@ async def _handle_iteration_error(
     }
 
 
+async def _attempt_coverage_improvement(
+    job_id: str, orchestrator: t.Any, context: t.Any
+) -> dict[str, t.Any]:
+    """Attempt proactive coverage improvement after successful workflow execution."""
+    try:
+        _update_progress(
+            job_id,
+            {
+                "type": "coverage_improvement",
+                "status": "starting",
+                "message": "Analyzing coverage for improvement opportunities...",
+            },
+            context,
+        )
+
+        # Get project path from orchestrator
+        project_path = getattr(orchestrator, "pkg_path", None)
+        if not project_path:
+            return {"status": "skipped", "reason": "No project path available"}
+
+        # Import coverage improvement orchestrator
+        from crackerjack.orchestration.coverage_improvement import (
+            create_coverage_improvement_orchestrator,
+        )
+
+        # Create coverage orchestrator
+        coverage_orchestrator = await create_coverage_improvement_orchestrator(
+            project_path,
+            console=getattr(orchestrator, "console", None),
+        )
+
+        # Check if improvement is needed
+        should_improve = await coverage_orchestrator.should_improve_coverage()
+        if not should_improve:
+            _update_progress(
+                job_id,
+                {
+                    "type": "coverage_improvement",
+                    "status": "skipped",
+                    "message": "Coverage improvement not needed (already at 100%)",
+                },
+                context,
+            )
+            return {"status": "skipped", "reason": "Coverage at 100%"}
+
+        # Create agent context (simplified)
+        from crackerjack.agents.base import AgentContext
+
+        agent_context = AgentContext(project_path=project_path, console=None)
+
+        # Execute coverage improvement
+        _update_progress(
+            job_id,
+            {
+                "type": "coverage_improvement",
+                "status": "executing",
+                "message": "Generating tests to improve coverage...",
+            },
+            context,
+        )
+
+        improvement_result = await coverage_orchestrator.execute_coverage_improvement(
+            agent_context
+        )
+
+        # Update progress with results
+        if improvement_result["status"] == "completed":
+            _update_progress(
+                job_id,
+                {
+                    "type": "coverage_improvement",
+                    "status": "completed",
+                    "message": f"Coverage improvement: {len(improvement_result.get('fixes_applied', []))} tests created",
+                    "fixes_applied": improvement_result.get("fixes_applied", []),
+                    "files_modified": improvement_result.get("files_modified", []),
+                },
+                context,
+            )
+        else:
+            _update_progress(
+                job_id,
+                {
+                    "type": "coverage_improvement",
+                    "status": "completed_with_issues",
+                    "message": f"Coverage improvement attempted: {improvement_result.get('status', 'unknown')}",
+                },
+                context,
+            )
+
+        return improvement_result
+
+    except Exception as e:
+        _update_progress(
+            job_id,
+            {
+                "type": "coverage_improvement",
+                "status": "failed",
+                "error": str(e),
+                "message": f"Coverage improvement failed: {e}",
+            },
+            context,
+        )
+
+        return {
+            "status": "failed",
+            "error": str(e),
+            "fixes_applied": [],
+            "files_modified": [],
+        }
+
+
 def _create_failure_result(
     job_id: str, max_iterations: int, context: t.Any
 ) -> dict[str, t.Any]: