crackerjack 0.30.3__py3-none-any.whl → 0.31.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic. Click here for more details.

Files changed (156)
  1. crackerjack/CLAUDE.md +1005 -0
  2. crackerjack/RULES.md +380 -0
  3. crackerjack/__init__.py +42 -13
  4. crackerjack/__main__.py +227 -299
  5. crackerjack/agents/__init__.py +41 -0
  6. crackerjack/agents/architect_agent.py +281 -0
  7. crackerjack/agents/base.py +170 -0
  8. crackerjack/agents/coordinator.py +512 -0
  9. crackerjack/agents/documentation_agent.py +498 -0
  10. crackerjack/agents/dry_agent.py +388 -0
  11. crackerjack/agents/formatting_agent.py +245 -0
  12. crackerjack/agents/import_optimization_agent.py +281 -0
  13. crackerjack/agents/performance_agent.py +669 -0
  14. crackerjack/agents/proactive_agent.py +104 -0
  15. crackerjack/agents/refactoring_agent.py +788 -0
  16. crackerjack/agents/security_agent.py +529 -0
  17. crackerjack/agents/test_creation_agent.py +657 -0
  18. crackerjack/agents/test_specialist_agent.py +486 -0
  19. crackerjack/agents/tracker.py +212 -0
  20. crackerjack/api.py +560 -0
  21. crackerjack/cli/__init__.py +24 -0
  22. crackerjack/cli/facade.py +104 -0
  23. crackerjack/cli/handlers.py +267 -0
  24. crackerjack/cli/interactive.py +471 -0
  25. crackerjack/cli/options.py +409 -0
  26. crackerjack/cli/utils.py +18 -0
  27. crackerjack/code_cleaner.py +618 -928
  28. crackerjack/config/__init__.py +19 -0
  29. crackerjack/config/hooks.py +218 -0
  30. crackerjack/core/__init__.py +0 -0
  31. crackerjack/core/async_workflow_orchestrator.py +406 -0
  32. crackerjack/core/autofix_coordinator.py +200 -0
  33. crackerjack/core/container.py +104 -0
  34. crackerjack/core/enhanced_container.py +542 -0
  35. crackerjack/core/performance.py +243 -0
  36. crackerjack/core/phase_coordinator.py +585 -0
  37. crackerjack/core/proactive_workflow.py +316 -0
  38. crackerjack/core/session_coordinator.py +289 -0
  39. crackerjack/core/workflow_orchestrator.py +826 -0
  40. crackerjack/dynamic_config.py +94 -103
  41. crackerjack/errors.py +263 -41
  42. crackerjack/executors/__init__.py +11 -0
  43. crackerjack/executors/async_hook_executor.py +431 -0
  44. crackerjack/executors/cached_hook_executor.py +242 -0
  45. crackerjack/executors/hook_executor.py +345 -0
  46. crackerjack/executors/individual_hook_executor.py +669 -0
  47. crackerjack/intelligence/__init__.py +44 -0
  48. crackerjack/intelligence/adaptive_learning.py +751 -0
  49. crackerjack/intelligence/agent_orchestrator.py +551 -0
  50. crackerjack/intelligence/agent_registry.py +414 -0
  51. crackerjack/intelligence/agent_selector.py +502 -0
  52. crackerjack/intelligence/integration.py +290 -0
  53. crackerjack/interactive.py +576 -315
  54. crackerjack/managers/__init__.py +11 -0
  55. crackerjack/managers/async_hook_manager.py +135 -0
  56. crackerjack/managers/hook_manager.py +137 -0
  57. crackerjack/managers/publish_manager.py +433 -0
  58. crackerjack/managers/test_command_builder.py +151 -0
  59. crackerjack/managers/test_executor.py +443 -0
  60. crackerjack/managers/test_manager.py +258 -0
  61. crackerjack/managers/test_manager_backup.py +1124 -0
  62. crackerjack/managers/test_progress.py +114 -0
  63. crackerjack/mcp/__init__.py +0 -0
  64. crackerjack/mcp/cache.py +336 -0
  65. crackerjack/mcp/client_runner.py +104 -0
  66. crackerjack/mcp/context.py +621 -0
  67. crackerjack/mcp/dashboard.py +636 -0
  68. crackerjack/mcp/enhanced_progress_monitor.py +479 -0
  69. crackerjack/mcp/file_monitor.py +336 -0
  70. crackerjack/mcp/progress_components.py +569 -0
  71. crackerjack/mcp/progress_monitor.py +949 -0
  72. crackerjack/mcp/rate_limiter.py +332 -0
  73. crackerjack/mcp/server.py +22 -0
  74. crackerjack/mcp/server_core.py +244 -0
  75. crackerjack/mcp/service_watchdog.py +501 -0
  76. crackerjack/mcp/state.py +395 -0
  77. crackerjack/mcp/task_manager.py +257 -0
  78. crackerjack/mcp/tools/__init__.py +17 -0
  79. crackerjack/mcp/tools/core_tools.py +249 -0
  80. crackerjack/mcp/tools/error_analyzer.py +308 -0
  81. crackerjack/mcp/tools/execution_tools.py +372 -0
  82. crackerjack/mcp/tools/execution_tools_backup.py +1097 -0
  83. crackerjack/mcp/tools/intelligence_tool_registry.py +80 -0
  84. crackerjack/mcp/tools/intelligence_tools.py +314 -0
  85. crackerjack/mcp/tools/monitoring_tools.py +502 -0
  86. crackerjack/mcp/tools/proactive_tools.py +384 -0
  87. crackerjack/mcp/tools/progress_tools.py +217 -0
  88. crackerjack/mcp/tools/utility_tools.py +341 -0
  89. crackerjack/mcp/tools/workflow_executor.py +565 -0
  90. crackerjack/mcp/websocket/__init__.py +14 -0
  91. crackerjack/mcp/websocket/app.py +39 -0
  92. crackerjack/mcp/websocket/endpoints.py +559 -0
  93. crackerjack/mcp/websocket/jobs.py +253 -0
  94. crackerjack/mcp/websocket/server.py +116 -0
  95. crackerjack/mcp/websocket/websocket_handler.py +78 -0
  96. crackerjack/mcp/websocket_server.py +10 -0
  97. crackerjack/models/__init__.py +31 -0
  98. crackerjack/models/config.py +93 -0
  99. crackerjack/models/config_adapter.py +230 -0
  100. crackerjack/models/protocols.py +118 -0
  101. crackerjack/models/task.py +154 -0
  102. crackerjack/monitoring/ai_agent_watchdog.py +450 -0
  103. crackerjack/monitoring/regression_prevention.py +638 -0
  104. crackerjack/orchestration/__init__.py +0 -0
  105. crackerjack/orchestration/advanced_orchestrator.py +970 -0
  106. crackerjack/orchestration/coverage_improvement.py +223 -0
  107. crackerjack/orchestration/execution_strategies.py +341 -0
  108. crackerjack/orchestration/test_progress_streamer.py +636 -0
  109. crackerjack/plugins/__init__.py +15 -0
  110. crackerjack/plugins/base.py +200 -0
  111. crackerjack/plugins/hooks.py +246 -0
  112. crackerjack/plugins/loader.py +335 -0
  113. crackerjack/plugins/managers.py +259 -0
  114. crackerjack/py313.py +8 -3
  115. crackerjack/services/__init__.py +22 -0
  116. crackerjack/services/cache.py +314 -0
  117. crackerjack/services/config.py +358 -0
  118. crackerjack/services/config_integrity.py +99 -0
  119. crackerjack/services/contextual_ai_assistant.py +516 -0
  120. crackerjack/services/coverage_ratchet.py +356 -0
  121. crackerjack/services/debug.py +736 -0
  122. crackerjack/services/dependency_monitor.py +617 -0
  123. crackerjack/services/enhanced_filesystem.py +439 -0
  124. crackerjack/services/file_hasher.py +151 -0
  125. crackerjack/services/filesystem.py +421 -0
  126. crackerjack/services/git.py +176 -0
  127. crackerjack/services/health_metrics.py +611 -0
  128. crackerjack/services/initialization.py +873 -0
  129. crackerjack/services/log_manager.py +286 -0
  130. crackerjack/services/logging.py +174 -0
  131. crackerjack/services/metrics.py +578 -0
  132. crackerjack/services/pattern_cache.py +362 -0
  133. crackerjack/services/pattern_detector.py +515 -0
  134. crackerjack/services/performance_benchmarks.py +653 -0
  135. crackerjack/services/security.py +163 -0
  136. crackerjack/services/server_manager.py +234 -0
  137. crackerjack/services/smart_scheduling.py +144 -0
  138. crackerjack/services/tool_version_service.py +61 -0
  139. crackerjack/services/unified_config.py +437 -0
  140. crackerjack/services/version_checker.py +248 -0
  141. crackerjack/slash_commands/__init__.py +14 -0
  142. crackerjack/slash_commands/init.md +122 -0
  143. crackerjack/slash_commands/run.md +163 -0
  144. crackerjack/slash_commands/status.md +127 -0
  145. crackerjack-0.31.7.dist-info/METADATA +742 -0
  146. crackerjack-0.31.7.dist-info/RECORD +149 -0
  147. crackerjack-0.31.7.dist-info/entry_points.txt +2 -0
  148. crackerjack/.gitignore +0 -34
  149. crackerjack/.libcst.codemod.yaml +0 -18
  150. crackerjack/.pdm.toml +0 -1
  151. crackerjack/crackerjack.py +0 -3805
  152. crackerjack/pyproject.toml +0 -286
  153. crackerjack-0.30.3.dist-info/METADATA +0 -1290
  154. crackerjack-0.30.3.dist-info/RECORD +0 -16
  155. {crackerjack-0.30.3.dist-info → crackerjack-0.31.7.dist-info}/WHEEL +0 -0
  156. {crackerjack-0.30.3.dist-info → crackerjack-0.31.7.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,565 @@
1
+ """Workflow execution engine for MCP tools.
2
+
3
+ This module handles the core workflow execution logic, including orchestrator setup,
4
+ iteration management, and result handling. Split from execution_tools.py for better
5
+ separation of concerns.
6
+ """
7
+
8
+ import asyncio
9
+ import time
10
+ import typing as t
11
+ import uuid
12
+
13
+ from crackerjack.mcp.context import get_context
14
+
15
+ from .progress_tools import _update_progress
16
+
17
+
18
async def execute_crackerjack_workflow(
    args: str, kwargs: dict[str, t.Any]
) -> dict[str, t.Any]:
    """Execute the main crackerjack workflow with progress tracking.

    Generates a short random job id, delegates to the synchronous
    execution helper, and converts any unexpected exception into a
    structured failure payload (including the full traceback so the
    caller can debug the failure).
    """
    import traceback

    job_id = str(uuid.uuid4())[:8]

    try:
        return await _execute_crackerjack_sync(job_id, args, kwargs, get_context())
    except Exception as exc:
        return {
            "job_id": job_id,
            "status": "failed",
            "error": f"Execution failed: {exc}",
            "traceback": traceback.format_exc(),
            "timestamp": time.time(),
        }
38
+
39
+
40
async def _execute_crackerjack_sync(
    job_id: str,
    args: str,
    kwargs: dict[str, t.Any],
    context: t.Any,
) -> dict[str, t.Any]:
    """Drive one end-to-end workflow run: init, orchestrator setup, iterations.

    Each phase returns a result dict; any phase reporting status
    "failed" short-circuits the run and its payload is returned to the
    caller unchanged.
    """
    init_info = await _initialize_execution(job_id, args, kwargs, context)
    if init_info.get("status") == "failed":
        return init_info

    setup_info = await _setup_orchestrator(
        job_id, args, kwargs, init_info["working_dir"], context
    )
    if setup_info.get("status") == "failed":
        return setup_info

    return await _run_workflow_iterations(
        job_id, setup_info["orchestrator"], kwargs, context
    )
63
+
64
+
65
async def _initialize_execution(
    job_id: str,
    args: str,
    kwargs: dict[str, t.Any],
    context: t.Any,
) -> dict[str, t.Any]:
    """Validate the requested working directory and report readiness.

    Emits "initialization" progress events and returns either a failure
    payload (directory missing) or an "initialized" payload carrying the
    absolute working-directory path under the ``working_dir`` key.
    """
    from pathlib import Path

    _update_progress(
        job_id,
        {
            "type": "initialization",
            "status": "starting",
            "message": "Initializing crackerjack execution...",
        },
        context,
    )

    requested_dir = kwargs.get("working_directory", ".")
    resolved = Path(requested_dir)
    if not resolved.exists():
        return {
            "status": "failed",
            "error": f"Working directory does not exist: {requested_dir}",
            "job_id": job_id,
        }

    _update_progress(
        job_id,
        {
            "type": "initialization",
            "status": "ready",
            "working_directory": str(resolved.absolute()),
        },
        context,
    )

    return {
        "status": "initialized",
        "working_dir": resolved.absolute(),
        "job_id": job_id,
    }
109
+
110
+
111
async def _setup_orchestrator(
    job_id: str,
    args: str,
    kwargs: dict[str, t.Any],
    working_dir: t.Any,
    context: t.Any,
) -> dict[str, t.Any]:
    """Build the orchestrator requested by ``kwargs`` and report progress.

    The advanced async orchestrator is chosen only when the
    ``advanced_orchestration`` kwarg is explicitly truthy; otherwise the
    standard synchronous orchestrator is used. Construction errors are
    converted into a failure payload rather than raised.
    """
    _update_progress(
        job_id,
        {
            "type": "setup",
            "status": "creating_orchestrator",
            "message": "Setting up workflow orchestrator...",
        },
        context,
    )

    # Advanced orchestration stays opt-in: default is the standard path.
    use_advanced = kwargs.get("advanced_orchestration", False)

    try:
        if use_advanced:
            orchestrator = await _create_advanced_orchestrator(
                working_dir, kwargs, context
            )
        else:
            orchestrator = _create_standard_orchestrator(working_dir, kwargs)
    except Exception as exc:
        return {
            "status": "failed",
            "error": f"Failed to create orchestrator: {exc}",
            "job_id": job_id,
        }

    return {
        "status": "ready",
        "orchestrator": orchestrator,
        "job_id": job_id,
    }
153
+
154
+
155
async def _create_advanced_orchestrator(
    working_dir: t.Any, kwargs: dict[str, t.Any], context: t.Any
) -> t.Any:
    """Create the advanced async orchestrator with dependency injection.

    NOTE(review): the populated ``EnhancedContainer`` is never handed to
    ``AsyncWorkflowOrchestrator`` below — the service registrations look
    like dead wiring. Confirm whether the orchestrator is supposed to
    receive the container before relying on DI here.
    """
    from pathlib import Path

    from crackerjack.core.async_workflow_orchestrator import AsyncWorkflowOrchestrator
    from crackerjack.core.enhanced_container import EnhancedContainer

    project_path = Path(working_dir)

    # Build and populate the DI container (see NOTE above: currently unused
    # by the orchestrator itself).
    di_container = EnhancedContainer()
    await _register_core_services(di_container, project_path)

    return AsyncWorkflowOrchestrator(pkg_path=project_path)
174
+
175
+
176
def _create_standard_orchestrator(
    working_dir: t.Any, kwargs: dict[str, t.Any]
) -> t.Any:
    """Create the standard synchronous orchestrator.

    ``kwargs`` is accepted for signature parity with the advanced
    factory but is currently unused.
    """
    from pathlib import Path

    from crackerjack.core.workflow_orchestrator import WorkflowOrchestrator

    pkg_path = Path(working_dir)
    return WorkflowOrchestrator(pkg_path=pkg_path)
185
+
186
+
187
async def _register_core_services(container: t.Any, working_dir: t.Any) -> None:
    """Register core services with the dependency injection container.

    All services are registered as singletons: the three workflow
    managers (hooks, tests, publishing) plus the enhanced filesystem
    service, each bound to its protocol (the filesystem service is bound
    to its concrete class).

    NOTE(review): ``AsyncHookManager`` is imported from
    ``crackerjack.managers.hook_manager`` even though a dedicated
    ``async_hook_manager`` module exists — confirm the intended source.
    """
    from rich.console import Console

    from crackerjack.core.enhanced_container import ServiceLifetime
    from crackerjack.managers.hook_manager import AsyncHookManager
    from crackerjack.managers.publish_manager import PublishManager
    from crackerjack.managers.test_manager import TestManager
    from crackerjack.models.protocols import (
        HookManagerProtocol,
        PublishManagerProtocol,
        TestManagerProtocol,
    )
    from crackerjack.services.enhanced_filesystem import EnhancedFileSystemService

    ui_console = Console()

    # (protocol, instance) pairs, registered in the original order.
    registrations = (
        (HookManagerProtocol, AsyncHookManager(ui_console, working_dir)),
        (TestManagerProtocol, TestManager(ui_console, working_dir)),
        (PublishManagerProtocol, PublishManager(ui_console, working_dir)),
        (EnhancedFileSystemService, EnhancedFileSystemService()),
    )
    for protocol, instance in registrations:
        container.register_service(protocol, instance, ServiceLifetime.SINGLETON)
229
+
230
+
231
async def _run_workflow_iterations(
    job_id: str,
    orchestrator: t.Any,
    kwargs: dict[str, t.Any],
    context: t.Any,
) -> dict[str, t.Any]:
    """Run workflow iterations until completion or max attempts.

    Each attempt emits a "running" progress event and executes one full
    workflow pass. On success an optional coverage-improvement step runs
    (``boost_coverage`` kwarg) and a success payload is returned; on a
    failed pass a retry is announced (except on the final attempt); any
    exception is converted into a failure payload.
    """
    options = _create_workflow_options(kwargs)
    max_iterations = kwargs.get("max_iterations", 10)

    for attempt in range(max_iterations):
        _update_progress(
            job_id,
            {
                "type": "iteration",
                "iteration": attempt + 1,
                "max_iterations": max_iterations,
                "status": "running",
            },
            context,
        )

        try:
            passed = await _execute_single_iteration(
                job_id, orchestrator, options, attempt, context
            )

            if passed:
                # Optionally boost coverage once the quality gates pass.
                coverage_result = None
                if kwargs.get("boost_coverage", False):
                    coverage_result = await _attempt_coverage_improvement(
                        job_id, orchestrator, context
                    )
                return _create_success_result(
                    job_id, attempt + 1, context, coverage_result
                )

            # Not the final attempt: announce the retry and pause briefly.
            if attempt < max_iterations - 1:
                await _handle_iteration_retry(job_id, attempt, context)

        except Exception as exc:
            return await _handle_iteration_error(job_id, attempt, exc, context)

    return _create_failure_result(job_id, max_iterations, context)
277
+
278
+
279
+ def _create_workflow_options(kwargs: dict[str, t.Any]) -> t.Any:
280
+ """Create workflow options from kwargs."""
281
+ from types import SimpleNamespace
282
+
283
+ # Create options object with all required attributes from OptionsProtocol
284
+ options = SimpleNamespace()
285
+
286
+ # Core execution options
287
+ options.commit = kwargs.get("commit", False)
288
+ options.interactive = kwargs.get("interactive", False)
289
+ options.no_config_updates = kwargs.get("no_config_updates", False)
290
+ options.verbose = kwargs.get("verbose", True)
291
+ options.clean = kwargs.get("clean", False)
292
+ options.test = kwargs.get("test_mode", True)
293
+ options.benchmark = kwargs.get("benchmark", False)
294
+ options.skip_hooks = kwargs.get("skip_hooks", False)
295
+ options.ai_agent = kwargs.get("ai_agent", True)
296
+ options.async_mode = kwargs.get("async_mode", True)
297
+
298
+ # Test options
299
+ options.test_workers = kwargs.get("test_workers", 0)
300
+ options.test_timeout = kwargs.get("test_timeout", 0)
301
+
302
+ # Publishing options
303
+ options.publish = kwargs.get("publish")
304
+ options.bump = kwargs.get("bump")
305
+ options.all = kwargs.get("all")
306
+ options.create_pr = kwargs.get("create_pr", False)
307
+ options.no_git_tags = kwargs.get("no_git_tags", False)
308
+ options.skip_version_check = kwargs.get("skip_version_check", False)
309
+ options.cleanup_pypi = kwargs.get("cleanup_pypi", False)
310
+ options.keep_releases = kwargs.get("keep_releases", 10)
311
+
312
+ # Server options
313
+ options.start_mcp_server = kwargs.get("start_mcp_server", False)
314
+
315
+ # Hook options
316
+ options.update_precommit = kwargs.get("update_precommit", False)
317
+ options.experimental_hooks = kwargs.get("experimental_hooks", False)
318
+ options.enable_pyrefly = kwargs.get("enable_pyrefly", False)
319
+ options.enable_ty = kwargs.get("enable_ty", False)
320
+
321
+ # Cleanup options
322
+ options.cleanup = kwargs.get("cleanup")
323
+
324
+ # Coverage and progress
325
+ options.coverage = kwargs.get("coverage", False)
326
+ options.track_progress = kwargs.get("track_progress", False)
327
+
328
+ # Speed options
329
+ options.fast = kwargs.get("fast", False)
330
+ options.comp = kwargs.get("comp", False)
331
+
332
+ return options
333
+
334
+
335
+ async def _execute_single_iteration(
336
+ job_id: str,
337
+ orchestrator: t.Any,
338
+ options: t.Any,
339
+ iteration: int,
340
+ context: t.Any,
341
+ ) -> bool:
342
+ """Execute a single workflow iteration."""
343
+ try:
344
+ # Check for orchestrator workflow methods
345
+ if hasattr(orchestrator, "run_complete_workflow"):
346
+ # Standard WorkflowOrchestrator method is async
347
+ result = orchestrator.run_complete_workflow(options)
348
+ if result is None:
349
+ raise ValueError(
350
+ "Method run_complete_workflow returned None instead of awaitable"
351
+ )
352
+ return await result
353
+ elif hasattr(orchestrator, "run_complete_workflow_async"):
354
+ result = orchestrator.run_complete_workflow_async(options)
355
+ if result is None:
356
+ raise ValueError(
357
+ "Method run_complete_workflow_async returned None instead of awaitable"
358
+ )
359
+ return await result
360
+ elif hasattr(orchestrator, "execute_workflow"):
361
+ result = orchestrator.execute_workflow(options)
362
+ if result is None:
363
+ raise ValueError(
364
+ "Method execute_workflow returned None instead of awaitable"
365
+ )
366
+ return await result
367
+ elif hasattr(orchestrator, "run"):
368
+ # Fallback for synchronous orchestrators
369
+ return orchestrator.run(options)
370
+ else:
371
+ raise ValueError(
372
+ f"Orchestrator {type(orchestrator)} has no recognized workflow execution method"
373
+ )
374
+ except Exception as e:
375
+ # Add detailed error info for debugging
376
+ raise RuntimeError(
377
+ f"Error in _execute_single_iteration (iteration {iteration}): {e}"
378
+ ) from e
379
+
380
+
381
+ def _create_success_result(
382
+ job_id: str,
383
+ iterations: int,
384
+ context: t.Any,
385
+ coverage_result: dict[str, t.Any] | None = None,
386
+ ) -> dict[str, t.Any]:
387
+ """Create success result with completion data."""
388
+ result = {
389
+ "job_id": job_id,
390
+ "status": "completed",
391
+ "iterations": iterations,
392
+ "result": "All quality checks passed successfully",
393
+ "timestamp": time.time(),
394
+ "success": True,
395
+ }
396
+
397
+ if coverage_result:
398
+ result["coverage_improvement"] = coverage_result
399
+
400
+ return result
401
+
402
+
403
async def _handle_iteration_retry(job_id: str, iteration: int, context: t.Any) -> None:
    """Announce a retry after a failed iteration, then pause briefly."""
    event = {
        "type": "iteration",
        "iteration": iteration + 1,
        "status": "retrying",
        "message": f"Issues found in iteration {iteration + 1}, retrying...",
    }
    _update_progress(job_id, event, context)

    # Give the system a moment to settle before the next attempt.
    await asyncio.sleep(1)
418
+
419
+
420
async def _handle_iteration_error(
    job_id: str, iteration: int, error: Exception, context: t.Any
) -> dict[str, t.Any]:
    """Report an iteration failure and build the terminal failure payload."""
    event = {
        "type": "error",
        "iteration": iteration + 1,
        "error": str(error),
        "status": "failed",
    }
    _update_progress(job_id, event, context)

    return {
        "job_id": job_id,
        "status": "failed",
        "error": f"Iteration {iteration + 1} failed: {error}",
        "timestamp": time.time(),
        "success": False,
    }
442
+
443
+
444
async def _attempt_coverage_improvement(
    job_id: str, orchestrator: t.Any, context: t.Any
) -> dict[str, t.Any]:
    """Attempt proactive coverage improvement after successful workflow execution.

    Best-effort: every outcome (skipped, completed, failed) is reported via
    progress events and returned as a result dict — this function never
    raises. Returns the improvement result from the coverage orchestrator,
    or a {"status": "skipped"/"failed", ...} payload.
    """
    try:
        _update_progress(
            job_id,
            {
                "type": "coverage_improvement",
                "status": "starting",
                "message": "Analyzing coverage for improvement opportunities...",
            },
            context,
        )

        # Get project path from orchestrator; without it there is nothing
        # to analyze, so bail out early.
        project_path = getattr(orchestrator, "pkg_path", None)
        if not project_path:
            return {"status": "skipped", "reason": "No project path available"}

        # Import coverage improvement orchestrator (deferred to keep this
        # optional feature off the module-import path).
        from crackerjack.orchestration.coverage_improvement import (
            create_coverage_improvement_orchestrator,
        )

        # Create coverage orchestrator; reuse the workflow orchestrator's
        # console if it exposes one.
        coverage_orchestrator = await create_coverage_improvement_orchestrator(
            project_path,
            console=getattr(orchestrator, "console", None),
        )

        # Check if improvement is needed before doing any work.
        should_improve = await coverage_orchestrator.should_improve_coverage()
        if not should_improve:
            _update_progress(
                job_id,
                {
                    "type": "coverage_improvement",
                    "status": "skipped",
                    "message": "Coverage improvement not needed (already at 100%)",
                },
                context,
            )
            return {"status": "skipped", "reason": "Coverage at 100%"}

        # Create agent context (simplified — no console is attached).
        from crackerjack.agents.base import AgentContext

        agent_context = AgentContext(project_path=project_path, console=None)

        # Execute coverage improvement
        _update_progress(
            job_id,
            {
                "type": "coverage_improvement",
                "status": "executing",
                "message": "Generating tests to improve coverage...",
            },
            context,
        )

        improvement_result = await coverage_orchestrator.execute_coverage_improvement(
            agent_context
        )

        # Update progress with results: a full success reports the applied
        # fixes/files; anything else is surfaced as completed-with-issues.
        if improvement_result["status"] == "completed":
            _update_progress(
                job_id,
                {
                    "type": "coverage_improvement",
                    "status": "completed",
                    "message": f"Coverage improvement: {len(improvement_result.get('fixes_applied', []))} tests created",
                    "fixes_applied": improvement_result.get("fixes_applied", []),
                    "files_modified": improvement_result.get("files_modified", []),
                },
                context,
            )
        else:
            _update_progress(
                job_id,
                {
                    "type": "coverage_improvement",
                    "status": "completed_with_issues",
                    "message": f"Coverage improvement attempted: {improvement_result.get('status', 'unknown')}",
                },
                context,
            )

        return improvement_result

    except Exception as e:
        # Swallow all errors: coverage boosting must never fail the
        # already-successful workflow run.
        _update_progress(
            job_id,
            {
                "type": "coverage_improvement",
                "status": "failed",
                "error": str(e),
                "message": f"Coverage improvement failed: {e}",
            },
            context,
        )

        return {
            "status": "failed",
            "error": str(e),
            "fixes_applied": [],
            "files_modified": [],
        }
553
+
554
+
555
+ def _create_failure_result(
556
+ job_id: str, max_iterations: int, context: t.Any
557
+ ) -> dict[str, t.Any]:
558
+ """Create failure result when max iterations exceeded."""
559
+ return {
560
+ "job_id": job_id,
561
+ "status": "failed",
562
+ "error": f"Maximum iterations ({max_iterations}) reached without success",
563
+ "timestamp": time.time(),
564
+ "success": False,
565
+ }
@@ -0,0 +1,14 @@
1
+ from .app import create_websocket_app
2
+ from .endpoints import register_endpoints
3
+ from .jobs import JobManager
4
+ from .server import WebSocketServer, main
5
+ from .websocket_handler import WebSocketHandler
6
+
7
+ __all__ = [
8
+ "JobManager",
9
+ "WebSocketHandler",
10
+ "WebSocketServer",
11
+ "create_websocket_app",
12
+ "main",
13
+ "register_endpoints",
14
+ ]
@@ -0,0 +1,39 @@
1
+ import asyncio
2
+ from pathlib import Path
3
+
4
+ from fastapi import FastAPI
5
+
6
+ from .endpoints import register_endpoints
7
+ from .jobs import JobManager
8
+ from .websocket_handler import register_websocket_routes
9
+
10
+
11
def create_websocket_app(job_manager: JobManager, progress_dir: Path) -> FastAPI:
    """Build the FastAPI app that serves Crackerjack progress monitoring.

    Wires the HTTP endpoints and WebSocket routes, and starts the job
    manager's background monitors on startup.

    Args:
        job_manager: Manager whose monitor/cleanup/timeout coroutines run
            as background tasks for the app's lifetime.
        progress_dir: Directory containing job progress files.

    Returns:
        The configured FastAPI application.
    """
    app = FastAPI(
        title="Crackerjack WebSocket Server",
        description="Real-time progress monitoring for Crackerjack workflows",
        version="1.0.0",
    )

    # Store job_manager in app state for startup/shutdown events
    app.state.job_manager = job_manager
    # Keep strong references to background tasks: the event loop holds only
    # weak references, so an unreferenced task can be garbage-collected
    # before it finishes.
    app.state.background_tasks = set()

    # NOTE(review): @app.on_event is deprecated in FastAPI in favor of
    # lifespan handlers; kept here to avoid changing startup semantics.
    @app.on_event("startup")
    async def startup_event() -> None:
        """Start background tasks."""
        if job_manager:
            for coro in (
                job_manager.monitor_progress_files(),
                job_manager.cleanup_old_jobs(),
                job_manager.timeout_stuck_jobs(),
            ):
                task = asyncio.create_task(coro)
                app.state.background_tasks.add(task)
                # Drop the reference once the task completes.
                task.add_done_callback(app.state.background_tasks.discard)

    @app.on_event("shutdown")
    async def shutdown_event() -> None:
        """Cleanup on shutdown."""
        if job_manager:
            job_manager.cleanup()

    register_endpoints(app, job_manager, progress_dir)

    register_websocket_routes(app, job_manager, progress_dir)

    return app