crackerjack 0.29.0__py3-none-any.whl → 0.31.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic; see the registry advisory for more details.

Files changed (158)
  1. crackerjack/CLAUDE.md +1005 -0
  2. crackerjack/RULES.md +380 -0
  3. crackerjack/__init__.py +42 -13
  4. crackerjack/__main__.py +225 -253
  5. crackerjack/agents/__init__.py +41 -0
  6. crackerjack/agents/architect_agent.py +281 -0
  7. crackerjack/agents/base.py +169 -0
  8. crackerjack/agents/coordinator.py +512 -0
  9. crackerjack/agents/documentation_agent.py +498 -0
  10. crackerjack/agents/dry_agent.py +388 -0
  11. crackerjack/agents/formatting_agent.py +245 -0
  12. crackerjack/agents/import_optimization_agent.py +281 -0
  13. crackerjack/agents/performance_agent.py +669 -0
  14. crackerjack/agents/proactive_agent.py +104 -0
  15. crackerjack/agents/refactoring_agent.py +788 -0
  16. crackerjack/agents/security_agent.py +529 -0
  17. crackerjack/agents/test_creation_agent.py +652 -0
  18. crackerjack/agents/test_specialist_agent.py +486 -0
  19. crackerjack/agents/tracker.py +212 -0
  20. crackerjack/api.py +560 -0
  21. crackerjack/cli/__init__.py +24 -0
  22. crackerjack/cli/facade.py +104 -0
  23. crackerjack/cli/handlers.py +267 -0
  24. crackerjack/cli/interactive.py +471 -0
  25. crackerjack/cli/options.py +401 -0
  26. crackerjack/cli/utils.py +18 -0
  27. crackerjack/code_cleaner.py +670 -0
  28. crackerjack/config/__init__.py +19 -0
  29. crackerjack/config/hooks.py +218 -0
  30. crackerjack/core/__init__.py +0 -0
  31. crackerjack/core/async_workflow_orchestrator.py +406 -0
  32. crackerjack/core/autofix_coordinator.py +200 -0
  33. crackerjack/core/container.py +104 -0
  34. crackerjack/core/enhanced_container.py +542 -0
  35. crackerjack/core/performance.py +243 -0
  36. crackerjack/core/phase_coordinator.py +561 -0
  37. crackerjack/core/proactive_workflow.py +316 -0
  38. crackerjack/core/session_coordinator.py +289 -0
  39. crackerjack/core/workflow_orchestrator.py +640 -0
  40. crackerjack/dynamic_config.py +577 -0
  41. crackerjack/errors.py +263 -41
  42. crackerjack/executors/__init__.py +11 -0
  43. crackerjack/executors/async_hook_executor.py +431 -0
  44. crackerjack/executors/cached_hook_executor.py +242 -0
  45. crackerjack/executors/hook_executor.py +345 -0
  46. crackerjack/executors/individual_hook_executor.py +669 -0
  47. crackerjack/intelligence/__init__.py +44 -0
  48. crackerjack/intelligence/adaptive_learning.py +751 -0
  49. crackerjack/intelligence/agent_orchestrator.py +551 -0
  50. crackerjack/intelligence/agent_registry.py +414 -0
  51. crackerjack/intelligence/agent_selector.py +502 -0
  52. crackerjack/intelligence/integration.py +290 -0
  53. crackerjack/interactive.py +576 -315
  54. crackerjack/managers/__init__.py +11 -0
  55. crackerjack/managers/async_hook_manager.py +135 -0
  56. crackerjack/managers/hook_manager.py +137 -0
  57. crackerjack/managers/publish_manager.py +411 -0
  58. crackerjack/managers/test_command_builder.py +151 -0
  59. crackerjack/managers/test_executor.py +435 -0
  60. crackerjack/managers/test_manager.py +258 -0
  61. crackerjack/managers/test_manager_backup.py +1124 -0
  62. crackerjack/managers/test_progress.py +144 -0
  63. crackerjack/mcp/__init__.py +0 -0
  64. crackerjack/mcp/cache.py +336 -0
  65. crackerjack/mcp/client_runner.py +104 -0
  66. crackerjack/mcp/context.py +615 -0
  67. crackerjack/mcp/dashboard.py +636 -0
  68. crackerjack/mcp/enhanced_progress_monitor.py +479 -0
  69. crackerjack/mcp/file_monitor.py +336 -0
  70. crackerjack/mcp/progress_components.py +569 -0
  71. crackerjack/mcp/progress_monitor.py +949 -0
  72. crackerjack/mcp/rate_limiter.py +332 -0
  73. crackerjack/mcp/server.py +22 -0
  74. crackerjack/mcp/server_core.py +244 -0
  75. crackerjack/mcp/service_watchdog.py +501 -0
  76. crackerjack/mcp/state.py +395 -0
  77. crackerjack/mcp/task_manager.py +257 -0
  78. crackerjack/mcp/tools/__init__.py +17 -0
  79. crackerjack/mcp/tools/core_tools.py +249 -0
  80. crackerjack/mcp/tools/error_analyzer.py +308 -0
  81. crackerjack/mcp/tools/execution_tools.py +370 -0
  82. crackerjack/mcp/tools/execution_tools_backup.py +1097 -0
  83. crackerjack/mcp/tools/intelligence_tool_registry.py +80 -0
  84. crackerjack/mcp/tools/intelligence_tools.py +314 -0
  85. crackerjack/mcp/tools/monitoring_tools.py +502 -0
  86. crackerjack/mcp/tools/proactive_tools.py +384 -0
  87. crackerjack/mcp/tools/progress_tools.py +141 -0
  88. crackerjack/mcp/tools/utility_tools.py +341 -0
  89. crackerjack/mcp/tools/workflow_executor.py +360 -0
  90. crackerjack/mcp/websocket/__init__.py +14 -0
  91. crackerjack/mcp/websocket/app.py +39 -0
  92. crackerjack/mcp/websocket/endpoints.py +559 -0
  93. crackerjack/mcp/websocket/jobs.py +253 -0
  94. crackerjack/mcp/websocket/server.py +116 -0
  95. crackerjack/mcp/websocket/websocket_handler.py +78 -0
  96. crackerjack/mcp/websocket_server.py +10 -0
  97. crackerjack/models/__init__.py +31 -0
  98. crackerjack/models/config.py +93 -0
  99. crackerjack/models/config_adapter.py +230 -0
  100. crackerjack/models/protocols.py +118 -0
  101. crackerjack/models/task.py +154 -0
  102. crackerjack/monitoring/ai_agent_watchdog.py +450 -0
  103. crackerjack/monitoring/regression_prevention.py +638 -0
  104. crackerjack/orchestration/__init__.py +0 -0
  105. crackerjack/orchestration/advanced_orchestrator.py +970 -0
  106. crackerjack/orchestration/execution_strategies.py +341 -0
  107. crackerjack/orchestration/test_progress_streamer.py +636 -0
  108. crackerjack/plugins/__init__.py +15 -0
  109. crackerjack/plugins/base.py +200 -0
  110. crackerjack/plugins/hooks.py +246 -0
  111. crackerjack/plugins/loader.py +335 -0
  112. crackerjack/plugins/managers.py +259 -0
  113. crackerjack/py313.py +8 -3
  114. crackerjack/services/__init__.py +22 -0
  115. crackerjack/services/cache.py +314 -0
  116. crackerjack/services/config.py +347 -0
  117. crackerjack/services/config_integrity.py +99 -0
  118. crackerjack/services/contextual_ai_assistant.py +516 -0
  119. crackerjack/services/coverage_ratchet.py +347 -0
  120. crackerjack/services/debug.py +736 -0
  121. crackerjack/services/dependency_monitor.py +617 -0
  122. crackerjack/services/enhanced_filesystem.py +439 -0
  123. crackerjack/services/file_hasher.py +151 -0
  124. crackerjack/services/filesystem.py +395 -0
  125. crackerjack/services/git.py +165 -0
  126. crackerjack/services/health_metrics.py +611 -0
  127. crackerjack/services/initialization.py +847 -0
  128. crackerjack/services/log_manager.py +286 -0
  129. crackerjack/services/logging.py +174 -0
  130. crackerjack/services/metrics.py +578 -0
  131. crackerjack/services/pattern_cache.py +362 -0
  132. crackerjack/services/pattern_detector.py +515 -0
  133. crackerjack/services/performance_benchmarks.py +653 -0
  134. crackerjack/services/security.py +163 -0
  135. crackerjack/services/server_manager.py +234 -0
  136. crackerjack/services/smart_scheduling.py +144 -0
  137. crackerjack/services/tool_version_service.py +61 -0
  138. crackerjack/services/unified_config.py +437 -0
  139. crackerjack/services/version_checker.py +248 -0
  140. crackerjack/slash_commands/__init__.py +14 -0
  141. crackerjack/slash_commands/init.md +122 -0
  142. crackerjack/slash_commands/run.md +163 -0
  143. crackerjack/slash_commands/status.md +127 -0
  144. crackerjack-0.31.4.dist-info/METADATA +742 -0
  145. crackerjack-0.31.4.dist-info/RECORD +148 -0
  146. crackerjack-0.31.4.dist-info/entry_points.txt +2 -0
  147. crackerjack/.gitignore +0 -34
  148. crackerjack/.libcst.codemod.yaml +0 -18
  149. crackerjack/.pdm.toml +0 -1
  150. crackerjack/.pre-commit-config-ai.yaml +0 -149
  151. crackerjack/.pre-commit-config-fast.yaml +0 -69
  152. crackerjack/.pre-commit-config.yaml +0 -114
  153. crackerjack/crackerjack.py +0 -4140
  154. crackerjack/pyproject.toml +0 -285
  155. crackerjack-0.29.0.dist-info/METADATA +0 -1289
  156. crackerjack-0.29.0.dist-info/RECORD +0 -17
  157. {crackerjack-0.29.0.dist-info → crackerjack-0.31.4.dist-info}/WHEEL +0 -0
  158. {crackerjack-0.29.0.dist-info → crackerjack-0.31.4.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,341 @@
1
+ import json
2
+ import time
3
+ import typing as t
4
+ from contextlib import suppress
5
+ from pathlib import Path
6
+
7
+ from crackerjack.mcp.context import get_context
8
+
9
+
10
+ def _create_error_response(message: str, success: bool = False) -> str:
11
+ """Utility function to create standardized error responses."""
12
+ return json.dumps({"error": message, "success": success}, indent=2)
13
+
14
+
15
def register_utility_tools(mcp_app: t.Any) -> None:
    """Register the utility slash command tools on the given MCP app."""
    # Each registrar attaches exactly one tool via the app's decorator.
    for register in (
        _register_clean_tool,
        _register_config_tool,
        _register_analyze_tool,
    ):
        register(mcp_app)
20
+
21
+
22
+ def _clean_file_if_old(
23
+ file_path: Path, cutoff_time: float, dry_run: bool, file_type: str
24
+ ) -> dict | None:
25
+ """Clean a single file if it's older than cutoff time."""
26
+ with suppress(OSError):
27
+ if file_path.stat().st_mtime < cutoff_time:
28
+ file_size = file_path.stat().st_size
29
+ if not dry_run:
30
+ file_path.unlink()
31
+ return {"path": str(file_path), "size": file_size, "type": file_type}
32
+ return None
33
+
34
+
35
def _clean_temp_files(cutoff_time: float, dry_run: bool) -> tuple[list[dict], int]:
    """Remove stale crackerjack artifacts from the system temp directory."""
    import tempfile

    removed: list[dict] = []
    freed = 0
    temp_dir = Path(tempfile.gettempdir())

    # Only crackerjack's own artifacts are matched, never arbitrary temp files.
    stale_patterns = (
        "crackerjack-*.log",
        "crackerjack-task-error-*.log",
        ".coverage.*",
    )
    candidates = (
        path for pattern in stale_patterns for path in temp_dir.glob(pattern)
    )
    for candidate in candidates:
        record = _clean_file_if_old(candidate, cutoff_time, dry_run, "temp")
        if record:
            removed.append(record)
            freed += record["size"]

    return removed, freed
52
+
53
+
54
def _clean_progress_files(
    context: t.Any, cutoff_time: float, dry_run: bool
) -> tuple[list[dict], int]:
    """Remove stale *.json progress files from the context's progress dir."""
    removed: list[dict] = []
    freed = 0

    progress_dir = context.progress_dir
    # A missing progress directory simply means nothing to clean.
    if not progress_dir.exists():
        return removed, freed

    for candidate in progress_dir.glob("*.json"):
        record = _clean_file_if_old(candidate, cutoff_time, dry_run, "progress")
        if record:
            removed.append(record)
            freed += record["size"]

    return removed, freed
71
+
72
+
73
+ def _parse_cleanup_options(kwargs: str) -> tuple[dict, str | None]:
74
+ """Parse and validate cleanup options from kwargs string."""
75
+ try:
76
+ extra_kwargs = json.loads(kwargs) if kwargs.strip() else {}
77
+ return extra_kwargs, None
78
+ except json.JSONDecodeError as e:
79
+ return {}, f"Invalid JSON in kwargs: {e}"
80
+
81
+
82
def _register_clean_tool(mcp_app: t.Any) -> None:
    # The docstring below doubles as the MCP tool description shown to clients,
    # so it is left exactly as-is.
    @mcp_app.tool()
    async def clean_crackerjack(args: str = "", kwargs: str = "{}") -> str:
        """Clean up temporary files, stale progress data, and cached resources.

        Args:
            args: Optional cleanup scope: 'temp', 'progress', 'cache', 'all' (default)
            kwargs: JSON with options like {"dry_run": true, "older_than": 24}
        """
        # All exits return a JSON string; errors use the shared error envelope.
        context = get_context()
        if not context:
            return _create_error_response("Server context not available")

        # Parsing failures surface as {"error": ...} in the config dict.
        clean_config = _parse_clean_configuration(args, kwargs)
        if "error" in clean_config:
            return _create_error_response(clean_config["error"])

        try:
            cleanup_results = _execute_cleanup_operations(context, clean_config)
            return _create_cleanup_response(clean_config, cleanup_results)
        except Exception as e:
            # Broad catch is intentional at this tool boundary: the MCP caller
            # must always receive a structured response, never a traceback.
            return _create_error_response(f"Cleanup failed: {e}")
104
+
105
+
106
def _parse_clean_configuration(args: str, kwargs: str) -> dict:
    """Turn raw tool arguments into a cleanup config dict, or {"error": ...}."""
    extra_kwargs, parse_error = _parse_cleanup_options(kwargs)
    if parse_error:
        return {"error": parse_error}

    # Blank scope defaults to cleaning everything.
    scope = args.strip().lower() or "all"
    return {
        "scope": scope,
        "dry_run": extra_kwargs.get("dry_run", False),
        "older_than_hours": extra_kwargs.get("older_than", 24),
    }
117
+
118
+
119
+ def _execute_cleanup_operations(context: t.Any, clean_config: dict) -> dict:
120
+ """Execute the cleanup operations based on configuration."""
121
+ from datetime import datetime, timedelta
122
+
123
+ cutoff_time = (
124
+ datetime.now() - timedelta(hours=clean_config["older_than_hours"])
125
+ ).timestamp()
126
+ all_cleaned_files = []
127
+ total_size = 0
128
+
129
+ # Clean temp files
130
+ if clean_config["scope"] in ("temp", "all"):
131
+ temp_files, temp_size = _clean_temp_files(cutoff_time, clean_config["dry_run"])
132
+ all_cleaned_files.extend(temp_files)
133
+ total_size += temp_size
134
+
135
+ # Clean progress files
136
+ if clean_config["scope"] in ("progress", "all"):
137
+ progress_files, progress_size = _clean_progress_files(
138
+ context, cutoff_time, clean_config["dry_run"]
139
+ )
140
+ all_cleaned_files.extend(progress_files)
141
+ total_size += progress_size
142
+
143
+ # Clean cache files (if any caching is implemented)
144
+ if clean_config["scope"] in ("cache", "all"):
145
+ # Placeholder for future cache cleaning
146
+ pass
147
+
148
+ return {"all_cleaned_files": all_cleaned_files, "total_size": total_size}
149
+
150
+
151
+ def _create_cleanup_response(clean_config: dict, cleanup_results: dict) -> str:
152
+ """Create the cleanup response JSON."""
153
+ all_cleaned_files = cleanup_results["all_cleaned_files"]
154
+
155
+ return json.dumps(
156
+ {
157
+ "success": True,
158
+ "command": "clean_crackerjack",
159
+ "dry_run": clean_config["dry_run"],
160
+ "scope": clean_config["scope"],
161
+ "older_than_hours": clean_config["older_than_hours"],
162
+ "files_cleaned": len(all_cleaned_files),
163
+ "total_size_bytes": cleanup_results["total_size"],
164
+ "files": all_cleaned_files
165
+ if len(all_cleaned_files) <= 50
166
+ else all_cleaned_files[:50],
167
+ },
168
+ indent=2,
169
+ )
170
+
171
+
172
+ def _handle_config_list(context: t.Any) -> dict[str, t.Any]:
173
+ """Handle config list action."""
174
+ return {
175
+ "project_path": str(context.config.project_path),
176
+ "rate_limiter": {
177
+ "enabled": context.rate_limiter is not None,
178
+ "config": context.rate_limiter.config.__dict__
179
+ if context.rate_limiter
180
+ else None,
181
+ },
182
+ "progress_dir": str(context.progress_dir),
183
+ "websocket_port": getattr(context, "websocket_server_port", None),
184
+ }
185
+
186
+
187
+ def _handle_config_get(context: t.Any, key: str) -> dict[str, t.Any]:
188
+ """Handle config get action."""
189
+ value = getattr(context.config, key, None)
190
+ if value is None:
191
+ value = getattr(context, key, "Key not found")
192
+
193
+ return {
194
+ "success": True,
195
+ "command": "config_crackerjack",
196
+ "action": "get",
197
+ "key": key,
198
+ "value": str(value),
199
+ }
200
+
201
+
202
+ def _handle_config_validate(context: t.Any) -> dict[str, t.Any]:
203
+ """Handle config validate action."""
204
+ validation_results = {
205
+ "project_path_exists": context.config.project_path.exists(),
206
+ "progress_dir_writable": context.progress_dir.exists()
207
+ and context.progress_dir.is_dir(),
208
+ "rate_limiter_configured": context.rate_limiter is not None,
209
+ }
210
+
211
+ all_valid = all(validation_results.values())
212
+
213
+ return {
214
+ "success": True,
215
+ "command": "config_crackerjack",
216
+ "action": "validate",
217
+ "valid": all_valid,
218
+ "checks": validation_results,
219
+ }
220
+
221
+
222
def _register_config_tool(mcp_app: t.Any) -> None:
    # The docstring below doubles as the MCP tool description shown to clients,
    # so it is left exactly as-is.
    @mcp_app.tool()
    async def config_crackerjack(args: str = "", kwargs: str = "{}") -> str:
        """View or update crackerjack configuration.

        Args:
            args: Action - 'get <key>', 'set <key=value>', 'list', or 'validate'
            kwargs: JSON with additional options
        """
        context = get_context()
        if not context:
            return _create_error_response("Server context not available")

        # kwargs are parsed but currently unused by any action; the parse still
        # runs so malformed JSON is rejected consistently with the other tools.
        extra_kwargs, parse_error = _parse_cleanup_options(kwargs)
        if parse_error:
            return _create_error_response(parse_error)

        # Blank args default to the 'list' action.
        args_parts = args.strip().split() if args.strip() else ["list"]
        action = args_parts[0].lower()

        # NOTE(review): the docstring advertises a 'set <key=value>' action,
        # but no such branch exists below — 'set' falls through to the invalid-
        # action error. Confirm whether 'set' should be implemented or the
        # docstring trimmed.
        try:
            if action == "list":
                config_info = _handle_config_list(context)
                result = {
                    "success": True,
                    "command": "config_crackerjack",
                    "action": "list",
                    "configuration": config_info,
                }
            elif action == "get" and len(args_parts) > 1:
                result = _handle_config_get(context, args_parts[1])
            elif action == "validate":
                result = _handle_config_validate(context)
            else:
                return _create_error_response(
                    f"Invalid action '{action}'. Valid actions: list, get <key>, validate"
                )

            return json.dumps(result, indent=2)

        except Exception as e:
            # Tool boundary: always return a structured response, never raise.
            return _create_error_response(f"Config operation failed: {e}")
264
+
265
+
266
+ def _run_hooks_analysis(orchestrator: t.Any, options: t.Any) -> dict:
267
+ """Run hooks analysis and return results."""
268
+ fast_result = orchestrator.run_fast_hooks_only(options)
269
+ comprehensive_result = orchestrator.run_comprehensive_hooks_only(options)
270
+
271
+ return {
272
+ "fast_hooks": "passed" if fast_result else "failed",
273
+ "comprehensive_hooks": "passed" if comprehensive_result else "failed",
274
+ }
275
+
276
+
277
+ def _run_tests_analysis(orchestrator: t.Any, options: t.Any) -> dict:
278
+ """Run tests analysis and return results."""
279
+ test_result = orchestrator.run_testing_phase(options)
280
+ return {"status": "passed" if test_result else "failed"}
281
+
282
+
283
def _create_analysis_orchestrator(context: t.Any) -> t.Any:
    """Build a dry-run WorkflowOrchestrator for analysis-only execution."""
    from crackerjack.core.workflow_orchestrator import WorkflowOrchestrator

    # dry_run=True guarantees analysis never mutates the project.
    orchestrator_kwargs = {
        "console": context.console,
        "pkg_path": context.config.project_path,
        "dry_run": True,
    }
    return WorkflowOrchestrator(**orchestrator_kwargs)
292
+
293
+
294
def _register_analyze_tool(mcp_app: t.Any) -> None:
    # The docstring below doubles as the MCP tool description shown to clients,
    # so it is left exactly as-is.
    @mcp_app.tool()
    async def analyze_crackerjack(args: str = "", kwargs: str = "{}") -> str:
        """Analyze code quality without making changes.

        Args:
            args: Analysis scope - 'hooks', 'tests', 'all' (default)
            kwargs: JSON with options like {"report_format": "summary"}
        """
        context = get_context()
        if not context:
            return _create_error_response("Server context not available")

        extra_kwargs, parse_error = _parse_cleanup_options(kwargs)
        if parse_error:
            return _create_error_response(parse_error)

        # Blank scope defaults to running both hook and test analysis.
        scope = args.strip().lower() or "all"
        # NOTE(review): report_format is echoed back in the response but does
        # not change how results are rendered — confirm intended behavior.
        report_format = extra_kwargs.get("report_format", "summary")

        try:
            from crackerjack.models.config import WorkflowOptions

            # Orchestrator is created with dry_run=True (see
            # _create_analysis_orchestrator), so nothing is modified.
            orchestrator = _create_analysis_orchestrator(context)
            options = WorkflowOptions()
            analysis_results: dict[str, dict] = {}

            if scope in ("hooks", "all"):
                analysis_results["hooks"] = _run_hooks_analysis(orchestrator, options)

            if scope in ("tests", "all"):
                analysis_results["tests"] = _run_tests_analysis(orchestrator, options)

            return json.dumps(
                {
                    "success": True,
                    "command": "analyze_crackerjack",
                    "scope": scope,
                    "report_format": report_format,
                    "dry_run": True,
                    "timestamp": time.time(),
                    "analysis": analysis_results,
                },
                indent=2,
            )

        except Exception as e:
            # Tool boundary: always return a structured response, never raise.
            return _create_error_response(f"Analysis failed: {e}")
@@ -0,0 +1,360 @@
1
+ """Workflow execution engine for MCP tools.
2
+
3
+ This module handles the core workflow execution logic, including orchestrator setup,
4
+ iteration management, and result handling. Split from execution_tools.py for better
5
+ separation of concerns.
6
+ """
7
+
8
+ import asyncio
9
+ import time
10
+ import typing as t
11
+ import uuid
12
+
13
+ from crackerjack.mcp.context import get_context
14
+
15
+ from .progress_tools import _update_progress
16
+
17
+
18
async def execute_crackerjack_workflow(
    args: str, kwargs: dict[str, t.Any]
) -> dict[str, t.Any]:
    """Execute the main crackerjack workflow with progress tracking."""
    # Short job ids keep progress files and log lines readable.
    job_id = str(uuid.uuid4())[:8]

    try:
        context = get_context()
        return await _execute_crackerjack_sync(job_id, args, kwargs, context)
    except Exception as e:
        # Boundary handler: any failure becomes a structured result dict.
        return {
            "job_id": job_id,
            "status": "failed",
            "error": f"Execution failed: {e}",
            "timestamp": time.time(),
        }
33
+
34
+
35
async def _execute_crackerjack_sync(
    job_id: str,
    args: str,
    kwargs: dict[str, t.Any],
    context: t.Any,
) -> dict[str, t.Any]:
    """Drive one full workflow run: initialization, orchestrator setup,
    then the iteration loop. Each stage short-circuits on failure."""
    setup = await _initialize_execution(job_id, args, kwargs, context)
    if setup.get("status") == "failed":
        return setup

    prepared = await _setup_orchestrator(
        job_id, args, kwargs, setup["working_dir"], context
    )
    if prepared.get("status") == "failed":
        return prepared

    return await _run_workflow_iterations(
        job_id, prepared["orchestrator"], kwargs, context
    )
58
+
59
+
60
async def _initialize_execution(
    job_id: str,
    args: str,
    kwargs: dict[str, t.Any],
    context: t.Any,
) -> dict[str, t.Any]:
    """Validate the working directory and report initialization progress.

    Returns {"status": "initialized", "working_dir": <abs Path>, "job_id"} on
    success or a {"status": "failed", ...} dict when the directory is missing.
    """
    from pathlib import Path

    await _update_progress(
        job_id,
        {
            "type": "initialization",
            "status": "starting",
            "message": "Initializing crackerjack execution...",
        },
        context,
    )

    working_dir = kwargs.get("working_directory", ".")
    working_path = Path(working_dir)
    if not working_path.exists():
        return {
            "status": "failed",
            "error": f"Working directory does not exist: {working_dir}",
            "job_id": job_id,
        }

    resolved = working_path.absolute()
    await _update_progress(
        job_id,
        {
            "type": "initialization",
            "status": "ready",
            "working_directory": str(resolved),
        },
        context,
    )

    return {"status": "initialized", "working_dir": resolved, "job_id": job_id}
104
+
105
+
106
async def _setup_orchestrator(
    job_id: str,
    args: str,
    kwargs: dict[str, t.Any],
    working_dir: t.Any,
    context: t.Any,
) -> dict[str, t.Any]:
    """Build the advanced (async) or standard orchestrator per configuration.

    Returns {"status": "ready", "orchestrator": ...} or a failure dict; any
    construction error is captured rather than propagated.
    """
    await _update_progress(
        job_id,
        {
            "type": "setup",
            "status": "creating_orchestrator",
            "message": "Setting up workflow orchestrator...",
        },
        context,
    )

    try:
        # Advanced orchestration is the default unless explicitly disabled.
        if kwargs.get("advanced_orchestration", True):
            orchestrator = await _create_advanced_orchestrator(
                working_dir, kwargs, context
            )
        else:
            orchestrator = _create_standard_orchestrator(working_dir, kwargs)
    except Exception as e:
        return {
            "status": "failed",
            "error": f"Failed to create orchestrator: {e}",
            "job_id": job_id,
        }

    return {"status": "ready", "orchestrator": orchestrator, "job_id": job_id}
146
+
147
+
148
async def _create_advanced_orchestrator(
    working_dir: t.Any, kwargs: dict[str, t.Any], context: t.Any
) -> t.Any:
    """Build an AsyncWorkflowOrchestrator backed by a populated DI container."""
    from pathlib import Path

    from crackerjack.core.async_workflow_orchestrator import AsyncWorkflowOrchestrator
    from crackerjack.core.enhanced_container import EnhancedContainer

    pkg_path = Path(working_dir)
    container = EnhancedContainer()
    # Populate the container before the orchestrator resolves anything from it.
    await _register_core_services(container, pkg_path)

    return AsyncWorkflowOrchestrator(pkg_path=pkg_path, container=container)
168
+
169
+
170
def _create_standard_orchestrator(
    working_dir: t.Any, kwargs: dict[str, t.Any]
) -> t.Any:
    """Build the plain synchronous WorkflowOrchestrator.

    The kwargs parameter is accepted for signature parity with the advanced
    factory but is currently unused.
    """
    from pathlib import Path

    from crackerjack.core.workflow_orchestrator import WorkflowOrchestrator

    pkg_path = Path(working_dir)
    return WorkflowOrchestrator(pkg_path=pkg_path)
179
+
180
+
181
async def _register_core_services(container: t.Any, working_dir: t.Any) -> None:
    """Register manager and filesystem singletons on the DI container."""
    from rich.console import Console

    from crackerjack.core.enhanced_container import ServiceLifetime
    from crackerjack.managers.hook_manager import AsyncHookManager
    from crackerjack.managers.publish_manager import PublishManager
    from crackerjack.managers.test_manager import TestManager
    from crackerjack.models.protocols import (
        HookManagerProtocol,
        PublishManagerProtocol,
        TestManagerProtocol,
    )
    from crackerjack.services.enhanced_filesystem import EnhancedFileSystemService

    # NOTE(review): AsyncHookManager is imported from hook_manager rather than
    # async_hook_manager — confirm hook_manager re-exports it.
    console = Console()

    # One shared console/working_dir per manager; each binding is a singleton
    # for the lifetime of the container.
    singletons = (
        (HookManagerProtocol, AsyncHookManager(console, working_dir)),
        (TestManagerProtocol, TestManager(console, working_dir)),
        (PublishManagerProtocol, PublishManager(console, working_dir)),
        (EnhancedFileSystemService, EnhancedFileSystemService()),
    )
    for protocol, instance in singletons:
        container.register_service(protocol, instance, ServiceLifetime.SINGLETON)
223
+
224
+
225
async def _run_workflow_iterations(
    job_id: str,
    orchestrator: t.Any,
    kwargs: dict[str, t.Any],
    context: t.Any,
) -> dict[str, t.Any]:
    """Run workflow iterations until completion or max attempts.

    Returns a terminal result dict from one of three exits: success on the
    first passing iteration, a failure dict if an iteration raises, or a
    max-iterations failure when the loop is exhausted.
    """
    options = _create_workflow_options(kwargs)
    max_iterations = kwargs.get("max_iterations", 10)

    for iteration in range(max_iterations):
        # Progress messages report 1-based iteration numbers.
        await _update_progress(
            job_id,
            {
                "type": "iteration",
                "iteration": iteration + 1,
                "max_iterations": max_iterations,
                "status": "running",
            },
            context,
        )

        try:
            success = await _execute_single_iteration(
                job_id, orchestrator, options, iteration, context
            )

            if success:
                return _create_success_result(job_id, iteration + 1, context)

            # Handle retry logic
            # Only announce a retry when another iteration will actually run.
            if iteration < max_iterations - 1:
                await _handle_iteration_retry(job_id, iteration, context)

        except Exception as e:
            # An exception aborts the whole loop — no retry after an error.
            return await _handle_iteration_error(job_id, iteration, e, context)

    return _create_failure_result(job_id, max_iterations, context)
263
+
264
+
265
+ def _create_workflow_options(kwargs: dict[str, t.Any]) -> t.Any:
266
+ """Create workflow options from kwargs."""
267
+ from types import SimpleNamespace
268
+
269
+ # Create options object with default values
270
+ options = SimpleNamespace()
271
+ options.test_mode = kwargs.get("test_mode", True)
272
+ options.ai_agent = kwargs.get("ai_agent", True)
273
+ options.interactive = kwargs.get("interactive", False)
274
+ options.benchmark = kwargs.get("benchmark", False)
275
+ options.skip_hooks = kwargs.get("skip_hooks", False)
276
+ options.verbose = kwargs.get("verbose", True)
277
+
278
+ return options
279
+
280
+
281
async def _execute_single_iteration(
    job_id: str,
    orchestrator: t.Any,
    options: t.Any,
    iteration: int,
    context: t.Any,
) -> bool:
    """Execute a single workflow iteration.

    Returns True when the orchestrator reports a passing run. job_id,
    iteration, and context are currently unused but kept for signature parity
    with the other iteration helpers.
    """
    # assumes execute_workflow, when present, is a coroutine function — a
    # synchronous execute_workflow would make this await fail. TODO confirm.
    if hasattr(orchestrator, "execute_workflow"):
        return await orchestrator.execute_workflow(options)
    # Fallback for synchronous orchestrators
    # NOTE(review): this call runs synchronously on the event loop and will
    # block other tasks for the duration of the run.
    return orchestrator.run(options)
293
+
294
+
295
+ def _create_success_result(
296
+ job_id: str, iterations: int, context: t.Any
297
+ ) -> dict[str, t.Any]:
298
+ """Create success result with completion data."""
299
+ return {
300
+ "job_id": job_id,
301
+ "status": "completed",
302
+ "iterations": iterations,
303
+ "result": "All quality checks passed successfully",
304
+ "timestamp": time.time(),
305
+ "success": True,
306
+ }
307
+
308
+
309
async def _handle_iteration_retry(job_id: str, iteration: int, context: t.Any) -> None:
    """Report a retry and pause briefly before the next iteration."""
    attempt = iteration + 1
    update = {
        "type": "iteration",
        "iteration": attempt,
        "status": "retrying",
        "message": f"Issues found in iteration {attempt}, retrying...",
    }
    await _update_progress(job_id, update, context)

    # Brief pause so back-to-back iterations don't hammer the tooling.
    await asyncio.sleep(1)
324
+
325
+
326
async def _handle_iteration_error(
    job_id: str, iteration: int, error: Exception, context: t.Any
) -> dict[str, t.Any]:
    """Report a failed iteration and build the terminal failure payload."""
    attempt = iteration + 1
    await _update_progress(
        job_id,
        {
            "type": "error",
            "iteration": attempt,
            "error": str(error),
            "status": "failed",
        },
        context,
    )

    return {
        "job_id": job_id,
        "status": "failed",
        "error": f"Iteration {attempt} failed: {error}",
        "timestamp": time.time(),
        "success": False,
    }
348
+
349
+
350
+ def _create_failure_result(
351
+ job_id: str, max_iterations: int, context: t.Any
352
+ ) -> dict[str, t.Any]:
353
+ """Create failure result when max iterations exceeded."""
354
+ return {
355
+ "job_id": job_id,
356
+ "status": "failed",
357
+ "error": f"Maximum iterations ({max_iterations}) reached without success",
358
+ "timestamp": time.time(),
359
+ "success": False,
360
+ }