crackerjack 0.30.3__py3-none-any.whl → 0.31.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic; review the advisory details below for more information.

Files changed (155) hide show
  1. crackerjack/CLAUDE.md +1005 -0
  2. crackerjack/RULES.md +380 -0
  3. crackerjack/__init__.py +42 -13
  4. crackerjack/__main__.py +225 -299
  5. crackerjack/agents/__init__.py +41 -0
  6. crackerjack/agents/architect_agent.py +281 -0
  7. crackerjack/agents/base.py +169 -0
  8. crackerjack/agents/coordinator.py +512 -0
  9. crackerjack/agents/documentation_agent.py +498 -0
  10. crackerjack/agents/dry_agent.py +388 -0
  11. crackerjack/agents/formatting_agent.py +245 -0
  12. crackerjack/agents/import_optimization_agent.py +281 -0
  13. crackerjack/agents/performance_agent.py +669 -0
  14. crackerjack/agents/proactive_agent.py +104 -0
  15. crackerjack/agents/refactoring_agent.py +788 -0
  16. crackerjack/agents/security_agent.py +529 -0
  17. crackerjack/agents/test_creation_agent.py +652 -0
  18. crackerjack/agents/test_specialist_agent.py +486 -0
  19. crackerjack/agents/tracker.py +212 -0
  20. crackerjack/api.py +560 -0
  21. crackerjack/cli/__init__.py +24 -0
  22. crackerjack/cli/facade.py +104 -0
  23. crackerjack/cli/handlers.py +267 -0
  24. crackerjack/cli/interactive.py +471 -0
  25. crackerjack/cli/options.py +401 -0
  26. crackerjack/cli/utils.py +18 -0
  27. crackerjack/code_cleaner.py +618 -928
  28. crackerjack/config/__init__.py +19 -0
  29. crackerjack/config/hooks.py +218 -0
  30. crackerjack/core/__init__.py +0 -0
  31. crackerjack/core/async_workflow_orchestrator.py +406 -0
  32. crackerjack/core/autofix_coordinator.py +200 -0
  33. crackerjack/core/container.py +104 -0
  34. crackerjack/core/enhanced_container.py +542 -0
  35. crackerjack/core/performance.py +243 -0
  36. crackerjack/core/phase_coordinator.py +561 -0
  37. crackerjack/core/proactive_workflow.py +316 -0
  38. crackerjack/core/session_coordinator.py +289 -0
  39. crackerjack/core/workflow_orchestrator.py +640 -0
  40. crackerjack/dynamic_config.py +94 -103
  41. crackerjack/errors.py +263 -41
  42. crackerjack/executors/__init__.py +11 -0
  43. crackerjack/executors/async_hook_executor.py +431 -0
  44. crackerjack/executors/cached_hook_executor.py +242 -0
  45. crackerjack/executors/hook_executor.py +345 -0
  46. crackerjack/executors/individual_hook_executor.py +669 -0
  47. crackerjack/intelligence/__init__.py +44 -0
  48. crackerjack/intelligence/adaptive_learning.py +751 -0
  49. crackerjack/intelligence/agent_orchestrator.py +551 -0
  50. crackerjack/intelligence/agent_registry.py +414 -0
  51. crackerjack/intelligence/agent_selector.py +502 -0
  52. crackerjack/intelligence/integration.py +290 -0
  53. crackerjack/interactive.py +576 -315
  54. crackerjack/managers/__init__.py +11 -0
  55. crackerjack/managers/async_hook_manager.py +135 -0
  56. crackerjack/managers/hook_manager.py +137 -0
  57. crackerjack/managers/publish_manager.py +411 -0
  58. crackerjack/managers/test_command_builder.py +151 -0
  59. crackerjack/managers/test_executor.py +435 -0
  60. crackerjack/managers/test_manager.py +258 -0
  61. crackerjack/managers/test_manager_backup.py +1124 -0
  62. crackerjack/managers/test_progress.py +144 -0
  63. crackerjack/mcp/__init__.py +0 -0
  64. crackerjack/mcp/cache.py +336 -0
  65. crackerjack/mcp/client_runner.py +104 -0
  66. crackerjack/mcp/context.py +615 -0
  67. crackerjack/mcp/dashboard.py +636 -0
  68. crackerjack/mcp/enhanced_progress_monitor.py +479 -0
  69. crackerjack/mcp/file_monitor.py +336 -0
  70. crackerjack/mcp/progress_components.py +569 -0
  71. crackerjack/mcp/progress_monitor.py +949 -0
  72. crackerjack/mcp/rate_limiter.py +332 -0
  73. crackerjack/mcp/server.py +22 -0
  74. crackerjack/mcp/server_core.py +244 -0
  75. crackerjack/mcp/service_watchdog.py +501 -0
  76. crackerjack/mcp/state.py +395 -0
  77. crackerjack/mcp/task_manager.py +257 -0
  78. crackerjack/mcp/tools/__init__.py +17 -0
  79. crackerjack/mcp/tools/core_tools.py +249 -0
  80. crackerjack/mcp/tools/error_analyzer.py +308 -0
  81. crackerjack/mcp/tools/execution_tools.py +370 -0
  82. crackerjack/mcp/tools/execution_tools_backup.py +1097 -0
  83. crackerjack/mcp/tools/intelligence_tool_registry.py +80 -0
  84. crackerjack/mcp/tools/intelligence_tools.py +314 -0
  85. crackerjack/mcp/tools/monitoring_tools.py +502 -0
  86. crackerjack/mcp/tools/proactive_tools.py +384 -0
  87. crackerjack/mcp/tools/progress_tools.py +141 -0
  88. crackerjack/mcp/tools/utility_tools.py +341 -0
  89. crackerjack/mcp/tools/workflow_executor.py +360 -0
  90. crackerjack/mcp/websocket/__init__.py +14 -0
  91. crackerjack/mcp/websocket/app.py +39 -0
  92. crackerjack/mcp/websocket/endpoints.py +559 -0
  93. crackerjack/mcp/websocket/jobs.py +253 -0
  94. crackerjack/mcp/websocket/server.py +116 -0
  95. crackerjack/mcp/websocket/websocket_handler.py +78 -0
  96. crackerjack/mcp/websocket_server.py +10 -0
  97. crackerjack/models/__init__.py +31 -0
  98. crackerjack/models/config.py +93 -0
  99. crackerjack/models/config_adapter.py +230 -0
  100. crackerjack/models/protocols.py +118 -0
  101. crackerjack/models/task.py +154 -0
  102. crackerjack/monitoring/ai_agent_watchdog.py +450 -0
  103. crackerjack/monitoring/regression_prevention.py +638 -0
  104. crackerjack/orchestration/__init__.py +0 -0
  105. crackerjack/orchestration/advanced_orchestrator.py +970 -0
  106. crackerjack/orchestration/execution_strategies.py +341 -0
  107. crackerjack/orchestration/test_progress_streamer.py +636 -0
  108. crackerjack/plugins/__init__.py +15 -0
  109. crackerjack/plugins/base.py +200 -0
  110. crackerjack/plugins/hooks.py +246 -0
  111. crackerjack/plugins/loader.py +335 -0
  112. crackerjack/plugins/managers.py +259 -0
  113. crackerjack/py313.py +8 -3
  114. crackerjack/services/__init__.py +22 -0
  115. crackerjack/services/cache.py +314 -0
  116. crackerjack/services/config.py +347 -0
  117. crackerjack/services/config_integrity.py +99 -0
  118. crackerjack/services/contextual_ai_assistant.py +516 -0
  119. crackerjack/services/coverage_ratchet.py +347 -0
  120. crackerjack/services/debug.py +736 -0
  121. crackerjack/services/dependency_monitor.py +617 -0
  122. crackerjack/services/enhanced_filesystem.py +439 -0
  123. crackerjack/services/file_hasher.py +151 -0
  124. crackerjack/services/filesystem.py +395 -0
  125. crackerjack/services/git.py +165 -0
  126. crackerjack/services/health_metrics.py +611 -0
  127. crackerjack/services/initialization.py +847 -0
  128. crackerjack/services/log_manager.py +286 -0
  129. crackerjack/services/logging.py +174 -0
  130. crackerjack/services/metrics.py +578 -0
  131. crackerjack/services/pattern_cache.py +362 -0
  132. crackerjack/services/pattern_detector.py +515 -0
  133. crackerjack/services/performance_benchmarks.py +653 -0
  134. crackerjack/services/security.py +163 -0
  135. crackerjack/services/server_manager.py +234 -0
  136. crackerjack/services/smart_scheduling.py +144 -0
  137. crackerjack/services/tool_version_service.py +61 -0
  138. crackerjack/services/unified_config.py +437 -0
  139. crackerjack/services/version_checker.py +248 -0
  140. crackerjack/slash_commands/__init__.py +14 -0
  141. crackerjack/slash_commands/init.md +122 -0
  142. crackerjack/slash_commands/run.md +163 -0
  143. crackerjack/slash_commands/status.md +127 -0
  144. crackerjack-0.31.4.dist-info/METADATA +742 -0
  145. crackerjack-0.31.4.dist-info/RECORD +148 -0
  146. crackerjack-0.31.4.dist-info/entry_points.txt +2 -0
  147. crackerjack/.gitignore +0 -34
  148. crackerjack/.libcst.codemod.yaml +0 -18
  149. crackerjack/.pdm.toml +0 -1
  150. crackerjack/crackerjack.py +0 -3805
  151. crackerjack/pyproject.toml +0 -286
  152. crackerjack-0.30.3.dist-info/METADATA +0 -1290
  153. crackerjack-0.30.3.dist-info/RECORD +0 -16
  154. {crackerjack-0.30.3.dist-info → crackerjack-0.31.4.dist-info}/WHEEL +0 -0
  155. {crackerjack-0.30.3.dist-info → crackerjack-0.31.4.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,1097 @@
1
+ import asyncio
2
+ import json
3
+ import time
4
+ import typing as t
5
+ import uuid
6
+ from contextlib import suppress
7
+
8
+ from crackerjack.mcp.context import get_context
9
+
10
+ from .progress_tools import _update_progress
11
+
12
+
13
def register_execution_tools(mcp_app: t.Any) -> None:
    """Register every execution-related MCP tool on the given app."""
    registrars = (
        _register_execute_crackerjack_tool,
        _register_smart_error_analysis_tool,
        _register_init_crackerjack_tool,
        _register_agent_suggestions_tool,
    )
    for register in registrars:
        register(mcp_app)
18
+
19
+
20
def _register_execute_crackerjack_tool(mcp_app: t.Any) -> None:
    """Attach the main `execute_crackerjack` tool to the MCP app."""

    @mcp_app.tool()
    async def execute_crackerjack(args: str, kwargs: str) -> str:
        ctx = get_context()
        guard_message = await _validate_context_and_rate_limit(ctx)
        if guard_message:
            return guard_message

        job_id = str(uuid.uuid4())[:8]

        parsed = _parse_kwargs(kwargs)
        if "error" in parsed:
            return json.dumps(parsed)

        # Run the workflow directly (not as a background task) so the caller
        # receives the final result in this response.
        try:
            outcome = await _execute_crackerjack_sync(
                job_id,
                args,
                parsed["kwargs"],
                ctx,
            )
        except Exception as exc:
            failure = {
                "job_id": job_id,
                "status": "failed",
                "error": f"Execution failed: {exc}",
            }
            return json.dumps(failure, indent=2)
        return json.dumps(outcome, indent=2)
54
+
55
+
56
def _register_smart_error_analysis_tool(mcp_app: t.Any) -> None:
    """Attach the `smart_error_analysis` tool to the MCP app."""

    @mcp_app.tool()
    async def smart_error_analysis(use_cache: bool = True) -> str:
        ctx = get_context()
        if not ctx:
            return '{"error": "Server context not available"}'

        try:
            from crackerjack.services.debug import get_ai_agent_debugger

            # Called for its side effects only; the return value is unused.
            get_ai_agent_debugger()

            patterns = _get_cached_patterns(ctx, use_cache)
            report = _build_error_analysis(use_cache, patterns)

            return json.dumps(report, indent=2)

        except Exception as e:
            return f'{{"error": "Smart error analysis failed: {e}"}}'
75
+
76
+
77
+ async def _validate_context_and_rate_limit(context: t.Any) -> str | None:
78
+ if not context:
79
+ return '{"error": "Server context not available"}'
80
+
81
+ # Rate limiting is optional - skip if not configured
82
+ if context.rate_limiter:
83
+ allowed, details = await context.rate_limiter.check_request_allowed("default")
84
+ if not allowed:
85
+ return f'{{"error": "Rate limit exceeded: {details.get("reason", "unknown")}", "success": false}}'
86
+
87
+ return None
88
+
89
+
90
+ def _handle_task_exception(job_id: str, task: asyncio.Task) -> None:
91
+ """Handle exceptions from background tasks."""
92
+ import tempfile
93
+ from pathlib import Path
94
+
95
+ try:
96
+ exception = task.exception()
97
+ if exception:
98
+ # Log the exception to a debug file
99
+ debug_file = (
100
+ Path(tempfile.gettempdir()) / f"crackerjack-task-error-{job_id}.log"
101
+ )
102
+ with debug_file.open("w") as f:
103
+ f.write(
104
+ f"Background task {job_id} failed with exception: {exception}\n",
105
+ )
106
+ f.write(f"Exception type: {type(exception)}\n")
107
+ import traceback
108
+
109
+ f.write(
110
+ f"Traceback:\n{traceback.format_exception(type(exception), exception, exception.__traceback__)}\n",
111
+ )
112
+ except Exception as e:
113
+ # If we can't even log the error, at least try to create a simple file
114
+ with suppress(Exception):
115
+ debug_file = (
116
+ Path(tempfile.gettempdir()) / f"crackerjack-logging-error-{job_id}.log"
117
+ )
118
+ with debug_file.open("w") as f:
119
+ f.write(f"Failed to log task exception: {e}\n")
120
+
121
+
122
+ def _parse_kwargs(kwargs: str) -> dict[str, t.Any]:
123
+ try:
124
+ return {"kwargs": json.loads(kwargs) if kwargs.strip() else {}}
125
+ except json.JSONDecodeError as e:
126
+ return {"error": f"Invalid JSON in kwargs: {e}"}
127
+
128
+
129
+ def _get_cached_patterns(context: t.Any, use_cache: bool) -> list[t.Any]:
130
+ if use_cache and hasattr(context, "error_cache"):
131
+ return getattr(context.error_cache, "patterns", [])
132
+ return []
133
+
134
+
135
+ def _build_error_analysis(
136
+ use_cache: bool,
137
+ cached_patterns: list[t.Any],
138
+ ) -> dict[str, t.Any]:
139
+ analysis = {
140
+ "analysis_type": "smart_error_analysis",
141
+ "use_cache": use_cache,
142
+ "cached_patterns_count": len(cached_patterns),
143
+ "common_patterns": [
144
+ {
145
+ "type": "import_error",
146
+ "frequency": "high",
147
+ "typical_fix": "Check import paths and dependencies",
148
+ },
149
+ {
150
+ "type": "type_annotation_missing",
151
+ "frequency": "medium",
152
+ "typical_fix": "Add proper type hints to functions and methods",
153
+ },
154
+ {
155
+ "type": "test_failure",
156
+ "frequency": "medium",
157
+ "typical_fix": "Review test expectations and implementation",
158
+ },
159
+ ],
160
+ "recommendations": [
161
+ "Run fast hooks first to fix formatting issues",
162
+ "Execute tests to identify functional problems",
163
+ "Run comprehensive hooks for quality analysis",
164
+ ],
165
+ }
166
+
167
+ if cached_patterns:
168
+ analysis["cached_patterns"] = cached_patterns[:5]
169
+
170
+ return analysis
171
+
172
+
173
async def _execute_crackerjack_sync(
    job_id: str,
    args: str,
    kwargs: dict[str, t.Any],
    context: t.Any,
) -> dict[str, t.Any]:
    """Run the full crackerjack workflow for one job and return its result dict.

    Initializes services, builds an orchestrator, then iterates the workflow
    up to ``kwargs["max_iterations"]`` (default 10) times. Any exception is
    converted into a ``{"status": "failed"}`` result rather than propagated.

    Args:
        job_id: Short identifier used for progress reporting.
        args: Raw argument string; not consumed in this function — kept for
            interface parity with the MCP tool signature.
        kwargs: Parsed options dict (``max_iterations``, ``dry_run``, etc.).
        context: MCP server context providing config and ``safe_print``.
    """
    if not context:
        return {"job_id": job_id, "status": "failed", "error": "No context available"}

    max_iterations = kwargs.get("max_iterations", 10)
    current_iteration = 1

    try:
        await _initialize_execution(job_id, max_iterations, current_iteration, context)

        orchestrator, use_advanced_orchestrator = await _setup_orchestrator(
            job_id,
            max_iterations,
            current_iteration,
            kwargs,
            context,
        )

        return await _run_workflow_iterations(
            job_id,
            max_iterations,
            orchestrator,
            use_advanced_orchestrator,
            kwargs,
        )

    except Exception as e:
        # Surface the failure through progress reporting before returning.
        _update_progress(
            job_id=job_id,
            status="failed",
            iteration=current_iteration,
            max_iterations=max_iterations,
            current_stage="error",
            message=f"Execution failed: {e}",
        )
        context.safe_print(f"Execution failed: {e}")
        return {"job_id": job_id, "status": "failed", "error": str(e)}
215
+
216
+
217
async def _initialize_execution(
    job_id: str,
    max_iterations: int,
    current_iteration: int,
    context: t.Any,
) -> None:
    """Initialize execution with status checks and service preparation.

    Progress milestones: 2% (starting), 5% (status verified), 10% (services
    ready).

    Raises:
        RuntimeError: when the status check reports that execution should
            be aborted.
    """
    _update_progress(
        job_id=job_id,
        iteration=current_iteration,
        max_iterations=max_iterations,
        overall_progress=2,
        message="Initializing crackerjack execution",
    )

    # Check comprehensive status first to prevent conflicts and perform cleanup
    status_result = await _check_status_and_prepare(job_id, context)
    if status_result.get("should_abort", False):
        msg = f"Execution aborted: {status_result['reason']}"
        raise RuntimeError(msg)

    _update_progress(
        job_id=job_id,
        iteration=current_iteration,
        max_iterations=max_iterations,
        overall_progress=5,
        current_stage="status_verified",
        message="Status check complete - no conflicts detected",
    )

    # Clean up stale jobs first
    await _cleanup_stale_jobs(context)

    # Auto-start required services
    await _ensure_services_running(job_id, context)

    _update_progress(
        job_id=job_id,
        iteration=current_iteration,
        max_iterations=max_iterations,
        overall_progress=10,
        current_stage="services_ready",
        message="Services initialized successfully",
    )
261
+
262
+
263
async def _setup_orchestrator(
    job_id: str,
    max_iterations: int,
    current_iteration: int,
    kwargs: dict[str, t.Any],
    context: t.Any,
) -> tuple[t.Any, bool]:
    """Set up the appropriate orchestrator (force standard for MCP compatibility)."""
    # The standard orchestrator is always used here so that progress
    # reporting works through the MCP server.
    context.safe_print("Using Standard WorkflowOrchestrator for MCP compatibility")
    orchestrator = _create_standard_orchestrator(job_id, kwargs, context)

    # Report which orchestrator mode is active.
    _update_progress(
        job_id=job_id,
        iteration=current_iteration,
        max_iterations=max_iterations,
        overall_progress=15,
        current_stage="orchestrator_ready",
        message="Initialized Standard Orchestrator (MCP Compatible)",
    )

    # Second element signals "advanced orchestrator in use" — always False.
    return orchestrator, False
288
+
289
+
290
async def _create_advanced_orchestrator(
    job_id: str,
    kwargs: dict[str, t.Any],
    context: t.Any,
) -> t.Any:
    """Create and configure the advanced orchestrator.

    NOTE(review): `_setup_orchestrator` currently always builds the standard
    orchestrator; this factory appears unreferenced in this module — confirm
    before removing.

    Args:
        job_id: Job identifier forwarded to the session for web progress.
        kwargs: Options dict; only ``debug`` is consulted here.
        context: MCP server context supplying console and project path.

    Returns:
        A configured AdvancedWorkflowOrchestrator instance.
    """
    from crackerjack.core.session_coordinator import SessionCoordinator
    from crackerjack.orchestration.advanced_orchestrator import (
        AdvancedWorkflowOrchestrator,
    )
    from crackerjack.orchestration.execution_strategies import (
        AICoordinationMode,
        AIIntelligence,
        ExecutionStrategy,
        OrchestrationConfig,
        ProgressLevel,
        StreamingMode,
    )

    # Create optimal orchestration configuration for maximum efficiency
    optimal_config = OrchestrationConfig(
        execution_strategy=ExecutionStrategy.ADAPTIVE,
        progress_level=ProgressLevel.DETAILED,
        streaming_mode=StreamingMode.WEBSOCKET,
        ai_coordination_mode=AICoordinationMode.COORDINATOR,
        ai_intelligence=AIIntelligence.ADAPTIVE,
        # Enable advanced features
        correlation_tracking=True,
        failure_analysis=True,
        intelligent_retry=True,
        # Maximize parallelism for hook and test fixing
        max_parallel_hooks=3,
        max_parallel_tests=4,
        timeout_multiplier=1.0,
        # Enhanced debugging and monitoring
        debug_level="standard",
        log_individual_outputs=False,
        preserve_temp_files=False,
    )

    # Initialize advanced orchestrator with optimal config
    session = SessionCoordinator(
        context.console,
        context.config.project_path,
        web_job_id=job_id,
    )
    orchestrator = AdvancedWorkflowOrchestrator(
        console=context.console,
        pkg_path=context.config.project_path,
        session=session,
        config=optimal_config,
    )

    # Override MCP mode if debug flag is set
    if kwargs.get("debug", False):
        orchestrator.individual_executor.set_mcp_mode(False)
        context.safe_print("🐛 Debug mode enabled - full output mode")

    return orchestrator
349
+
350
+
351
def _create_standard_orchestrator(
    job_id: str,
    kwargs: dict[str, t.Any],
    context: t.Any,
) -> t.Any:
    """Create the standard fallback orchestrator.

    Imported lazily to keep module import light. ``dry_run`` is honored from
    kwargs; the job id is forwarded so progress can be streamed to the web UI.
    """
    from crackerjack.core.workflow_orchestrator import WorkflowOrchestrator

    return WorkflowOrchestrator(
        console=context.console,
        pkg_path=context.config.project_path,
        dry_run=kwargs.get("dry_run", False),
        web_job_id=job_id,
    )
365
+
366
+
367
async def _run_workflow_iterations(
    job_id: str,
    max_iterations: int,
    orchestrator: t.Any,
    use_advanced_orchestrator: bool,
    kwargs: dict[str, t.Any],
) -> dict[str, t.Any]:
    """Run the main workflow iteration loop.

    Executes up to ``max_iterations`` passes, returning a success payload as
    soon as one pass succeeds, otherwise a failure payload. Progress is
    reported before every pass; between failed passes a short retry pause is
    taken. A per-iteration exception aborts the loop only once the final
    iteration has been reached.
    """
    success = False
    current_iteration = 1

    for iteration in range(1, max_iterations + 1):
        current_iteration = iteration

        _update_progress(
            job_id=job_id,
            iteration=current_iteration,
            max_iterations=max_iterations,
            # Iterations account for up to 80% of overall progress; the
            # remainder is reserved for completion bookkeeping.
            overall_progress=int((iteration / max_iterations) * 80),
            current_stage=f"iteration_{iteration}",
            message=f"Running iteration {iteration} / {max_iterations}",
        )

        # Fresh options each pass, rebuilt from the caller's kwargs.
        options = _create_workflow_options(kwargs)

        try:
            success = await _execute_single_iteration(
                orchestrator,
                use_advanced_orchestrator,
                options,
            )

            if success:
                return _create_success_result(
                    job_id,
                    current_iteration,
                    max_iterations,
                    iteration,
                )

            if iteration < max_iterations:
                await _handle_iteration_retry(
                    job_id,
                    current_iteration,
                    max_iterations,
                    iteration,
                )
                continue

        except Exception as e:
            # _handle_iteration_error returns False once retries are exhausted.
            if not await _handle_iteration_error(iteration, max_iterations, e):
                break

    return _create_failure_result(job_id, current_iteration, max_iterations)
421
+
422
+
423
def _create_workflow_options(kwargs: dict[str, t.Any]) -> t.Any:
    """Create WorkflowOptions from kwargs.

    Defaults: tests on, AI agent on, hooks on, proactive mode on — callers
    opt out via the corresponding kwargs keys.
    """
    from crackerjack.models.config import WorkflowOptions

    options = WorkflowOptions()
    options.testing.test = kwargs.get("test", True)
    options.ai_agent = kwargs.get("ai_agent", True)
    options.skip_hooks = kwargs.get("skip_hooks", False)
    # Enable proactive mode by default for better architectural planning
    options.proactive_mode = kwargs.get("proactive_mode", True)
    return options
434
+
435
+
436
+ async def _execute_single_iteration(
437
+ orchestrator: t.Any,
438
+ use_advanced_orchestrator: bool,
439
+ options: t.Any,
440
+ ) -> bool:
441
+ """Execute a single workflow iteration."""
442
+ if use_advanced_orchestrator:
443
+ return await orchestrator.execute_orchestrated_workflow(options)
444
+ return await orchestrator.run_complete_workflow(options)
445
+
446
+
447
def _create_success_result(
    job_id: str,
    current_iteration: int,
    max_iterations: int,
    iteration: int,
) -> dict[str, t.Any]:
    """Publish 100% completion progress and build the success payload."""
    summary = f"Successfully completed after {iteration} iterations"
    _update_progress(
        job_id=job_id,
        status="completed",
        iteration=current_iteration,
        max_iterations=max_iterations,
        overall_progress=100,
        current_stage="completed",
        message=summary,
    )
    return {
        "job_id": job_id,
        "status": "completed",
        "iteration": current_iteration,
        "message": summary,
    }
469
+
470
+
471
async def _handle_iteration_retry(
    job_id: str,
    current_iteration: int,
    max_iterations: int,
    iteration: int,
) -> None:
    """Report the failed pass and pause briefly before retrying."""
    progress_pct = int((iteration / max_iterations) * 80)
    _update_progress(
        job_id=job_id,
        iteration=current_iteration,
        max_iterations=max_iterations,
        overall_progress=progress_pct,
        current_stage="retrying",
        message=f"Iteration {iteration} failed, retrying...",
    )
    # Short back-off between passes.
    await asyncio.sleep(1)
487
+
488
+
489
+ async def _handle_iteration_error(
490
+ iteration: int,
491
+ max_iterations: int,
492
+ error: Exception,
493
+ ) -> bool:
494
+ """Handle iteration errors. Returns True to continue, False to break."""
495
+ if iteration >= max_iterations:
496
+ return False
497
+ await asyncio.sleep(1)
498
+ return True
499
+
500
+
501
def _create_failure_result(
    job_id: str,
    current_iteration: int,
    max_iterations: int,
) -> dict[str, t.Any]:
    """Publish failure progress and build the failure payload."""
    summary = f"Failed after {max_iterations} iterations"
    _update_progress(
        job_id=job_id,
        status="failed",
        iteration=current_iteration,
        max_iterations=max_iterations,
        overall_progress=80,
        current_stage="failed",
        message=summary,
    )
    return {
        "job_id": job_id,
        "status": "failed",
        "iteration": current_iteration,
        "message": summary,
    }
522
+
523
+
524
async def _ensure_services_running(job_id: str, context: t.Any) -> None:
    """Ensure WebSocket server and watchdog are running before starting workflow.

    Best-effort: detection and startup failures are reported via safe_print
    but never raised. The server is spawned detached (new session, output
    discarded) and polled for up to ~5 seconds (10 x 0.5s).

    NOTE(review): despite the summary mentioning a watchdog, only the
    WebSocket server is checked/started here — confirm intent.
    """
    import subprocess

    _update_progress(
        job_id=job_id,
        current_stage="service_startup",
        message="Checking required services...",
    )

    # Check if WebSocket server is running
    websocket_running = False
    with suppress(Exception):
        from crackerjack.services.server_manager import find_websocket_server_processes

        websocket_processes = find_websocket_server_processes()
        websocket_running = len(websocket_processes) > 0

    if not websocket_running:
        _update_progress(
            job_id=job_id,
            current_stage="service_startup",
            message="Starting WebSocket server...",
        )

        try:
            # Start WebSocket server in background
            subprocess.Popen(
                ["python", "-m", "crackerjack", "--start-websocket-server"],
                cwd=context.config.project_path,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                start_new_session=True,
            )

            # Wait for server to start
            # NOTE: if the import above failed, the suppressed NameError here
            # means the poll never breaks and the warning below is printed.
            for _i in range(10):
                with suppress(Exception):
                    websocket_processes = find_websocket_server_processes()
                    if websocket_processes:
                        context.safe_print("✅ WebSocket server started successfully")
                        break
                await asyncio.sleep(0.5)
            else:
                context.safe_print("⚠️ WebSocket server may not have started properly")

        except Exception as e:
            context.safe_print(f"⚠️ Failed to start WebSocket server: {e}")
    else:
        context.safe_print("✅ WebSocket server already running")
574
+
575
+
576
async def _check_status_and_prepare(job_id: str, context: t.Any) -> dict[str, t.Any]:
    """Check comprehensive system status and prepare for execution.

    Returns a dict with keys ``should_abort``, ``reason``, ``status_info``
    and ``cleanup_performed``. All code paths visible here return
    ``should_abort=False`` — status problems are reported via safe_print but
    never block execution.
    """
    _update_progress(
        job_id=job_id,
        current_stage="status_check",
        message="🔍 Checking system status to prevent conflicts...",
    )

    try:
        status_info = await _get_status_info()
        if "error" in status_info:
            return _handle_status_error(status_info, context)

        cleanup_performed = []

        # Check for conflicting jobs
        _check_active_jobs(status_info, context)

        # Check and flag resource cleanup needs
        cleanup_performed.extend(_check_resource_cleanup(status_info, context))

        # Check service health
        _check_service_health(status_info, context)

        context.safe_print("✅ Status check complete - ready to proceed")

        return {
            "should_abort": False,
            "reason": "",
            "status_info": status_info,
            "cleanup_performed": cleanup_performed,
        }

    except Exception as e:
        return _handle_status_exception(e, context)
611
+
612
+
613
async def _get_status_info() -> dict[str, t.Any]:
    """Get comprehensive system status.

    Delegates to the monitoring tools module; imported lazily here,
    presumably to avoid a circular import at module load time — confirm.
    """
    from .monitoring_tools import _get_comprehensive_status

    return await _get_comprehensive_status()
618
+
619
+
620
+ def _handle_status_error(
621
+ status_info: dict[str, t.Any],
622
+ context: t.Any,
623
+ ) -> dict[str, t.Any]:
624
+ """Handle status check failure."""
625
+ context.safe_print(f"⚠️ Status check failed: {status_info['error']}")
626
+ return {
627
+ "should_abort": False,
628
+ "reason": "",
629
+ "status_info": status_info,
630
+ "cleanup_performed": [],
631
+ }
632
+
633
+
634
+ def _check_active_jobs(status_info: dict[str, t.Any], context: t.Any) -> None:
635
+ """Check for active jobs that might conflict."""
636
+ active_jobs = [
637
+ j
638
+ for j in status_info.get("jobs", {}).get("details", [])
639
+ if j.get("status") == "running"
640
+ ]
641
+
642
+ if active_jobs:
643
+ _handle_conflicting_jobs(active_jobs, context)
644
+ else:
645
+ context.safe_print("✅ No active jobs detected - safe to proceed")
646
+
647
+
648
+ def _handle_conflicting_jobs(
649
+ active_jobs: list[dict[str, t.Any]],
650
+ context: t.Any,
651
+ ) -> None:
652
+ """Handle conflicting active jobs."""
653
+ # For now, assume all jobs could conflict (future: check project paths)
654
+ conflicting_jobs = active_jobs
655
+
656
+ if conflicting_jobs:
657
+ job_ids = [j.get("job_id", "unknown") for j in conflicting_jobs]
658
+ context.safe_print(
659
+ f"⚠️ Found {len(conflicting_jobs)} active job(s): {', '.join(job_ids[:3])}",
660
+ )
661
+ context.safe_print(
662
+ " Running concurrent crackerjack instances may cause file conflicts",
663
+ )
664
+ context.safe_print(" Proceeding with caution...")
665
+
666
+
667
+ def _check_resource_cleanup(status_info: dict[str, t.Any], context: t.Any) -> list[str]:
668
+ """Check if resource cleanup is needed."""
669
+ cleanup_performed = []
670
+
671
+ temp_files_count = (
672
+ status_info.get("server_stats", {})
673
+ .get("resource_usage", {})
674
+ .get("temp_files_count", 0)
675
+ )
676
+
677
+ if temp_files_count > 50:
678
+ context.safe_print(
679
+ f"🗑️ Found {temp_files_count} temporary files - cleanup recommended",
680
+ )
681
+ cleanup_performed.append("temp_files_flagged")
682
+
683
+ return cleanup_performed
684
+
685
+
686
+ def _check_service_health(status_info: dict[str, t.Any], context: t.Any) -> None:
687
+ """Check health of required services."""
688
+ services = status_info.get("services", {})
689
+ mcp_running = services.get("mcp_server", {}).get("running", False)
690
+ websocket_running = services.get("websocket_server", {}).get("running", False)
691
+
692
+ if not mcp_running:
693
+ context.safe_print("⚠️ MCP server not running - will auto-start if needed")
694
+
695
+ if not websocket_running:
696
+ context.safe_print("📡 WebSocket server not running - will auto-start")
697
+
698
+
699
+ def _handle_status_exception(error: Exception, context: t.Any) -> dict[str, t.Any]:
700
+ """Handle status check exceptions."""
701
+ context.safe_print(f"⚠️ Status check encountered error: {error}")
702
+ return {
703
+ "should_abort": False,
704
+ "reason": "",
705
+ "status_info": {"error": str(error)},
706
+ "cleanup_performed": [],
707
+ }
708
+
709
+
710
+ async def _cleanup_stale_jobs(context: t.Any) -> None:
711
+ """Clean up stale job files with unknown IDs or stuck in processing state."""
712
+ if not context.progress_dir.exists():
713
+ return
714
+
715
+ current_time = time.time()
716
+ cleaned_count = 0
717
+
718
+ with suppress(Exception):
719
+ for progress_file in context.progress_dir.glob("job-*.json"):
720
+ try:
721
+ import json
722
+
723
+ progress_data = json.loads(progress_file.read_text())
724
+
725
+ # Check if job is stale (older than 30 minutes and stuck)
726
+ last_update = progress_data.get("updated_at", 0)
727
+ age_minutes = (current_time - last_update) / 60
728
+
729
+ is_stale = (
730
+ age_minutes > 30 # Older than 30 minutes
731
+ or progress_data.get("job_id") == "unknown" # Unknown job ID
732
+ or "analyzing_failures: processing"
733
+ in progress_data.get("status", "") # Stuck in processing
734
+ )
735
+
736
+ if is_stale:
737
+ progress_file.unlink()
738
+ cleaned_count += 1
739
+
740
+ except (json.JSONDecodeError, OSError):
741
+ # Clean up malformed files
742
+ with suppress(OSError):
743
+ progress_file.unlink()
744
+ cleaned_count += 1
745
+
746
+ if cleaned_count > 0:
747
+ context.safe_print(f"🗑️ Cleaned up {cleaned_count} stale job files")
748
+
749
+
750
def _register_init_crackerjack_tool(mcp_app: t.Any) -> None:
    """Attach the `init_crackerjack` project-initialization tool to the MCP app."""

    @mcp_app.tool()
    async def init_crackerjack(args: str = "", kwargs: str = "{}") -> str:
        """Initialize or update crackerjack configuration in current project.

        Args:
            args: Optional target path (defaults to current directory)
            kwargs: JSON string with options like {"force": true}

        Returns:
            JSON string with initialization results
        """
        ctx = get_context()
        if not ctx:
            return _create_init_error_response("Server context not available")

        target_path, force, parse_error = _parse_init_arguments(args, kwargs)
        if parse_error:
            return parse_error

        try:
            outcome = _execute_initialization(ctx, target_path, force)
        except Exception as exc:
            return _create_init_exception_response(exc, target_path)
        return _create_init_success_response(outcome, target_path, force)
775
+
776
+
777
+ def _create_init_error_response(message: str) -> str:
778
+ """Create standardized error response for initialization."""
779
+ return json.dumps({"error": message, "success": False}, indent=2)
780
+
781
+
782
+ def _parse_init_arguments(args: str, kwargs: str) -> tuple[t.Any, bool, str | None]:
783
+ """Parse and validate initialization arguments."""
784
+ from pathlib import Path
785
+
786
+ target_path = args.strip() or None
787
+
788
+ try:
789
+ extra_kwargs = json.loads(kwargs) if kwargs.strip() else {}
790
+ except json.JSONDecodeError as e:
791
+ return None, False, _create_init_error_response(f"Invalid JSON in kwargs: {e}")
792
+
793
+ force = extra_kwargs.get("force", False)
794
+
795
+ # Determine target path
796
+ if target_path:
797
+ target_path = Path(target_path).resolve()
798
+ else:
799
+ target_path = Path.cwd()
800
+
801
+ # Validate target path exists
802
+ if not target_path.exists():
803
+ return (
804
+ None,
805
+ False,
806
+ _create_init_error_response(f"Target path does not exist: {target_path}"),
807
+ )
808
+
809
+ return target_path, force, None
810
+
811
+
812
def _execute_initialization(
    context: t.Any, target_path: t.Any, force: bool
) -> dict[str, t.Any]:
    """Run project initialization and return its result dictionary.

    Wires up the filesystem and git services from the server *context*, then
    delegates the actual work to ``InitializationService.initialize_project``.
    """
    from crackerjack.services.filesystem import FileSystemService
    from crackerjack.services.git import GitService
    from crackerjack.services.initialization import InitializationService

    filesystem = FileSystemService()
    git_service = GitService(context.console, context.config.project_path)

    service = InitializationService(
        context.console, filesystem, git_service, context.config.project_path
    )
    return service.initialize_project(target_path=target_path, force=force)
827
+
828
+
829
+ def _create_init_success_response(
830
+ results: dict[str, t.Any], target_path: t.Any, force: bool
831
+ ) -> str:
832
+ """Create success response with summary information."""
833
+ results["command"] = "init_crackerjack"
834
+ results["target_path"] = str(target_path)
835
+ results["force"] = force
836
+ return json.dumps(results, indent=2)
837
+
838
+
839
+ def _create_init_exception_response(error: Exception, target_path: t.Any) -> str:
840
+ """Create exception response for initialization failures."""
841
+ error_result = {
842
+ "error": f"Initialization failed: {error}",
843
+ "success": False,
844
+ "command": "init_crackerjack",
845
+ "target_path": str(target_path) if target_path else "current_directory",
846
+ }
847
+ return json.dumps(error_result, indent=2)
848
+
849
+
850
def _register_agent_suggestions_tool(mcp_app: t.Any) -> None:
    """Register tool for suggesting appropriate Claude Code agents."""

    @mcp_app.tool()
    async def suggest_agents(
        task_description: str = "",
        project_type: str = "python",
        current_context: str = "",
    ) -> str:
        """Suggest appropriate Claude Code agents based on task and context.

        Args:
            task_description: Description of the task being performed
            project_type: Type of project (python, web, etc.)
            current_context: Current development context or issues

        Returns:
            JSON with suggested agents and usage patterns
        """
        # Result skeleton: lists are filled below, rationale is set last.
        suggestions = {
            "primary_agents": [],
            "task_specific_agents": [],
            "usage_patterns": [],
            "rationale": "",
        }

        # Always recommend crackerjack-architect for Python projects
        if project_type.lower() == "python" or "python" in task_description.lower():
            suggestions["primary_agents"].append(
                {
                    "name": "crackerjack-architect",
                    "emoji": "🏗️",
                    "description": "Expert in crackerjack's modular architecture and Python project management patterns",
                    "usage": "Use PROACTIVELY for all feature development, architectural decisions, and ensuring code follows crackerjack standards",
                    "priority": "HIGH",
                }
            )

            suggestions["primary_agents"].append(
                {
                    "name": "python-pro",
                    "emoji": "🐍",
                    "description": "Modern Python development with type hints, async/await patterns, and clean architecture",
                    "usage": "Use for implementing Python code with best practices",
                    "priority": "HIGH",
                }
            )

        # Task-specific agent suggestions
        # NOTE(review): matching is substring-based over the concatenated
        # lowercased task + context text.
        task_lower = task_description.lower()
        context_lower = current_context.lower()

        if any(
            word in task_lower + context_lower
            for word in ("test", "testing", "coverage", "pytest")
        ):
            suggestions["task_specific_agents"].append(
                {
                    "name": "crackerjack-test-specialist",
                    "emoji": "🧪",
                    "description": "Advanced testing specialist for complex scenarios and coverage optimization",
                    "usage": "Use for test creation, debugging test failures, and coverage improvements",
                    "priority": "HIGH",
                }
            )

            suggestions["task_specific_agents"].append(
                {
                    "name": "pytest-hypothesis-specialist",
                    "emoji": "🧪",
                    "description": "Advanced testing patterns and property-based testing",
                    "usage": "Use for comprehensive test development and optimization",
                    "priority": "MEDIUM",
                }
            )

        if any(
            word in task_lower + context_lower
            for word in ("security", "vulnerability", "auth", "permission")
        ):
            suggestions["task_specific_agents"].append(
                {
                    "name": "security-auditor",
                    "emoji": "🔒",
                    "description": "Security analysis, vulnerability detection, and secure coding practices",
                    "usage": "Use for security review and vulnerability assessment",
                    "priority": "HIGH",
                }
            )

        if any(
            word in task_lower + context_lower
            for word in ("architecture", "design", "api", "backend")
        ):
            suggestions["task_specific_agents"].append(
                {
                    "name": "backend-architect",
                    "emoji": "🏗️",
                    "description": "System design, API architecture, and service integration patterns",
                    "usage": "Use for architectural planning and system design",
                    "priority": "MEDIUM",
                }
            )

        # Usage patterns (always returned, independent of matches above)
        suggestions["usage_patterns"] = [
            'Task tool with subagent_type="crackerjack-architect" for feature planning and architecture',
            'Task tool with subagent_type="python-pro" for implementation with best practices',
            'Task tool with subagent_type="crackerjack-test-specialist" for comprehensive testing',
            'Task tool with subagent_type="security-auditor" for security validation',
        ]

        # Rationale: only populated when crackerjack-architect was recommended.
        if "crackerjack-architect" in [
            agent["name"] for agent in suggestions["primary_agents"]
        ]:
            suggestions["rationale"] = (
                "The crackerjack-architect agent is essential for this Python project as it ensures "
                "code follows crackerjack patterns from the start, eliminating retrofitting needs. "
                "Combined with python-pro for implementation and task-specific agents for specialized "
                "work, this provides comprehensive development support with built-in quality assurance."
            )

        return json.dumps(suggestions, indent=2)

    @mcp_app.tool()
    async def detect_agent_needs(
        error_context: str = "",
        file_patterns: str = "",
        recent_changes: str = "",
    ) -> str:
        """Detect and suggest agents based on current development context.

        Args:
            error_context: Current errors or issues being faced
            file_patterns: File types or patterns being worked on
            recent_changes: Recent changes or commits

        Returns:
            JSON with agent recommendations based on context analysis
        """
        # NOTE(review): recent_changes is currently unused by the analysis
        # below — kept for tool-schema compatibility; confirm before removing.
        recommendations = {
            "urgent_agents": [],
            "suggested_agents": [],
            "workflow_recommendations": [],
            "detection_reasoning": "",
        }

        # Add urgent agents based on error context
        _add_urgent_agents_for_errors(recommendations, error_context)

        # Add general suggestions for Python projects
        _add_python_project_suggestions(recommendations, file_patterns)

        # Set workflow recommendations
        _set_workflow_recommendations(recommendations)

        # Generate detection reasoning
        _generate_detection_reasoning(recommendations)

        return json.dumps(recommendations, indent=2)
1011
+
1012
+
1013
+ def _add_urgent_agents_for_errors(
1014
+ recommendations: dict[str, t.Any], error_context: str
1015
+ ) -> None:
1016
+ """Add urgent agent recommendations based on error context."""
1017
+ if any(
1018
+ word in error_context.lower()
1019
+ for word in ("test fail", "coverage", "pytest", "assertion")
1020
+ ):
1021
+ recommendations["urgent_agents"].append(
1022
+ {
1023
+ "agent": "crackerjack-test-specialist",
1024
+ "reason": "Test failures detected - specialist needed for debugging and fixes",
1025
+ "action": 'Task tool with subagent_type="crackerjack-test-specialist" to analyze and fix test issues',
1026
+ }
1027
+ )
1028
+
1029
+ if any(
1030
+ word in error_context.lower()
1031
+ for word in ("security", "vulnerability", "bandit", "unsafe")
1032
+ ):
1033
+ recommendations["urgent_agents"].append(
1034
+ {
1035
+ "agent": "security-auditor",
1036
+ "reason": "Security issues detected - immediate audit required",
1037
+ "action": 'Task tool with subagent_type="security-auditor" to review and fix security vulnerabilities',
1038
+ }
1039
+ )
1040
+
1041
+ if any(
1042
+ word in error_context.lower()
1043
+ for word in ("complexity", "refactor", "too complex")
1044
+ ):
1045
+ recommendations["urgent_agents"].append(
1046
+ {
1047
+ "agent": "crackerjack-architect",
1048
+ "reason": "Complexity issues detected - architectural review needed",
1049
+ "action": 'Task tool with subagent_type="crackerjack-architect" to simplify and restructure code',
1050
+ }
1051
+ )
1052
+
1053
+
1054
+ def _add_python_project_suggestions(
1055
+ recommendations: dict[str, t.Any], file_patterns: str
1056
+ ) -> None:
1057
+ """Add general suggestions for Python projects."""
1058
+ if "python" in file_patterns.lower() or ".py" in file_patterns:
1059
+ recommendations["suggested_agents"].extend(
1060
+ [
1061
+ {
1062
+ "agent": "crackerjack-architect",
1063
+ "reason": "Python project detected - ensure crackerjack compliance",
1064
+ "priority": "HIGH",
1065
+ },
1066
+ {
1067
+ "agent": "python-pro",
1068
+ "reason": "Python development best practices",
1069
+ "priority": "HIGH",
1070
+ },
1071
+ ]
1072
+ )
1073
+
1074
+
1075
+ def _set_workflow_recommendations(recommendations: dict[str, t.Any]) -> None:
1076
+ """Set workflow recommendations based on urgent agents."""
1077
+ if recommendations["urgent_agents"]:
1078
+ recommendations["workflow_recommendations"] = [
1079
+ "Address urgent issues first with specialized agents",
1080
+ "Run crackerjack quality checks after fixes: python -m crackerjack -t",
1081
+ "Use crackerjack-architect for ongoing compliance",
1082
+ ]
1083
+ else:
1084
+ recommendations["workflow_recommendations"] = [
1085
+ "Start with crackerjack-architect for proper planning",
1086
+ "Use python-pro for implementation",
1087
+ "Run continuous quality checks: python -m crackerjack",
1088
+ ]
1089
+
1090
+
1091
+ def _generate_detection_reasoning(recommendations: dict[str, t.Any]) -> None:
1092
+ """Generate detection reasoning based on recommendations."""
1093
+ recommendations["detection_reasoning"] = (
1094
+ f"Analysis of context revealed {len(recommendations['urgent_agents'])} urgent issues "
1095
+ f"and {len(recommendations['suggested_agents'])} general recommendations. "
1096
+ "Prioritize urgent agents first, then follow standard workflow patterns."
1097
+ )