empathy-framework 3.7.0-py3-none-any.whl → 3.8.0-py3-none-any.whl

This diff shows the content of publicly available package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (274)
  1. coach_wizards/code_reviewer_README.md +60 -0
  2. coach_wizards/code_reviewer_wizard.py +180 -0
  3. {empathy_framework-3.7.0.dist-info → empathy_framework-3.8.0.dist-info}/METADATA +148 -11
  4. empathy_framework-3.8.0.dist-info/RECORD +333 -0
  5. {empathy_framework-3.7.0.dist-info → empathy_framework-3.8.0.dist-info}/top_level.txt +5 -1
  6. empathy_healthcare_plugin/monitors/__init__.py +9 -0
  7. empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py +315 -0
  8. empathy_healthcare_plugin/monitors/monitoring/__init__.py +44 -0
  9. empathy_healthcare_plugin/monitors/monitoring/protocol_checker.py +300 -0
  10. empathy_healthcare_plugin/monitors/monitoring/protocol_loader.py +214 -0
  11. empathy_healthcare_plugin/monitors/monitoring/sensor_parsers.py +306 -0
  12. empathy_healthcare_plugin/monitors/monitoring/trajectory_analyzer.py +389 -0
  13. empathy_llm_toolkit/agent_factory/__init__.py +53 -0
  14. empathy_llm_toolkit/agent_factory/adapters/__init__.py +85 -0
  15. empathy_llm_toolkit/agent_factory/adapters/autogen_adapter.py +312 -0
  16. empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +454 -0
  17. empathy_llm_toolkit/agent_factory/adapters/haystack_adapter.py +298 -0
  18. empathy_llm_toolkit/agent_factory/adapters/langchain_adapter.py +362 -0
  19. empathy_llm_toolkit/agent_factory/adapters/langgraph_adapter.py +333 -0
  20. empathy_llm_toolkit/agent_factory/adapters/native.py +228 -0
  21. empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +426 -0
  22. empathy_llm_toolkit/agent_factory/base.py +305 -0
  23. empathy_llm_toolkit/agent_factory/crews/__init__.py +67 -0
  24. empathy_llm_toolkit/agent_factory/crews/code_review.py +1113 -0
  25. empathy_llm_toolkit/agent_factory/crews/health_check.py +1246 -0
  26. empathy_llm_toolkit/agent_factory/crews/refactoring.py +1128 -0
  27. empathy_llm_toolkit/agent_factory/crews/security_audit.py +1018 -0
  28. empathy_llm_toolkit/agent_factory/decorators.py +286 -0
  29. empathy_llm_toolkit/agent_factory/factory.py +558 -0
  30. empathy_llm_toolkit/agent_factory/framework.py +192 -0
  31. empathy_llm_toolkit/agent_factory/memory_integration.py +324 -0
  32. empathy_llm_toolkit/agent_factory/resilient.py +320 -0
  33. empathy_llm_toolkit/cli/__init__.py +8 -0
  34. empathy_llm_toolkit/cli/sync_claude.py +487 -0
  35. empathy_llm_toolkit/code_health.py +150 -3
  36. empathy_llm_toolkit/config/__init__.py +29 -0
  37. empathy_llm_toolkit/config/unified.py +295 -0
  38. empathy_llm_toolkit/routing/__init__.py +32 -0
  39. empathy_llm_toolkit/routing/model_router.py +362 -0
  40. empathy_llm_toolkit/security/IMPLEMENTATION_SUMMARY.md +413 -0
  41. empathy_llm_toolkit/security/PHASE2_COMPLETE.md +384 -0
  42. empathy_llm_toolkit/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
  43. empathy_llm_toolkit/security/QUICK_REFERENCE.md +316 -0
  44. empathy_llm_toolkit/security/README.md +262 -0
  45. empathy_llm_toolkit/security/__init__.py +62 -0
  46. empathy_llm_toolkit/security/audit_logger.py +929 -0
  47. empathy_llm_toolkit/security/audit_logger_example.py +152 -0
  48. empathy_llm_toolkit/security/pii_scrubber.py +640 -0
  49. empathy_llm_toolkit/security/secrets_detector.py +678 -0
  50. empathy_llm_toolkit/security/secrets_detector_example.py +304 -0
  51. empathy_llm_toolkit/security/secure_memdocs.py +1192 -0
  52. empathy_llm_toolkit/security/secure_memdocs_example.py +278 -0
  53. empathy_llm_toolkit/wizards/__init__.py +38 -0
  54. empathy_llm_toolkit/wizards/base_wizard.py +364 -0
  55. empathy_llm_toolkit/wizards/customer_support_wizard.py +190 -0
  56. empathy_llm_toolkit/wizards/healthcare_wizard.py +362 -0
  57. empathy_llm_toolkit/wizards/patient_assessment_README.md +64 -0
  58. empathy_llm_toolkit/wizards/patient_assessment_wizard.py +193 -0
  59. empathy_llm_toolkit/wizards/technology_wizard.py +194 -0
  60. empathy_os/__init__.py +52 -52
  61. empathy_os/adaptive/__init__.py +13 -0
  62. empathy_os/adaptive/task_complexity.py +127 -0
  63. empathy_os/cache/__init__.py +117 -0
  64. empathy_os/cache/base.py +166 -0
  65. empathy_os/cache/dependency_manager.py +253 -0
  66. empathy_os/cache/hash_only.py +248 -0
  67. empathy_os/cache/hybrid.py +390 -0
  68. empathy_os/cache/storage.py +282 -0
  69. empathy_os/cli.py +118 -8
  70. empathy_os/cli_unified.py +121 -1
  71. empathy_os/config/__init__.py +63 -0
  72. empathy_os/config/xml_config.py +239 -0
  73. empathy_os/config.py +2 -1
  74. empathy_os/dashboard/__init__.py +15 -0
  75. empathy_os/dashboard/server.py +743 -0
  76. empathy_os/memory/__init__.py +195 -0
  77. empathy_os/memory/claude_memory.py +466 -0
  78. empathy_os/memory/config.py +224 -0
  79. empathy_os/memory/control_panel.py +1298 -0
  80. empathy_os/memory/edges.py +179 -0
  81. empathy_os/memory/graph.py +567 -0
  82. empathy_os/memory/long_term.py +1194 -0
  83. empathy_os/memory/nodes.py +179 -0
  84. empathy_os/memory/redis_bootstrap.py +540 -0
  85. empathy_os/memory/security/__init__.py +31 -0
  86. empathy_os/memory/security/audit_logger.py +930 -0
  87. empathy_os/memory/security/pii_scrubber.py +640 -0
  88. empathy_os/memory/security/secrets_detector.py +678 -0
  89. empathy_os/memory/short_term.py +2119 -0
  90. empathy_os/memory/storage/__init__.py +15 -0
  91. empathy_os/memory/summary_index.py +583 -0
  92. empathy_os/memory/unified.py +619 -0
  93. empathy_os/metrics/__init__.py +12 -0
  94. empathy_os/metrics/prompt_metrics.py +190 -0
  95. empathy_os/models/__init__.py +136 -0
  96. empathy_os/models/__main__.py +13 -0
  97. empathy_os/models/cli.py +655 -0
  98. empathy_os/models/empathy_executor.py +354 -0
  99. empathy_os/models/executor.py +252 -0
  100. empathy_os/models/fallback.py +671 -0
  101. empathy_os/models/provider_config.py +563 -0
  102. empathy_os/models/registry.py +382 -0
  103. empathy_os/models/tasks.py +302 -0
  104. empathy_os/models/telemetry.py +548 -0
  105. empathy_os/models/token_estimator.py +378 -0
  106. empathy_os/models/validation.py +274 -0
  107. empathy_os/monitoring/__init__.py +52 -0
  108. empathy_os/monitoring/alerts.py +23 -0
  109. empathy_os/monitoring/alerts_cli.py +268 -0
  110. empathy_os/monitoring/multi_backend.py +271 -0
  111. empathy_os/monitoring/otel_backend.py +363 -0
  112. empathy_os/optimization/__init__.py +19 -0
  113. empathy_os/optimization/context_optimizer.py +272 -0
  114. empathy_os/plugins/__init__.py +28 -0
  115. empathy_os/plugins/base.py +361 -0
  116. empathy_os/plugins/registry.py +268 -0
  117. empathy_os/project_index/__init__.py +30 -0
  118. empathy_os/project_index/cli.py +335 -0
  119. empathy_os/project_index/crew_integration.py +430 -0
  120. empathy_os/project_index/index.py +425 -0
  121. empathy_os/project_index/models.py +501 -0
  122. empathy_os/project_index/reports.py +473 -0
  123. empathy_os/project_index/scanner.py +538 -0
  124. empathy_os/prompts/__init__.py +61 -0
  125. empathy_os/prompts/config.py +77 -0
  126. empathy_os/prompts/context.py +177 -0
  127. empathy_os/prompts/parser.py +285 -0
  128. empathy_os/prompts/registry.py +313 -0
  129. empathy_os/prompts/templates.py +208 -0
  130. empathy_os/resilience/__init__.py +56 -0
  131. empathy_os/resilience/circuit_breaker.py +256 -0
  132. empathy_os/resilience/fallback.py +179 -0
  133. empathy_os/resilience/health.py +300 -0
  134. empathy_os/resilience/retry.py +209 -0
  135. empathy_os/resilience/timeout.py +135 -0
  136. empathy_os/routing/__init__.py +43 -0
  137. empathy_os/routing/chain_executor.py +433 -0
  138. empathy_os/routing/classifier.py +217 -0
  139. empathy_os/routing/smart_router.py +234 -0
  140. empathy_os/routing/wizard_registry.py +307 -0
  141. empathy_os/trust/__init__.py +28 -0
  142. empathy_os/trust/circuit_breaker.py +579 -0
  143. empathy_os/validation/__init__.py +19 -0
  144. empathy_os/validation/xml_validator.py +281 -0
  145. empathy_os/wizard_factory_cli.py +170 -0
  146. empathy_os/workflows/__init__.py +360 -0
  147. empathy_os/workflows/base.py +1660 -0
  148. empathy_os/workflows/bug_predict.py +962 -0
  149. empathy_os/workflows/code_review.py +960 -0
  150. empathy_os/workflows/code_review_adapters.py +310 -0
  151. empathy_os/workflows/code_review_pipeline.py +720 -0
  152. empathy_os/workflows/config.py +600 -0
  153. empathy_os/workflows/dependency_check.py +648 -0
  154. empathy_os/workflows/document_gen.py +1069 -0
  155. empathy_os/workflows/documentation_orchestrator.py +1205 -0
  156. empathy_os/workflows/health_check.py +679 -0
  157. empathy_os/workflows/keyboard_shortcuts/__init__.py +39 -0
  158. empathy_os/workflows/keyboard_shortcuts/generators.py +386 -0
  159. empathy_os/workflows/keyboard_shortcuts/parsers.py +414 -0
  160. empathy_os/workflows/keyboard_shortcuts/prompts.py +295 -0
  161. empathy_os/workflows/keyboard_shortcuts/schema.py +193 -0
  162. empathy_os/workflows/keyboard_shortcuts/workflow.py +505 -0
  163. empathy_os/workflows/manage_documentation.py +804 -0
  164. empathy_os/workflows/new_sample_workflow1.py +146 -0
  165. empathy_os/workflows/new_sample_workflow1_README.md +150 -0
  166. empathy_os/workflows/perf_audit.py +687 -0
  167. empathy_os/workflows/pr_review.py +748 -0
  168. empathy_os/workflows/progress.py +445 -0
  169. empathy_os/workflows/progress_server.py +322 -0
  170. empathy_os/workflows/refactor_plan.py +693 -0
  171. empathy_os/workflows/release_prep.py +808 -0
  172. empathy_os/workflows/research_synthesis.py +404 -0
  173. empathy_os/workflows/secure_release.py +585 -0
  174. empathy_os/workflows/security_adapters.py +297 -0
  175. empathy_os/workflows/security_audit.py +1046 -0
  176. empathy_os/workflows/step_config.py +234 -0
  177. empathy_os/workflows/test5.py +125 -0
  178. empathy_os/workflows/test5_README.md +158 -0
  179. empathy_os/workflows/test_gen.py +1855 -0
  180. empathy_os/workflows/test_lifecycle.py +526 -0
  181. empathy_os/workflows/test_maintenance.py +626 -0
  182. empathy_os/workflows/test_maintenance_cli.py +590 -0
  183. empathy_os/workflows/test_maintenance_crew.py +821 -0
  184. empathy_os/workflows/xml_enhanced_crew.py +285 -0
  185. empathy_software_plugin/cli/__init__.py +120 -0
  186. empathy_software_plugin/cli/inspect.py +362 -0
  187. empathy_software_plugin/cli.py +3 -1
  188. empathy_software_plugin/wizards/__init__.py +42 -0
  189. empathy_software_plugin/wizards/advanced_debugging_wizard.py +392 -0
  190. empathy_software_plugin/wizards/agent_orchestration_wizard.py +511 -0
  191. empathy_software_plugin/wizards/ai_collaboration_wizard.py +503 -0
  192. empathy_software_plugin/wizards/ai_context_wizard.py +441 -0
  193. empathy_software_plugin/wizards/ai_documentation_wizard.py +503 -0
  194. empathy_software_plugin/wizards/base_wizard.py +288 -0
  195. empathy_software_plugin/wizards/book_chapter_wizard.py +519 -0
  196. empathy_software_plugin/wizards/code_review_wizard.py +606 -0
  197. empathy_software_plugin/wizards/debugging/__init__.py +50 -0
  198. empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +414 -0
  199. empathy_software_plugin/wizards/debugging/config_loaders.py +442 -0
  200. empathy_software_plugin/wizards/debugging/fix_applier.py +469 -0
  201. empathy_software_plugin/wizards/debugging/language_patterns.py +383 -0
  202. empathy_software_plugin/wizards/debugging/linter_parsers.py +470 -0
  203. empathy_software_plugin/wizards/debugging/verification.py +369 -0
  204. empathy_software_plugin/wizards/enhanced_testing_wizard.py +537 -0
  205. empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +816 -0
  206. empathy_software_plugin/wizards/multi_model_wizard.py +501 -0
  207. empathy_software_plugin/wizards/pattern_extraction_wizard.py +422 -0
  208. empathy_software_plugin/wizards/pattern_retriever_wizard.py +400 -0
  209. empathy_software_plugin/wizards/performance/__init__.py +9 -0
  210. empathy_software_plugin/wizards/performance/bottleneck_detector.py +221 -0
  211. empathy_software_plugin/wizards/performance/profiler_parsers.py +278 -0
  212. empathy_software_plugin/wizards/performance/trajectory_analyzer.py +429 -0
  213. empathy_software_plugin/wizards/performance_profiling_wizard.py +305 -0
  214. empathy_software_plugin/wizards/prompt_engineering_wizard.py +425 -0
  215. empathy_software_plugin/wizards/rag_pattern_wizard.py +461 -0
  216. empathy_software_plugin/wizards/security/__init__.py +32 -0
  217. empathy_software_plugin/wizards/security/exploit_analyzer.py +290 -0
  218. empathy_software_plugin/wizards/security/owasp_patterns.py +241 -0
  219. empathy_software_plugin/wizards/security/vulnerability_scanner.py +604 -0
  220. empathy_software_plugin/wizards/security_analysis_wizard.py +322 -0
  221. empathy_software_plugin/wizards/security_learning_wizard.py +740 -0
  222. empathy_software_plugin/wizards/tech_debt_wizard.py +726 -0
  223. empathy_software_plugin/wizards/testing/__init__.py +27 -0
  224. empathy_software_plugin/wizards/testing/coverage_analyzer.py +459 -0
  225. empathy_software_plugin/wizards/testing/quality_analyzer.py +531 -0
  226. empathy_software_plugin/wizards/testing/test_suggester.py +533 -0
  227. empathy_software_plugin/wizards/testing_wizard.py +274 -0
  228. hot_reload/README.md +473 -0
  229. hot_reload/__init__.py +62 -0
  230. hot_reload/config.py +84 -0
  231. hot_reload/integration.py +228 -0
  232. hot_reload/reloader.py +298 -0
  233. hot_reload/watcher.py +179 -0
  234. hot_reload/websocket.py +176 -0
  235. scaffolding/README.md +589 -0
  236. scaffolding/__init__.py +35 -0
  237. scaffolding/__main__.py +14 -0
  238. scaffolding/cli.py +240 -0
  239. test_generator/__init__.py +38 -0
  240. test_generator/__main__.py +14 -0
  241. test_generator/cli.py +226 -0
  242. test_generator/generator.py +325 -0
  243. test_generator/risk_analyzer.py +216 -0
  244. workflow_patterns/__init__.py +33 -0
  245. workflow_patterns/behavior.py +249 -0
  246. workflow_patterns/core.py +76 -0
  247. workflow_patterns/output.py +99 -0
  248. workflow_patterns/registry.py +255 -0
  249. workflow_patterns/structural.py +288 -0
  250. workflow_scaffolding/__init__.py +11 -0
  251. workflow_scaffolding/__main__.py +12 -0
  252. workflow_scaffolding/cli.py +206 -0
  253. workflow_scaffolding/generator.py +265 -0
  254. agents/code_inspection/patterns/inspection/recurring_B112.json +0 -18
  255. agents/code_inspection/patterns/inspection/recurring_F541.json +0 -16
  256. agents/code_inspection/patterns/inspection/recurring_FORMAT.json +0 -25
  257. agents/code_inspection/patterns/inspection/recurring_bug_20250822_def456.json +0 -16
  258. agents/code_inspection/patterns/inspection/recurring_bug_20250915_abc123.json +0 -16
  259. agents/code_inspection/patterns/inspection/recurring_bug_20251212_3c5b9951.json +0 -16
  260. agents/code_inspection/patterns/inspection/recurring_bug_20251212_97c0f72f.json +0 -16
  261. agents/code_inspection/patterns/inspection/recurring_bug_20251212_a0871d53.json +0 -16
  262. agents/code_inspection/patterns/inspection/recurring_bug_20251212_a9b6ec41.json +0 -16
  263. agents/code_inspection/patterns/inspection/recurring_bug_null_001.json +0 -16
  264. agents/code_inspection/patterns/inspection/recurring_builtin.json +0 -16
  265. agents/compliance_anticipation_agent.py +0 -1422
  266. agents/compliance_db.py +0 -339
  267. agents/epic_integration_wizard.py +0 -530
  268. agents/notifications.py +0 -291
  269. agents/trust_building_behaviors.py +0 -872
  270. empathy_framework-3.7.0.dist-info/RECORD +0 -105
  271. {empathy_framework-3.7.0.dist-info → empathy_framework-3.8.0.dist-info}/WHEEL +0 -0
  272. {empathy_framework-3.7.0.dist-info → empathy_framework-3.8.0.dist-info}/entry_points.txt +0 -0
  273. {empathy_framework-3.7.0.dist-info → empathy_framework-3.8.0.dist-info}/licenses/LICENSE +0 -0
  274. /empathy_os/{monitoring.py → agent_monitoring.py} +0 -0
empathy_os/workflows/refactor_plan.py
@@ -0,0 +1,693 @@
+ """Refactor Planning Workflow
+
+ Prioritizes tech debt based on trajectory analysis and impact assessment.
+ Uses historical tech debt data to identify trends and hotspots.
+
+ Stages:
+ 1. scan (CHEAP) - Scan for TODOs, FIXMEs, HACKs, complexity
+ 2. analyze (CAPABLE) - Analyze debt trajectory from patterns
+ 3. prioritize (CAPABLE) - Score by impact, effort, and risk
+ 4. plan (PREMIUM) - Generate prioritized refactoring roadmap (conditional)
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+ import json
+ import logging
+ import re
+ from pathlib import Path
+ from typing import Any
+
+ from .base import BaseWorkflow, ModelTier
+ from .step_config import WorkflowStepConfig
+
+ logger = logging.getLogger(__name__)
+
+ # Define step configurations for executor-based execution
+ REFACTOR_PLAN_STEPS = {
+     "plan": WorkflowStepConfig(
+         name="plan",
+         task_type="architectural_decision", # Premium tier task
+         tier_hint="premium",
+         description="Generate prioritized refactoring roadmap",
+         max_tokens=3000,
+     ),
+ }
+
+ # Debt markers and their severity
+ DEBT_MARKERS = {
+     "TODO": {"severity": "low", "weight": 1},
+     "FIXME": {"severity": "medium", "weight": 3},
+     "HACK": {"severity": "high", "weight": 5},
+     "XXX": {"severity": "medium", "weight": 3},
+     "BUG": {"severity": "high", "weight": 5},
+     "OPTIMIZE": {"severity": "low", "weight": 2},
+     "REFACTOR": {"severity": "medium", "weight": 3},
+ }
+
+
+ class RefactorPlanWorkflow(BaseWorkflow):
+     """Prioritize tech debt with trajectory analysis.
+
+     Analyzes tech debt trends over time to identify growing
+     problem areas and generate prioritized refactoring plans.
+     """
+
+     name = "refactor-plan"
+     description = "Prioritize tech debt based on trajectory and impact"
+     stages = ["scan", "analyze", "prioritize", "plan"]
+     tier_map = {
+         "scan": ModelTier.CHEAP,
+         "analyze": ModelTier.CAPABLE,
+         "prioritize": ModelTier.CAPABLE,
+         "plan": ModelTier.PREMIUM,
+     }
+
+     def __init__(
+         self,
+         patterns_dir: str = "./patterns",
+         min_debt_for_premium: int = 50,
+         use_crew_for_analysis: bool = True,
+         crew_config: dict | None = None,
+         **kwargs: Any,
+     ):
+         """Initialize refactor planning workflow.
+
+         Args:
+             patterns_dir: Directory containing tech debt history
+             min_debt_for_premium: Minimum debt items to use premium planning
+             use_crew_for_analysis: Use RefactoringCrew for enhanced code analysis (default: True)
+             crew_config: Configuration dict for RefactoringCrew
+             **kwargs: Additional arguments passed to BaseWorkflow
+
+         """
+         super().__init__(**kwargs)
+         self.patterns_dir = patterns_dir
+         self.min_debt_for_premium = min_debt_for_premium
+         self.use_crew_for_analysis = use_crew_for_analysis
+         self.crew_config = crew_config or {}
+         self._total_debt: int = 0
+         self._debt_history: list[dict] = []
+         self._crew: Any = None
+         self._crew_available = False
+         self._load_debt_history()
+
+     def _load_debt_history(self) -> None:
+         """Load tech debt history from pattern library."""
+         debt_file = Path(self.patterns_dir) / "tech_debt.json"
+         if debt_file.exists():
+             try:
+                 with open(debt_file) as f:
+                     data = json.load(f)
+                 self._debt_history = data.get("snapshots", [])
+             except (json.JSONDecodeError, OSError):
+                 pass
+
+     async def _initialize_crew(self) -> None:
+         """Initialize the RefactoringCrew."""
+         if self._crew is not None:
+             return
+
+         try:
+             from empathy_llm_toolkit.agent_factory.crews.refactoring import RefactoringCrew
+
+             self._crew = RefactoringCrew()
+             self._crew_available = True
+             logger.info("RefactoringCrew initialized successfully")
+         except ImportError as e:
+             logger.warning(f"RefactoringCrew not available: {e}")
+             self._crew_available = False
+
+     def should_skip_stage(self, stage_name: str, input_data: Any) -> tuple[bool, str | None]:
+         """Downgrade plan stage if debt is low.
+
+         Args:
+             stage_name: Name of the stage to check
+             input_data: Current workflow data
+
+         Returns:
+             Tuple of (should_skip, reason)
+
+         """
+         if stage_name == "plan":
+             if self._total_debt < self.min_debt_for_premium:
+                 self.tier_map["plan"] = ModelTier.CAPABLE
+             return False, None
+         return False, None
+
+     async def run_stage(
+         self,
+         stage_name: str,
+         tier: ModelTier,
+         input_data: Any,
+     ) -> tuple[Any, int, int]:
+         """Route to specific stage implementation."""
+         if stage_name == "scan":
+             return await self._scan(input_data, tier)
+         if stage_name == "analyze":
+             return await self._analyze(input_data, tier)
+         if stage_name == "prioritize":
+             return await self._prioritize(input_data, tier)
+         if stage_name == "plan":
+             return await self._plan(input_data, tier)
+         raise ValueError(f"Unknown stage: {stage_name}")
+
+     async def _scan(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
+         """Scan codebase for tech debt markers.
+
+         Finds TODOs, FIXMEs, HACKs and other debt indicators.
+         """
+         target_path = input_data.get("path", ".")
+         file_types = input_data.get("file_types", [".py", ".ts", ".tsx", ".js"])
+
+         debt_items: list[dict] = []
+         files_scanned = 0
+
+         target = Path(target_path)
+         if target.exists():
+             for ext in file_types:
+                 for file_path in target.rglob(f"*{ext}"):
+                     if any(
+                         skip in str(file_path)
+                         for skip in [".git", "node_modules", "__pycache__", "venv"]
+                     ):
+                         continue
+
+                     try:
+                         content = file_path.read_text(errors="ignore")
+                         files_scanned += 1
+
+                         for marker, info in DEBT_MARKERS.items():
+                             pattern = rf"#\s*{marker}[:\s]*(.*?)(?:\n|$)"
+                             for match in re.finditer(pattern, content, re.IGNORECASE):
+                                 line_num = content[: match.start()].count("\n") + 1
+                                 debt_items.append(
+                                     {
+                                         "file": str(file_path),
+                                         "line": line_num,
+                                         "marker": marker,
+                                         "message": match.group(1).strip()[:100],
+                                         "severity": info["severity"],
+                                         "weight": info["weight"],
+                                     },
+                                 )
+                     except OSError:
+                         continue
+
+         self._total_debt = len(debt_items)
+
+         # Group by file
+         by_file: dict[str, int] = {}
+         for item in debt_items:
+             f = item["file"]
+             by_file[f] = by_file.get(f, 0) + 1
+
+         # By marker type
+         by_marker: dict[str, int] = {}
+         for item in debt_items:
+             m = item["marker"]
+             by_marker[m] = by_marker.get(m, 0) + 1
+
+         input_tokens = len(str(input_data)) // 4
+         output_tokens = len(str(debt_items)) // 4
+
+         return (
+             {
+                 "debt_items": debt_items,
+                 "total_debt": self._total_debt,
+                 "files_scanned": files_scanned,
+                 "by_file": dict(sorted(by_file.items(), key=lambda x: -x[1])[:20]),
+                 "by_marker": by_marker,
+                 **input_data,
+             },
+             input_tokens,
+             output_tokens,
+         )
+
+     async def _analyze(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
+         """Analyze debt trajectory from historical data.
+
+         Compares current debt with historical snapshots to
+         identify trends and growing problem areas.
+         """
+         current_total = input_data.get("total_debt", 0)
+         by_file = input_data.get("by_file", {})
+
+         # Analyze trajectory
+         trajectory = "stable"
+         velocity = 0.0
+
+         if self._debt_history and len(self._debt_history) >= 2:
+             oldest = self._debt_history[0].get("total_items", 0)
+             newest = self._debt_history[-1].get("total_items", 0)
+
+             change = newest - oldest
+             if change > 10:
+                 trajectory = "increasing"
+             elif change < -10:
+                 trajectory = "decreasing"
+
+             # Calculate velocity (items per snapshot)
+             velocity = change / len(self._debt_history)
+
+         # Identify hotspots (files with most debt and increasing)
+         hotspots: list[dict] = []
+         for file_path, count in list(by_file.items())[:10]:
+             hotspots.append(
+                 {
+                     "file": file_path,
+                     "debt_count": count,
+                     "trend": "stable", # Would compare with history
+                 },
+             )
+
+         analysis = {
+             "trajectory": trajectory,
+             "velocity": round(velocity, 2),
+             "current_total": current_total,
+             "historical_snapshots": len(self._debt_history),
+             "hotspots": hotspots,
+         }
+
+         input_tokens = len(str(input_data)) // 4
+         output_tokens = len(str(analysis)) // 4
+
+         return (
+             {
+                 "analysis": analysis,
+                 **input_data,
+             },
+             input_tokens,
+             output_tokens,
+         )
+
+     async def _prioritize(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
+         """Score debt items by impact, effort, and risk.
+
+         Calculates priority scores considering multiple factors.
+         When use_crew_for_analysis=True, uses RefactoringCrew for
+         enhanced refactoring opportunity detection.
+         """
+         await self._initialize_crew()
+
+         debt_items = input_data.get("debt_items", [])
+         analysis = input_data.get("analysis", {})
+         hotspots = {h["file"] for h in analysis.get("hotspots", [])}
+
+         prioritized: list[dict] = []
+         for item in debt_items:
+             # Calculate priority score
+             base_weight = item.get("weight", 1)
+
+             # Bonus for hotspot files
+             hotspot_bonus = 2 if item["file"] in hotspots else 0
+
+             # Severity factor
+             severity_factor = {
+                 "high": 3,
+                 "medium": 2,
+                 "low": 1,
+             }.get(item.get("severity", "low"), 1)
+
+             priority_score = (base_weight * severity_factor) + hotspot_bonus
+
+             prioritized.append(
+                 {
+                     **item,
+                     "priority_score": priority_score,
+                     "is_hotspot": item["file"] in hotspots,
+                 },
+             )
+
+         # Sort by priority
+         prioritized.sort(key=lambda x: -x["priority_score"])
+
+         # Group into priority tiers (single pass instead of 3 scans)
+         high_priority: list[dict] = []
+         medium_priority: list[dict] = []
+         low_priority: list[dict] = []
+         for p in prioritized:
+             score = p["priority_score"]
+             if score >= 10:
+                 high_priority.append(p)
+             elif score >= 5:
+                 medium_priority.append(p)
+             else:
+                 low_priority.append(p)
+
+         # Use crew for enhanced refactoring analysis if available
+         crew_enhanced = False
+         crew_findings = []
+         if self.use_crew_for_analysis and self._crew_available:
+             try:
+                 # Analyze hotspot files with the crew
+                 for hotspot in list(hotspots)[:5]: # Analyze top 5 hotspots
+                     try:
+                         code_content = Path(hotspot).read_text(errors="ignore")
+                         crew_result = await self._crew.analyze(code=code_content, file_path=hotspot)
+                         if crew_result and crew_result.findings:
+                             crew_enhanced = True
+                             # Convert crew findings to workflow format
+                             for finding in crew_result.findings:
+                                 crew_findings.append(
+                                     {
+                                         "file": finding.file_path or hotspot,
+                                         "line": finding.start_line or 0,
+                                         "marker": "REFACTOR",
+                                         "message": finding.title,
+                                         "description": finding.description,
+                                         "severity": finding.severity.value,
+                                         "category": finding.category.value,
+                                         "priority_score": (
+                                             15 if finding.severity.value == "high" else 10
+                                         ),
+                                         "is_hotspot": True,
+                                         "source": "crew",
+                                     }
+                                 )
+                     except Exception as e:
+                         logger.debug(f"Crew analysis failed for {hotspot}: {e}")
+                         continue
+
+                 # Add crew findings to high priority if they're high severity
+                 if crew_findings:
+                     for cf in crew_findings:
+                         if cf["priority_score"] >= 10:
+                             high_priority.append(cf)
+             except Exception as e:
+                 logger.warning(f"Crew analysis failed: {e}")
+
+         input_tokens = len(str(input_data)) // 4
+         output_tokens = len(str(prioritized)) // 4
+
+         return (
+             {
+                 "prioritized_items": prioritized[:50], # Top 50
+                 "high_priority": high_priority[:20],
+                 "medium_priority": medium_priority[:20],
+                 "low_priority_count": len(low_priority),
+                 "crew_enhanced": crew_enhanced,
+                 "crew_findings_count": len(crew_findings),
+                 **input_data,
+             },
+             input_tokens,
+             output_tokens,
+         )
+
+     async def _plan(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
+         """Generate prioritized refactoring roadmap using LLM.
+
+         Creates actionable refactoring plan based on priorities.
+
+         Supports XML-enhanced prompts when enabled in workflow config.
+         """
+         high_priority = input_data.get("high_priority", [])
+         medium_priority = input_data.get("medium_priority", [])
+         analysis = input_data.get("analysis", {})
+         target = input_data.get("target", "")
+
+         # Build high priority summary for LLM
+         high_summary = []
+         for item in high_priority[:15]:
+             high_summary.append(
+                 f"- {item.get('file')}:{item.get('line')} [{item.get('marker')}] "
+                 f"{item.get('message', '')[:50]}",
+             )
+
+         # Build input payload for prompt
+         input_payload = f"""Target: {target or "codebase"}
+
+ Total Debt Items: {input_data.get("total_debt", 0)}
+ Trajectory: {analysis.get("trajectory", "unknown")}
+ Velocity: {analysis.get("velocity", 0)} items/snapshot
+
+ High Priority Items ({len(high_priority)}):
+ {chr(10).join(high_summary) if high_summary else "None"}
+
+ Medium Priority Items: {len(medium_priority)}
+ Hotspot Files: {json.dumps([h.get("file") for h in analysis.get("hotspots", [])[:5]], indent=2)}"""
+
+         # Check if XML prompts are enabled
+         if self._is_xml_enabled():
+             # Use XML-enhanced prompt
+             user_message = self._render_xml_prompt(
+                 role="software architect specializing in technical debt management",
+                 goal="Generate a prioritized refactoring roadmap to reduce technical debt",
+                 instructions=[
+                     "Analyze the debt trajectory and identify root causes",
+                     "Create a phased roadmap with clear milestones",
+                     "Prioritize items by impact and effort",
+                     "Provide specific refactoring strategies for each phase",
+                     "Include prevention measures to stop new debt accumulation",
+                 ],
+                 constraints=[
+                     "Be specific about which files to refactor",
+                     "Include effort estimates (high/medium/low)",
+                     "Focus on sustainable debt reduction",
+                 ],
+                 input_type="tech_debt_analysis",
+                 input_payload=input_payload,
+                 extra={
+                     "total_debt": input_data.get("total_debt", 0),
+                     "trajectory": analysis.get("trajectory", "unknown"),
+                 },
+             )
+             system = None # XML prompt includes all context
+         else:
+             # Use legacy plain text prompts
+             system = """You are a software architect specializing in technical debt management.
+ Create a prioritized refactoring roadmap based on the debt analysis.
+
+ For each phase:
+ 1. Define clear goals and milestones
+ 2. Prioritize by impact and effort
+ 3. Provide specific refactoring strategies
+ 4. Include prevention measures
+
+ Be specific and actionable."""
+
+             user_message = f"""Generate a refactoring roadmap for this tech debt:
+
+ {input_payload}
+
+ Create a phased approach to reduce debt sustainably."""
+
+         # Try executor-based execution first (Phase 3 pattern)
+         if self._executor is not None or self._api_key:
+             try:
+                 step = REFACTOR_PLAN_STEPS["plan"]
+                 response, input_tokens, output_tokens, cost = await self.run_step_with_executor(
+                     step=step,
+                     prompt=user_message,
+                     system=system,
+                 )
+             except (RuntimeError, ValueError, TypeError, KeyError, AttributeError) as e:
+                 # INTENTIONAL: Graceful fallback to legacy _call_llm if executor fails
+                 # Catches executor/API/parsing errors during new execution path
+                 logger.warning(f"Executor failed, falling back to legacy path: {e}")
+                 response, input_tokens, output_tokens = await self._call_llm(
+                     tier,
+                     system or "",
+                     user_message,
+                     max_tokens=3000,
+                 )
+         else:
+             # Legacy path for backward compatibility
+             response, input_tokens, output_tokens = await self._call_llm(
+                 tier,
+                 system or "",
+                 user_message,
+                 max_tokens=3000,
+             )
+
+         # Parse XML response if enforcement is enabled
+         parsed_data = self._parse_xml_response(response)
+
+         # Summary
+         summary = {
+             "total_debt": input_data.get("total_debt", 0),
+             "trajectory": analysis.get("trajectory", "unknown"),
+             "high_priority_count": len(high_priority),
+         }
+
+         result: dict = {
+             "refactoring_plan": response,
+             "summary": summary,
+             "model_tier_used": tier.value,
+         }
+
+         # Merge parsed XML data if available
+         if parsed_data.get("xml_parsed"):
+             result.update(
+                 {
+                     "xml_parsed": True,
+                     "plan_summary": parsed_data.get("summary"),
+                     "findings": parsed_data.get("findings", []),
+                     "checklist": parsed_data.get("checklist", []),
+                 },
+             )
+
+         # Add formatted report for human readability
+         result["formatted_report"] = format_refactor_plan_report(result, input_data)
+
+         return (
+             result,
+             input_tokens,
+             output_tokens,
+         )
+
+
+ def format_refactor_plan_report(result: dict, input_data: dict) -> str:
+     """Format refactor plan output as a human-readable report.
+
+     Args:
+         result: The plan stage result
+         input_data: Input data from previous stages
+
+     Returns:
+         Formatted report string
+
+     """
+     lines = []
+
+     # Header with trajectory
+     summary = result.get("summary", {})
+     total_debt = summary.get("total_debt", 0)
+     trajectory = summary.get("trajectory", "unknown")
+     high_priority_count = summary.get("high_priority_count", 0)
+
+     # Trajectory icon
+     if trajectory == "increasing":
+         traj_icon = "📈"
+         traj_text = "INCREASING"
+     elif trajectory == "decreasing":
+         traj_icon = "📉"
+         traj_text = "DECREASING"
+     else:
+         traj_icon = "➡️"
+         traj_text = "STABLE"
+
+     lines.append("=" * 60)
+     lines.append("REFACTOR PLAN REPORT")
+     lines.append("=" * 60)
+     lines.append("")
+     lines.append(f"Total Tech Debt Items: {total_debt}")
+     lines.append(f"Trajectory: {traj_icon} {traj_text}")
+     lines.append(f"High Priority Items: {high_priority_count}")
+     lines.append("")
+
+     # Scan summary
+     by_marker: dict[str, int] = input_data.get("by_marker", {})
+     files_scanned = input_data.get("files_scanned", 0)
+
+     lines.append("-" * 60)
+     lines.append("DEBT SCAN SUMMARY")
+     lines.append("-" * 60)
+     lines.append(f"Files Scanned: {files_scanned}")
+     if by_marker:
+         lines.append("By Marker Type:")
+         for marker, count in sorted(by_marker.items(), key=lambda x: -x[1]):
+             marker_info = DEBT_MARKERS.get(marker, {"severity": "low", "weight": 1})
+             severity = str(marker_info.get("severity", "low"))
+             sev_icon = {"high": "🔴", "medium": "🟡", "low": "🟢"}.get(severity, "⚪")
+             lines.append(f" {sev_icon} {marker}: {count}")
+     lines.append("")
+
+     # Analysis
+     analysis = input_data.get("analysis", {})
+     if analysis:
+         lines.append("-" * 60)
+         lines.append("TRAJECTORY ANALYSIS")
+         lines.append("-" * 60)
+         velocity = analysis.get("velocity", 0)
+         snapshots = analysis.get("historical_snapshots", 0)
+
+         lines.append(f"Historical Snapshots: {snapshots}")
+         if velocity != 0:
+             velocity_text = f"+{velocity}" if velocity > 0 else str(velocity)
+             lines.append(f"Velocity: {velocity_text} items/snapshot")
+         lines.append("")
+
+     # Hotspots
+     hotspots = analysis.get("hotspots", [])
+     if hotspots:
+         lines.append("-" * 60)
+         lines.append("🔥 HOTSPOT FILES")
+         lines.append("-" * 60)
+         for h in hotspots[:10]:
+             file_path = h.get("file", "unknown")
+             debt_count = h.get("debt_count", 0)
+             lines.append(f" • {file_path}")
+             lines.append(f" {debt_count} debt items")
+         lines.append("")
+
+     # High priority items
+     high_priority = input_data.get("high_priority", [])
+     if high_priority:
+         lines.append("-" * 60)
+         lines.append("🔴 HIGH PRIORITY ITEMS")
+         lines.append("-" * 60)
+         for item in high_priority[:10]:
+             file_path = item.get("file", "unknown")
+             line = item.get("line", "?")
+             marker = item.get("marker", "DEBT")
+             message = item.get("message", "")[:50]
+             score = item.get("priority_score", 0)
+             hotspot = "🔥" if item.get("is_hotspot") else ""
+             lines.append(f" [{marker}] {file_path}:{line} {hotspot}")
+             lines.append(f" {message} (score: {score})")
+         if len(high_priority) > 10:
+             lines.append(f" ... and {len(high_priority) - 10} more")
+         lines.append("")
+
+     # Refactoring plan from LLM
+     refactoring_plan = result.get("refactoring_plan", "")
+     if refactoring_plan and not refactoring_plan.startswith("[Simulated"):
+         lines.append("-" * 60)
+         lines.append("REFACTORING ROADMAP")
+         lines.append("-" * 60)
+         if len(refactoring_plan) > 2000:
+             lines.append(refactoring_plan[:2000] + "...")
+         else:
+             lines.append(refactoring_plan)
+         lines.append("")
+
+     # Footer
+     lines.append("=" * 60)
+     model_tier = result.get("model_tier_used", "unknown")
+     lines.append(f"Analyzed {total_debt} debt items using {model_tier} tier model")
+     lines.append("=" * 60)
+
+     return "\n".join(lines)
+
+
+ def main():
+     """CLI entry point for refactor planning workflow."""
+     import asyncio
+
+     async def run():
+         workflow = RefactorPlanWorkflow()
+         result = await workflow.execute(path=".", file_types=[".py"])
+
+         print("\nRefactor Plan Results")
+         print("=" * 50)
+         print(f"Provider: {result.provider}")
+         print(f"Success: {result.success}")
+
+         summary = result.final_output.get("summary", {})
+         print(f"Total Debt: {summary.get('total_debt', 0)} items")
+         print(f"Trajectory: {summary.get('trajectory', 'N/A')}")
+         print(f"High Priority: {summary.get('high_priority_count', 0)}")
+
+         print("\nCost Report:")
+         print(f" Total Cost: ${result.cost_report.total_cost:.4f}")
+         savings = result.cost_report.savings
+         pct = result.cost_report.savings_percent
+         print(f" Savings: ${savings:.4f} ({pct:.1f}%)")
+
+     asyncio.run(run())
+
+
+ if __name__ == "__main__":
+     main()
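
A note on the new workflow's inputs: _load_debt_history() reads <patterns_dir>/tech_debt.json and keeps only its "snapshots" list, and _analyze() reads a "total_items" count from the first and last snapshot to classify the trajectory and compute velocity. The sketch below is an illustration rather than part of the package diff: it seeds such a file and drives the workflow through the execute() interface used by main() above. The snapshot schema beyond "total_items", and whatever credentials or executor configuration BaseWorkflow needs for the plan stage, are assumptions.

# Illustrative sketch only (not part of the package). Assumes the import path
# empathy_os.workflows.refactor_plan shown in the diff and a history file shaped
# as {"snapshots": [{"total_items": N}, ...]}, which is all that
# _load_debt_history() and _analyze() actually read.
import asyncio
import json
from pathlib import Path

from empathy_os.workflows.refactor_plan import RefactorPlanWorkflow


async def demo() -> None:
    patterns = Path("./patterns")
    patterns.mkdir(exist_ok=True)
    # Two snapshots are enough for _analyze() to compute trajectory and velocity:
    # 55 - 40 = 15 > 10, so the trajectory is reported as "increasing".
    (patterns / "tech_debt.json").write_text(
        json.dumps({"snapshots": [{"total_items": 40}, {"total_items": 55}]})
    )

    workflow = RefactorPlanWorkflow(patterns_dir=str(patterns), min_debt_for_premium=50)
    result = await workflow.execute(path=".", file_types=[".py"])

    # final_output carries the plan-stage result, including the formatted report.
    print(result.final_output.get("formatted_report", ""))


if __name__ == "__main__":
    asyncio.run(demo())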