empathy-framework 4.6.6__py3-none-any.whl → 4.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/METADATA +7 -6
- empathy_framework-4.7.0.dist-info/RECORD +354 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/top_level.txt +0 -2
- empathy_healthcare_plugin/monitors/monitoring/__init__.py +9 -9
- empathy_llm_toolkit/agent_factory/__init__.py +6 -6
- empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +7 -10
- empathy_llm_toolkit/agents_md/__init__.py +22 -0
- empathy_llm_toolkit/agents_md/loader.py +218 -0
- empathy_llm_toolkit/agents_md/parser.py +271 -0
- empathy_llm_toolkit/agents_md/registry.py +307 -0
- empathy_llm_toolkit/commands/__init__.py +51 -0
- empathy_llm_toolkit/commands/context.py +375 -0
- empathy_llm_toolkit/commands/loader.py +301 -0
- empathy_llm_toolkit/commands/models.py +231 -0
- empathy_llm_toolkit/commands/parser.py +371 -0
- empathy_llm_toolkit/commands/registry.py +429 -0
- empathy_llm_toolkit/config/__init__.py +8 -8
- empathy_llm_toolkit/config/unified.py +3 -7
- empathy_llm_toolkit/context/__init__.py +22 -0
- empathy_llm_toolkit/context/compaction.py +455 -0
- empathy_llm_toolkit/context/manager.py +434 -0
- empathy_llm_toolkit/hooks/__init__.py +24 -0
- empathy_llm_toolkit/hooks/config.py +306 -0
- empathy_llm_toolkit/hooks/executor.py +289 -0
- empathy_llm_toolkit/hooks/registry.py +302 -0
- empathy_llm_toolkit/hooks/scripts/__init__.py +39 -0
- empathy_llm_toolkit/hooks/scripts/evaluate_session.py +201 -0
- empathy_llm_toolkit/hooks/scripts/first_time_init.py +285 -0
- empathy_llm_toolkit/hooks/scripts/pre_compact.py +207 -0
- empathy_llm_toolkit/hooks/scripts/session_end.py +183 -0
- empathy_llm_toolkit/hooks/scripts/session_start.py +163 -0
- empathy_llm_toolkit/hooks/scripts/suggest_compact.py +225 -0
- empathy_llm_toolkit/learning/__init__.py +30 -0
- empathy_llm_toolkit/learning/evaluator.py +438 -0
- empathy_llm_toolkit/learning/extractor.py +514 -0
- empathy_llm_toolkit/learning/storage.py +560 -0
- empathy_llm_toolkit/providers.py +4 -11
- empathy_llm_toolkit/security/__init__.py +17 -17
- empathy_llm_toolkit/utils/tokens.py +2 -5
- empathy_os/__init__.py +202 -70
- empathy_os/cache_monitor.py +5 -3
- empathy_os/cli/__init__.py +11 -55
- empathy_os/cli/__main__.py +29 -15
- empathy_os/cli/commands/inspection.py +21 -12
- empathy_os/cli/commands/memory.py +4 -12
- empathy_os/cli/commands/profiling.py +198 -0
- empathy_os/cli/commands/utilities.py +27 -7
- empathy_os/cli.py +28 -57
- empathy_os/cli_unified.py +525 -1164
- empathy_os/cost_tracker.py +9 -3
- empathy_os/dashboard/server.py +200 -2
- empathy_os/hot_reload/__init__.py +7 -7
- empathy_os/hot_reload/config.py +6 -7
- empathy_os/hot_reload/integration.py +35 -35
- empathy_os/hot_reload/reloader.py +57 -57
- empathy_os/hot_reload/watcher.py +28 -28
- empathy_os/hot_reload/websocket.py +2 -2
- empathy_os/memory/__init__.py +11 -4
- empathy_os/memory/claude_memory.py +1 -1
- empathy_os/memory/cross_session.py +8 -12
- empathy_os/memory/edges.py +6 -6
- empathy_os/memory/file_session.py +770 -0
- empathy_os/memory/graph.py +30 -30
- empathy_os/memory/nodes.py +6 -6
- empathy_os/memory/short_term.py +15 -9
- empathy_os/memory/unified.py +606 -140
- empathy_os/meta_workflows/agent_creator.py +3 -9
- empathy_os/meta_workflows/cli_meta_workflows.py +113 -53
- empathy_os/meta_workflows/form_engine.py +6 -18
- empathy_os/meta_workflows/intent_detector.py +64 -24
- empathy_os/meta_workflows/models.py +3 -1
- empathy_os/meta_workflows/pattern_learner.py +13 -31
- empathy_os/meta_workflows/plan_generator.py +55 -47
- empathy_os/meta_workflows/session_context.py +2 -3
- empathy_os/meta_workflows/workflow.py +20 -51
- empathy_os/models/cli.py +2 -2
- empathy_os/models/tasks.py +1 -2
- empathy_os/models/telemetry.py +4 -1
- empathy_os/models/token_estimator.py +3 -1
- empathy_os/monitoring/alerts.py +938 -9
- empathy_os/monitoring/alerts_cli.py +346 -183
- empathy_os/orchestration/execution_strategies.py +12 -29
- empathy_os/orchestration/pattern_learner.py +20 -26
- empathy_os/orchestration/real_tools.py +6 -15
- empathy_os/platform_utils.py +2 -1
- empathy_os/plugins/__init__.py +2 -2
- empathy_os/plugins/base.py +64 -64
- empathy_os/plugins/registry.py +32 -32
- empathy_os/project_index/index.py +49 -15
- empathy_os/project_index/models.py +1 -2
- empathy_os/project_index/reports.py +1 -1
- empathy_os/project_index/scanner.py +1 -0
- empathy_os/redis_memory.py +10 -7
- empathy_os/resilience/__init__.py +1 -1
- empathy_os/resilience/health.py +10 -10
- empathy_os/routing/__init__.py +7 -7
- empathy_os/routing/chain_executor.py +37 -37
- empathy_os/routing/classifier.py +36 -36
- empathy_os/routing/smart_router.py +40 -40
- empathy_os/routing/{wizard_registry.py → workflow_registry.py} +47 -47
- empathy_os/scaffolding/__init__.py +8 -8
- empathy_os/scaffolding/__main__.py +1 -1
- empathy_os/scaffolding/cli.py +28 -28
- empathy_os/socratic/__init__.py +3 -19
- empathy_os/socratic/ab_testing.py +25 -36
- empathy_os/socratic/blueprint.py +38 -38
- empathy_os/socratic/cli.py +34 -20
- empathy_os/socratic/collaboration.py +30 -28
- empathy_os/socratic/domain_templates.py +9 -1
- empathy_os/socratic/embeddings.py +17 -13
- empathy_os/socratic/engine.py +135 -70
- empathy_os/socratic/explainer.py +70 -60
- empathy_os/socratic/feedback.py +24 -19
- empathy_os/socratic/forms.py +15 -10
- empathy_os/socratic/generator.py +51 -35
- empathy_os/socratic/llm_analyzer.py +25 -23
- empathy_os/socratic/mcp_server.py +99 -159
- empathy_os/socratic/session.py +19 -13
- empathy_os/socratic/storage.py +98 -67
- empathy_os/socratic/success.py +38 -27
- empathy_os/socratic/visual_editor.py +51 -39
- empathy_os/socratic/web_ui.py +99 -66
- empathy_os/telemetry/cli.py +3 -1
- empathy_os/telemetry/usage_tracker.py +1 -3
- empathy_os/test_generator/__init__.py +3 -3
- empathy_os/test_generator/cli.py +28 -28
- empathy_os/test_generator/generator.py +64 -66
- empathy_os/test_generator/risk_analyzer.py +11 -11
- empathy_os/vscode_bridge.py +173 -0
- empathy_os/workflows/__init__.py +212 -120
- empathy_os/workflows/batch_processing.py +8 -24
- empathy_os/workflows/bug_predict.py +1 -1
- empathy_os/workflows/code_review.py +20 -5
- empathy_os/workflows/code_review_pipeline.py +13 -8
- empathy_os/workflows/keyboard_shortcuts/workflow.py +6 -2
- empathy_os/workflows/manage_documentation.py +1 -0
- empathy_os/workflows/orchestrated_health_check.py +6 -11
- empathy_os/workflows/orchestrated_release_prep.py +3 -3
- empathy_os/workflows/pr_review.py +18 -10
- empathy_os/workflows/progressive/__init__.py +2 -12
- empathy_os/workflows/progressive/cli.py +14 -37
- empathy_os/workflows/progressive/core.py +12 -12
- empathy_os/workflows/progressive/orchestrator.py +166 -144
- empathy_os/workflows/progressive/reports.py +22 -31
- empathy_os/workflows/progressive/telemetry.py +8 -14
- empathy_os/workflows/progressive/test_gen.py +29 -48
- empathy_os/workflows/progressive/workflow.py +31 -70
- empathy_os/workflows/release_prep.py +21 -6
- empathy_os/workflows/release_prep_crew.py +1 -0
- empathy_os/workflows/secure_release.py +13 -6
- empathy_os/workflows/security_audit.py +8 -3
- empathy_os/workflows/test_coverage_boost_crew.py +3 -2
- empathy_os/workflows/test_maintenance_crew.py +1 -0
- empathy_os/workflows/test_runner.py +16 -12
- empathy_software_plugin/SOFTWARE_PLUGIN_README.md +25 -703
- empathy_software_plugin/cli.py +0 -122
- coach_wizards/__init__.py +0 -45
- coach_wizards/accessibility_wizard.py +0 -91
- coach_wizards/api_wizard.py +0 -91
- coach_wizards/base_wizard.py +0 -209
- coach_wizards/cicd_wizard.py +0 -91
- coach_wizards/code_reviewer_README.md +0 -60
- coach_wizards/code_reviewer_wizard.py +0 -180
- coach_wizards/compliance_wizard.py +0 -91
- coach_wizards/database_wizard.py +0 -91
- coach_wizards/debugging_wizard.py +0 -91
- coach_wizards/documentation_wizard.py +0 -91
- coach_wizards/generate_wizards.py +0 -347
- coach_wizards/localization_wizard.py +0 -173
- coach_wizards/migration_wizard.py +0 -91
- coach_wizards/monitoring_wizard.py +0 -91
- coach_wizards/observability_wizard.py +0 -91
- coach_wizards/performance_wizard.py +0 -91
- coach_wizards/prompt_engineering_wizard.py +0 -661
- coach_wizards/refactoring_wizard.py +0 -91
- coach_wizards/scaling_wizard.py +0 -90
- coach_wizards/security_wizard.py +0 -92
- coach_wizards/testing_wizard.py +0 -91
- empathy_framework-4.6.6.dist-info/RECORD +0 -410
- empathy_llm_toolkit/wizards/__init__.py +0 -43
- empathy_llm_toolkit/wizards/base_wizard.py +0 -364
- empathy_llm_toolkit/wizards/customer_support_wizard.py +0 -190
- empathy_llm_toolkit/wizards/healthcare_wizard.py +0 -378
- empathy_llm_toolkit/wizards/patient_assessment_README.md +0 -64
- empathy_llm_toolkit/wizards/patient_assessment_wizard.py +0 -193
- empathy_llm_toolkit/wizards/technology_wizard.py +0 -209
- empathy_os/wizard_factory_cli.py +0 -170
- empathy_software_plugin/wizards/__init__.py +0 -42
- empathy_software_plugin/wizards/advanced_debugging_wizard.py +0 -395
- empathy_software_plugin/wizards/agent_orchestration_wizard.py +0 -511
- empathy_software_plugin/wizards/ai_collaboration_wizard.py +0 -503
- empathy_software_plugin/wizards/ai_context_wizard.py +0 -441
- empathy_software_plugin/wizards/ai_documentation_wizard.py +0 -503
- empathy_software_plugin/wizards/base_wizard.py +0 -288
- empathy_software_plugin/wizards/book_chapter_wizard.py +0 -519
- empathy_software_plugin/wizards/code_review_wizard.py +0 -604
- empathy_software_plugin/wizards/debugging/__init__.py +0 -50
- empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +0 -414
- empathy_software_plugin/wizards/debugging/config_loaders.py +0 -446
- empathy_software_plugin/wizards/debugging/fix_applier.py +0 -469
- empathy_software_plugin/wizards/debugging/language_patterns.py +0 -385
- empathy_software_plugin/wizards/debugging/linter_parsers.py +0 -470
- empathy_software_plugin/wizards/debugging/verification.py +0 -369
- empathy_software_plugin/wizards/enhanced_testing_wizard.py +0 -537
- empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +0 -816
- empathy_software_plugin/wizards/multi_model_wizard.py +0 -501
- empathy_software_plugin/wizards/pattern_extraction_wizard.py +0 -422
- empathy_software_plugin/wizards/pattern_retriever_wizard.py +0 -400
- empathy_software_plugin/wizards/performance/__init__.py +0 -9
- empathy_software_plugin/wizards/performance/bottleneck_detector.py +0 -221
- empathy_software_plugin/wizards/performance/profiler_parsers.py +0 -278
- empathy_software_plugin/wizards/performance/trajectory_analyzer.py +0 -429
- empathy_software_plugin/wizards/performance_profiling_wizard.py +0 -305
- empathy_software_plugin/wizards/prompt_engineering_wizard.py +0 -425
- empathy_software_plugin/wizards/rag_pattern_wizard.py +0 -461
- empathy_software_plugin/wizards/security/__init__.py +0 -32
- empathy_software_plugin/wizards/security/exploit_analyzer.py +0 -290
- empathy_software_plugin/wizards/security/owasp_patterns.py +0 -241
- empathy_software_plugin/wizards/security/vulnerability_scanner.py +0 -604
- empathy_software_plugin/wizards/security_analysis_wizard.py +0 -322
- empathy_software_plugin/wizards/security_learning_wizard.py +0 -740
- empathy_software_plugin/wizards/tech_debt_wizard.py +0 -726
- empathy_software_plugin/wizards/testing/__init__.py +0 -27
- empathy_software_plugin/wizards/testing/coverage_analyzer.py +0 -459
- empathy_software_plugin/wizards/testing/quality_analyzer.py +0 -525
- empathy_software_plugin/wizards/testing/test_suggester.py +0 -533
- empathy_software_plugin/wizards/testing_wizard.py +0 -274
- wizards/__init__.py +0 -82
- wizards/admission_assessment_wizard.py +0 -644
- wizards/care_plan.py +0 -321
- wizards/clinical_assessment.py +0 -769
- wizards/discharge_planning.py +0 -77
- wizards/discharge_summary_wizard.py +0 -468
- wizards/dosage_calculation.py +0 -497
- wizards/incident_report_wizard.py +0 -454
- wizards/medication_reconciliation.py +0 -85
- wizards/nursing_assessment.py +0 -171
- wizards/patient_education.py +0 -654
- wizards/quality_improvement.py +0 -705
- wizards/sbar_report.py +0 -324
- wizards/sbar_wizard.py +0 -608
- wizards/shift_handoff_wizard.py +0 -535
- wizards/soap_note_wizard.py +0 -679
- wizards/treatment_plan.py +0 -15
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/WHEEL +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,726 +0,0 @@
|
|
|
1
|
-
"""Tech Debt Tracking Wizard (Level 4)
|
|
2
|
-
|
|
3
|
-
Tracks technical debt over time and predicts when it will become critical.
|
|
4
|
-
Demonstrates Level 4 Anticipatory Empathy: predicts future problems
|
|
5
|
-
before they become urgent.
|
|
6
|
-
|
|
7
|
-
"At current trajectory, your tech debt will double in 90 days."
|
|
8
|
-
|
|
9
|
-
Key capabilities enabled by persistent memory:
|
|
10
|
-
- Historical debt tracking across sessions
|
|
11
|
-
- Trajectory analysis and prediction
|
|
12
|
-
- Hotspot identification over time
|
|
13
|
-
- Team patterns (who adds debt, who pays it down)
|
|
14
|
-
|
|
15
|
-
Copyright 2025 Smart AI Memory, LLC
|
|
16
|
-
Licensed under Fair Source 0.9
|
|
17
|
-
"""
|
|
18
|
-
|
|
19
|
-
import json
|
|
20
|
-
import logging
|
|
21
|
-
import re
|
|
22
|
-
from dataclasses import dataclass, field
|
|
23
|
-
from datetime import datetime, timedelta
|
|
24
|
-
from pathlib import Path
|
|
25
|
-
from typing import Any
|
|
26
|
-
|
|
27
|
-
from .base_wizard import BaseWizard
|
|
28
|
-
|
|
29
|
-
logger = logging.getLogger(__name__)
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
@dataclass
|
|
33
|
-
class DebtItem:
|
|
34
|
-
"""A single technical debt item"""
|
|
35
|
-
|
|
36
|
-
item_id: str
|
|
37
|
-
file_path: str
|
|
38
|
-
line_number: int
|
|
39
|
-
debt_type: str # TODO, fixme, hack, temporary, deprecated
|
|
40
|
-
content: str
|
|
41
|
-
severity: str # low, medium, high, critical
|
|
42
|
-
date_found: str
|
|
43
|
-
age_days: int = 0
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
@dataclass
|
|
47
|
-
class DebtSnapshot:
|
|
48
|
-
"""A point-in-time snapshot of technical debt"""
|
|
49
|
-
|
|
50
|
-
date: str
|
|
51
|
-
total_items: int
|
|
52
|
-
by_type: dict[str, int] = field(default_factory=dict)
|
|
53
|
-
by_severity: dict[str, int] = field(default_factory=dict)
|
|
54
|
-
by_file: dict[str, int] = field(default_factory=dict)
|
|
55
|
-
hotspots: list[str] = field(default_factory=list)
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
@dataclass
|
|
59
|
-
class DebtTrajectory:
|
|
60
|
-
"""Trajectory analysis of technical debt over time"""
|
|
61
|
-
|
|
62
|
-
current_total: int
|
|
63
|
-
previous_total: int
|
|
64
|
-
change_percent: float
|
|
65
|
-
trend: str # decreasing, stable, increasing, exploding
|
|
66
|
-
projection_30_days: int
|
|
67
|
-
projection_90_days: int
|
|
68
|
-
critical_threshold_days: int | None # Days until critical if continuing
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
class TechDebtWizard(BaseWizard):
|
|
72
|
-
"""Tech Debt Tracking Wizard - Level 4 Anticipatory
|
|
73
|
-
|
|
74
|
-
What's now possible that wasn't before:
|
|
75
|
-
|
|
76
|
-
WITHOUT PERSISTENT MEMORY (Before):
|
|
77
|
-
- Debt count is just a number (no context)
|
|
78
|
-
- No visibility into trends over time
|
|
79
|
-
- Surprises when debt becomes unmanageable
|
|
80
|
-
- No data to justify cleanup time
|
|
81
|
-
|
|
82
|
-
WITH PERSISTENT MEMORY (After):
|
|
83
|
-
- Track debt trajectory over months
|
|
84
|
-
- Predict when debt will become critical
|
|
85
|
-
- Identify hotspots that accumulate debt
|
|
86
|
-
- Justify cleanup with historical trends
|
|
87
|
-
|
|
88
|
-
Example:
|
|
89
|
-
>>> wizard = TechDebtWizard()
|
|
90
|
-
>>> result = await wizard.analyze({
|
|
91
|
-
... "project_path": ".",
|
|
92
|
-
... "track_history": True
|
|
93
|
-
... })
|
|
94
|
-
>>> print(result["trajectory"]["projection_90_days"])
|
|
95
|
-
# Shows predicted debt count in 90 days
|
|
96
|
-
|
|
97
|
-
"""
|
|
98
|
-
|
|
99
|
-
@property
|
|
100
|
-
def name(self) -> str:
|
|
101
|
-
return "Tech Debt Wizard"
|
|
102
|
-
|
|
103
|
-
@property
|
|
104
|
-
def level(self) -> int:
|
|
105
|
-
return 4 # Anticipatory
|
|
106
|
-
|
|
107
|
-
def __init__(self, pattern_storage_path: str = "./patterns/tech_debt"):
|
|
108
|
-
"""Initialize the tech debt tracking wizard.
|
|
109
|
-
|
|
110
|
-
Args:
|
|
111
|
-
pattern_storage_path: Path to git-based pattern storage for history
|
|
112
|
-
|
|
113
|
-
"""
|
|
114
|
-
super().__init__()
|
|
115
|
-
self.pattern_storage_path = Path(pattern_storage_path)
|
|
116
|
-
self.pattern_storage_path.mkdir(parents=True, exist_ok=True)
|
|
117
|
-
|
|
118
|
-
# Debt detection patterns
|
|
119
|
-
self.debt_patterns = {
|
|
120
|
-
"todo": [
|
|
121
|
-
r"#\s*TODO[:\s]",
|
|
122
|
-
r"//\s*TODO[:\s]",
|
|
123
|
-
r"/\*\s*TODO",
|
|
124
|
-
r"<!--\s*TODO",
|
|
125
|
-
],
|
|
126
|
-
"fixme": [
|
|
127
|
-
r"#\s*FIXME[:\s]",
|
|
128
|
-
r"//\s*FIXME[:\s]",
|
|
129
|
-
r"/\*\s*FIXME",
|
|
130
|
-
],
|
|
131
|
-
"hack": [
|
|
132
|
-
r"#\s*HACK[:\s]",
|
|
133
|
-
r"//\s*HACK[:\s]",
|
|
134
|
-
r"/\*\s*HACK",
|
|
135
|
-
r"#\s*XXX[:\s]",
|
|
136
|
-
r"//\s*XXX[:\s]",
|
|
137
|
-
],
|
|
138
|
-
"temporary": [
|
|
139
|
-
r"#\s*TEMP[:\s]",
|
|
140
|
-
r"//\s*TEMP[:\s]",
|
|
141
|
-
r"temporary",
|
|
142
|
-
r"workaround",
|
|
143
|
-
],
|
|
144
|
-
"deprecated": [
|
|
145
|
-
r"@deprecated",
|
|
146
|
-
r"#\s*DEPRECATED",
|
|
147
|
-
r"//\s*DEPRECATED",
|
|
148
|
-
],
|
|
149
|
-
}
|
|
150
|
-
|
|
151
|
-
# Severity keywords
|
|
152
|
-
self.severity_indicators = {
|
|
153
|
-
"critical": ["urgent", "critical", "breaking", "security", "must fix"],
|
|
154
|
-
"high": ["important", "asap", "soon", "refactor needed"],
|
|
155
|
-
"medium": ["should", "consider", "improve"],
|
|
156
|
-
"low": ["nice to have", "someday", "maybe", "cleanup"],
|
|
157
|
-
}
|
|
158
|
-
|
|
159
|
-
async def analyze(self, context: dict[str, Any]) -> dict[str, Any]:
|
|
160
|
-
"""Analyze technical debt with trajectory tracking.
|
|
161
|
-
|
|
162
|
-
Context expects:
|
|
163
|
-
- project_path: Path to the project
|
|
164
|
-
- track_history: Enable historical tracking (default True)
|
|
165
|
-
- exclude_patterns: Patterns to exclude (default: tests, node_modules, etc.)
|
|
166
|
-
|
|
167
|
-
Returns:
|
|
168
|
-
Analysis with:
|
|
169
|
-
- current_debt: Current debt snapshot
|
|
170
|
-
- trajectory: Historical trajectory analysis
|
|
171
|
-
- hotspots: Files with most debt
|
|
172
|
-
- predictions: Level 4 predictions
|
|
173
|
-
- recommendations: Actionable steps
|
|
174
|
-
|
|
175
|
-
"""
|
|
176
|
-
project_path = Path(context.get("project_path", "."))
|
|
177
|
-
track_history = context.get("track_history", True)
|
|
178
|
-
exclude_patterns = context.get(
|
|
179
|
-
"exclude_patterns",
|
|
180
|
-
["node_modules", "venv", ".git", "__pycache__", "dist", "build", "test"],
|
|
181
|
-
)
|
|
182
|
-
|
|
183
|
-
# Step 1: Scan for current debt
|
|
184
|
-
debt_items = await self._scan_for_debt(project_path, exclude_patterns)
|
|
185
|
-
|
|
186
|
-
# Step 2: Create current snapshot
|
|
187
|
-
current_snapshot = self._create_snapshot(debt_items)
|
|
188
|
-
|
|
189
|
-
# Step 3: Load historical data and calculate trajectory
|
|
190
|
-
trajectory = None
|
|
191
|
-
history = []
|
|
192
|
-
|
|
193
|
-
if track_history:
|
|
194
|
-
history = self._load_history()
|
|
195
|
-
trajectory = self._calculate_trajectory(current_snapshot, history)
|
|
196
|
-
|
|
197
|
-
# Store current snapshot for future tracking
|
|
198
|
-
self._store_snapshot(current_snapshot)
|
|
199
|
-
|
|
200
|
-
# Step 4: Identify hotspots
|
|
201
|
-
hotspots = self._identify_hotspots(debt_items)
|
|
202
|
-
|
|
203
|
-
# Step 5: Generate predictions (Level 4)
|
|
204
|
-
predictions = self._generate_predictions(current_snapshot, trajectory, hotspots, history)
|
|
205
|
-
|
|
206
|
-
# Step 6: Generate recommendations
|
|
207
|
-
recommendations = self._generate_recommendations(current_snapshot, trajectory, hotspots)
|
|
208
|
-
|
|
209
|
-
return {
|
|
210
|
-
"current_debt": {
|
|
211
|
-
"total_items": current_snapshot.total_items,
|
|
212
|
-
"by_type": current_snapshot.by_type,
|
|
213
|
-
"by_severity": current_snapshot.by_severity,
|
|
214
|
-
"scan_date": current_snapshot.date,
|
|
215
|
-
},
|
|
216
|
-
"debt_items": [
|
|
217
|
-
{
|
|
218
|
-
"file": d.file_path,
|
|
219
|
-
"line": d.line_number,
|
|
220
|
-
"type": d.debt_type,
|
|
221
|
-
"content": d.content[:100], # Truncate
|
|
222
|
-
"severity": d.severity,
|
|
223
|
-
"age_days": d.age_days,
|
|
224
|
-
}
|
|
225
|
-
for d in debt_items[:20] # Top 20
|
|
226
|
-
],
|
|
227
|
-
"hotspots": hotspots,
|
|
228
|
-
"trajectory": (
|
|
229
|
-
{
|
|
230
|
-
"current_total": trajectory.current_total,
|
|
231
|
-
"previous_total": trajectory.previous_total,
|
|
232
|
-
"change_percent": trajectory.change_percent,
|
|
233
|
-
"trend": trajectory.trend,
|
|
234
|
-
"projection_30_days": trajectory.projection_30_days,
|
|
235
|
-
"projection_90_days": trajectory.projection_90_days,
|
|
236
|
-
"days_until_critical": trajectory.critical_threshold_days,
|
|
237
|
-
}
|
|
238
|
-
if trajectory
|
|
239
|
-
else None
|
|
240
|
-
),
|
|
241
|
-
"history_available": len(history) > 0,
|
|
242
|
-
"history_snapshots": len(history),
|
|
243
|
-
"predictions": predictions,
|
|
244
|
-
"recommendations": recommendations,
|
|
245
|
-
"confidence": 0.85 if trajectory else 0.5,
|
|
246
|
-
"memory_benefit": self._calculate_memory_benefit(history, trajectory),
|
|
247
|
-
}
|
|
248
|
-
|
|
249
|
-
async def _scan_for_debt(
|
|
250
|
-
self,
|
|
251
|
-
project_path: Path,
|
|
252
|
-
exclude_patterns: list[str],
|
|
253
|
-
) -> list[DebtItem]:
|
|
254
|
-
"""Scan project for technical debt markers"""
|
|
255
|
-
debt_items = []
|
|
256
|
-
|
|
257
|
-
# File extensions to scan
|
|
258
|
-
extensions = [
|
|
259
|
-
"*.py",
|
|
260
|
-
"*.js",
|
|
261
|
-
"*.ts",
|
|
262
|
-
"*.tsx",
|
|
263
|
-
"*.jsx",
|
|
264
|
-
"*.java",
|
|
265
|
-
"*.go",
|
|
266
|
-
"*.rb",
|
|
267
|
-
"*.rs",
|
|
268
|
-
"*.cpp",
|
|
269
|
-
"*.c",
|
|
270
|
-
"*.h",
|
|
271
|
-
]
|
|
272
|
-
|
|
273
|
-
for ext in extensions:
|
|
274
|
-
for file_path in project_path.rglob(ext):
|
|
275
|
-
# Skip excluded patterns
|
|
276
|
-
if any(exclude in str(file_path) for exclude in exclude_patterns):
|
|
277
|
-
continue
|
|
278
|
-
|
|
279
|
-
try:
|
|
280
|
-
debt_items.extend(self._scan_file(file_path))
|
|
281
|
-
except (OSError, UnicodeDecodeError) as e:
|
|
282
|
-
logger.debug(f"Could not scan {file_path}: {e}")
|
|
283
|
-
continue
|
|
284
|
-
|
|
285
|
-
return debt_items
|
|
286
|
-
|
|
287
|
-
def _scan_file(self, file_path: Path) -> list[DebtItem]:
|
|
288
|
-
"""Scan a single file for debt markers"""
|
|
289
|
-
items = []
|
|
290
|
-
|
|
291
|
-
with open(file_path, encoding="utf-8", errors="ignore") as f:
|
|
292
|
-
lines = f.readlines()
|
|
293
|
-
|
|
294
|
-
for line_num, line in enumerate(lines, 1):
|
|
295
|
-
for debt_type, patterns in self.debt_patterns.items():
|
|
296
|
-
for pattern in patterns:
|
|
297
|
-
if re.search(pattern, line, re.IGNORECASE):
|
|
298
|
-
item = DebtItem(
|
|
299
|
-
item_id=f"{file_path}:{line_num}",
|
|
300
|
-
file_path=str(file_path),
|
|
301
|
-
line_number=line_num,
|
|
302
|
-
debt_type=debt_type,
|
|
303
|
-
content=line.strip(),
|
|
304
|
-
severity=self._assess_severity(line),
|
|
305
|
-
date_found=datetime.now().isoformat(),
|
|
306
|
-
)
|
|
307
|
-
items.append(item)
|
|
308
|
-
break # Only count once per line
|
|
309
|
-
|
|
310
|
-
return items
|
|
311
|
-
|
|
312
|
-
def _assess_severity(self, content: str) -> str:
|
|
313
|
-
"""Assess severity of a debt item based on keywords"""
|
|
314
|
-
content_lower = content.lower()
|
|
315
|
-
|
|
316
|
-
for severity, keywords in self.severity_indicators.items():
|
|
317
|
-
if any(keyword in content_lower for keyword in keywords):
|
|
318
|
-
return severity
|
|
319
|
-
|
|
320
|
-
return "medium" # Default
|
|
321
|
-
|
|
322
|
-
def _create_snapshot(self, debt_items: list[DebtItem]) -> DebtSnapshot:
|
|
323
|
-
"""Create a point-in-time snapshot of debt"""
|
|
324
|
-
by_type: dict[str, int] = {}
|
|
325
|
-
by_severity: dict[str, int] = {}
|
|
326
|
-
by_file: dict[str, int] = {}
|
|
327
|
-
|
|
328
|
-
for item in debt_items:
|
|
329
|
-
by_type[item.debt_type] = by_type.get(item.debt_type, 0) + 1
|
|
330
|
-
by_severity[item.severity] = by_severity.get(item.severity, 0) + 1
|
|
331
|
-
|
|
332
|
-
# Normalize file path (handle both absolute and relative)
|
|
333
|
-
try:
|
|
334
|
-
file_path = Path(item.file_path)
|
|
335
|
-
if file_path.is_absolute():
|
|
336
|
-
file_key = str(file_path.relative_to(Path.cwd()))
|
|
337
|
-
else:
|
|
338
|
-
file_key = str(file_path)
|
|
339
|
-
except ValueError:
|
|
340
|
-
# Path not relative to cwd, use as-is
|
|
341
|
-
file_key = str(item.file_path)
|
|
342
|
-
by_file[file_key] = by_file.get(file_key, 0) + 1
|
|
343
|
-
|
|
344
|
-
# Identify top hotspots
|
|
345
|
-
hotspots = sorted(by_file.items(), key=lambda x: x[1], reverse=True)[:5]
|
|
346
|
-
|
|
347
|
-
return DebtSnapshot(
|
|
348
|
-
date=datetime.now().isoformat(),
|
|
349
|
-
total_items=len(debt_items),
|
|
350
|
-
by_type=by_type,
|
|
351
|
-
by_severity=by_severity,
|
|
352
|
-
by_file=by_file,
|
|
353
|
-
hotspots=[h[0] for h in hotspots],
|
|
354
|
-
)
|
|
355
|
-
|
|
356
|
-
def _load_history(self) -> list[DebtSnapshot]:
|
|
357
|
-
"""Load historical snapshots from pattern storage"""
|
|
358
|
-
history = []
|
|
359
|
-
history_file = self.pattern_storage_path / "debt_history.json"
|
|
360
|
-
|
|
361
|
-
if history_file.exists():
|
|
362
|
-
try:
|
|
363
|
-
with open(history_file, encoding="utf-8") as f:
|
|
364
|
-
data = json.load(f)
|
|
365
|
-
|
|
366
|
-
for snapshot_data in data.get("snapshots", []):
|
|
367
|
-
history.append(
|
|
368
|
-
DebtSnapshot(
|
|
369
|
-
date=snapshot_data["date"],
|
|
370
|
-
total_items=snapshot_data["total_items"],
|
|
371
|
-
by_type=snapshot_data.get("by_type", {}),
|
|
372
|
-
by_severity=snapshot_data.get("by_severity", {}),
|
|
373
|
-
by_file=snapshot_data.get("by_file", {}),
|
|
374
|
-
hotspots=snapshot_data.get("hotspots", []),
|
|
375
|
-
),
|
|
376
|
-
)
|
|
377
|
-
except (json.JSONDecodeError, KeyError) as e:
|
|
378
|
-
logger.warning(f"Could not load debt history: {e}")
|
|
379
|
-
|
|
380
|
-
return history
|
|
381
|
-
|
|
382
|
-
def _store_snapshot(self, snapshot: DebtSnapshot) -> None:
|
|
383
|
-
"""Store snapshot to history"""
|
|
384
|
-
history_file = self.pattern_storage_path / "debt_history.json"
|
|
385
|
-
|
|
386
|
-
# Load existing history
|
|
387
|
-
history_data: dict[str, list[dict]] = {"snapshots": []}
|
|
388
|
-
if history_file.exists():
|
|
389
|
-
try:
|
|
390
|
-
with open(history_file, encoding="utf-8") as f:
|
|
391
|
-
history_data = json.load(f)
|
|
392
|
-
except json.JSONDecodeError:
|
|
393
|
-
pass
|
|
394
|
-
|
|
395
|
-
# Add new snapshot
|
|
396
|
-
history_data["snapshots"].append(
|
|
397
|
-
{
|
|
398
|
-
"date": snapshot.date,
|
|
399
|
-
"total_items": snapshot.total_items,
|
|
400
|
-
"by_type": snapshot.by_type,
|
|
401
|
-
"by_severity": snapshot.by_severity,
|
|
402
|
-
"by_file": snapshot.by_file,
|
|
403
|
-
"hotspots": snapshot.hotspots,
|
|
404
|
-
},
|
|
405
|
-
)
|
|
406
|
-
|
|
407
|
-
# Keep last 100 snapshots
|
|
408
|
-
history_data["snapshots"] = history_data["snapshots"][-100:]
|
|
409
|
-
|
|
410
|
-
# Store
|
|
411
|
-
try:
|
|
412
|
-
with open(history_file, "w", encoding="utf-8") as f:
|
|
413
|
-
json.dump(history_data, f, indent=2)
|
|
414
|
-
except OSError as e:
|
|
415
|
-
logger.warning(f"Could not store debt snapshot: {e}")
|
|
416
|
-
|
|
417
|
-
def _calculate_trajectory(
|
|
418
|
-
self,
|
|
419
|
-
current: DebtSnapshot,
|
|
420
|
-
history: list[DebtSnapshot],
|
|
421
|
-
) -> DebtTrajectory:
|
|
422
|
-
"""Calculate debt trajectory from historical data"""
|
|
423
|
-
if not history:
|
|
424
|
-
# No history - can't calculate trajectory
|
|
425
|
-
return DebtTrajectory(
|
|
426
|
-
current_total=current.total_items,
|
|
427
|
-
previous_total=current.total_items,
|
|
428
|
-
change_percent=0.0,
|
|
429
|
-
trend="unknown",
|
|
430
|
-
projection_30_days=current.total_items,
|
|
431
|
-
projection_90_days=current.total_items,
|
|
432
|
-
critical_threshold_days=None,
|
|
433
|
-
)
|
|
434
|
-
|
|
435
|
-
# Get comparison point (30 days ago if available, otherwise earliest)
|
|
436
|
-
comparison_snapshot = None
|
|
437
|
-
thirty_days_ago = datetime.now() - timedelta(days=30)
|
|
438
|
-
|
|
439
|
-
for snapshot in reversed(history):
|
|
440
|
-
snapshot_date = datetime.fromisoformat(snapshot.date.replace("Z", ""))
|
|
441
|
-
if snapshot_date <= thirty_days_ago:
|
|
442
|
-
comparison_snapshot = snapshot
|
|
443
|
-
break
|
|
444
|
-
|
|
445
|
-
if not comparison_snapshot and history:
|
|
446
|
-
comparison_snapshot = history[0] # Use earliest available
|
|
447
|
-
|
|
448
|
-
previous_total = (
|
|
449
|
-
comparison_snapshot.total_items if comparison_snapshot else current.total_items
|
|
450
|
-
)
|
|
451
|
-
|
|
452
|
-
# Calculate change
|
|
453
|
-
if previous_total > 0:
|
|
454
|
-
change_percent = ((current.total_items - previous_total) / previous_total) * 100
|
|
455
|
-
else:
|
|
456
|
-
change_percent = 0.0 if current.total_items == 0 else 100.0
|
|
457
|
-
|
|
458
|
-
# Determine trend
|
|
459
|
-
if change_percent < -10:
|
|
460
|
-
trend = "decreasing"
|
|
461
|
-
elif change_percent < 5:
|
|
462
|
-
trend = "stable"
|
|
463
|
-
elif change_percent < 25:
|
|
464
|
-
trend = "increasing"
|
|
465
|
-
else:
|
|
466
|
-
trend = "exploding"
|
|
467
|
-
|
|
468
|
-
# Calculate daily growth rate for projections
|
|
469
|
-
if len(history) >= 2 and previous_total > 0 and comparison_snapshot is not None:
|
|
470
|
-
days_between = max(
|
|
471
|
-
1,
|
|
472
|
-
(
|
|
473
|
-
datetime.fromisoformat(current.date.replace("Z", ""))
|
|
474
|
-
- datetime.fromisoformat(comparison_snapshot.date.replace("Z", ""))
|
|
475
|
-
).days,
|
|
476
|
-
)
|
|
477
|
-
daily_growth_rate = (current.total_items - previous_total) / days_between
|
|
478
|
-
else:
|
|
479
|
-
daily_growth_rate = 0
|
|
480
|
-
|
|
481
|
-
# Project future debt
|
|
482
|
-
projection_30 = int(current.total_items + (daily_growth_rate * 30))
|
|
483
|
-
projection_90 = int(current.total_items + (daily_growth_rate * 90))
|
|
484
|
-
|
|
485
|
-
# Calculate days until critical (define critical as 2x current)
|
|
486
|
-
critical_threshold = current.total_items * 2
|
|
487
|
-
if daily_growth_rate > 0:
|
|
488
|
-
days_until_critical = int(
|
|
489
|
-
(critical_threshold - current.total_items) / daily_growth_rate,
|
|
490
|
-
)
|
|
491
|
-
else:
|
|
492
|
-
days_until_critical = None
|
|
493
|
-
|
|
494
|
-
return DebtTrajectory(
|
|
495
|
-
current_total=current.total_items,
|
|
496
|
-
previous_total=previous_total,
|
|
497
|
-
change_percent=round(change_percent, 1),
|
|
498
|
-
trend=trend,
|
|
499
|
-
projection_30_days=max(0, projection_30),
|
|
500
|
-
projection_90_days=max(0, projection_90),
|
|
501
|
-
critical_threshold_days=days_until_critical,
|
|
502
|
-
)
|
|
503
|
-
|
|
504
|
-
def _identify_hotspots(self, debt_items: list[DebtItem]) -> list[dict[str, Any]]:
    """Identify the files with the most technical debt.

    Args:
        debt_items: All detected debt items across the codebase.

    Returns:
        Up to 10 dicts, ordered by descending item count, each containing
        the file's display path, its debt count, a weighted severity
        score, the distinct debt types present, and the oldest recorded
        item date ("unknown" when no items carry a date).
    """
    # Group items by file; setdefault avoids the explicit membership check.
    by_file: dict[str, list[DebtItem]] = {}
    for item in debt_items:
        by_file.setdefault(item.file_path, []).append(item)

    # Loop invariants hoisted out of the hotspot loop: the severity
    # weight table and the cwd used to relativize absolute paths.
    severity_scores = {"critical": 4, "high": 3, "medium": 2, "low": 1}
    cwd = Path.cwd()

    hotspots = []
    for file_path, items in sorted(by_file.items(), key=lambda x: len(x[1]), reverse=True)[:10]:
        # Weighted severity score; unknown severities count as 1 (low).
        total_severity = sum(severity_scores.get(i.severity, 1) for i in items)

        # Normalize file path for display. relative_to raises ValueError
        # when an absolute path lies outside cwd; fall back to the raw path.
        try:
            fp = Path(file_path)
            if fp.is_absolute():
                display_path = str(fp.relative_to(cwd))
            else:
                display_path = str(fp)
        except ValueError:
            display_path = str(file_path)

        hotspots.append(
            {
                "file": display_path,
                "debt_count": len(items),
                "severity_score": total_severity,
                "types": list({i.debt_type for i in items}),
                "oldest_item": min((i.date_found for i in items), default="unknown"),
            },
        )

    return hotspots
|
|
540
|
-
|
|
541
|
-
def _generate_predictions(
    self,
    current: DebtSnapshot,
    trajectory: DebtTrajectory | None,
    hotspots: list[dict[str, Any]],
    history: list[DebtSnapshot],
) -> list[dict[str, Any]]:
    """Generate Level 4 predictions based on trajectory"""
    predictions: list[dict[str, Any]] = []

    if trajectory is not None:
        # Warn when debt growth has crossed into the "exploding" band.
        if trajectory.trend == "exploding":
            explosion_msg = (
                f"Technical debt increased {trajectory.change_percent}% recently. "
                f"At current trajectory: {trajectory.projection_30_days} items in 30 days, "
                f"{trajectory.projection_90_days} in 90 days."
            )
            predictions.append(
                {
                    "type": "debt_explosion",
                    "severity": "critical",
                    "description": explosion_msg,
                    "prevention_steps": [
                        "Allocate dedicated cleanup sprints",
                        "Add debt ceiling to Definition of Done",
                        "Block new features until debt stabilizes",
                    ],
                },
            )

        # Warn when the projected doubling point is under ~six months out.
        threshold_days = trajectory.critical_threshold_days
        if threshold_days and threshold_days < 180:
            predictions.append(
                {
                    "type": "critical_threshold",
                    "severity": "high",
                    "description": (
                        f"At current growth rate, debt will double in "
                        f"{threshold_days} days. "
                        "Major refactoring will be required."
                    ),
                    "prevention_steps": [
                        "Start addressing high-severity items now",
                        "Review root causes of debt accumulation",
                        "Consider architectural improvements",
                    ],
                },
            )

        # Acknowledge a healthy, shrinking-debt trend.
        if trajectory.trend == "decreasing":
            predictions.append(
                {
                    "type": "positive_trend",
                    "severity": "info",
                    "description": (
                        f"Debt decreased {abs(trajectory.change_percent)}%. "
                        "Team is successfully paying down technical debt."
                    ),
                    "prevention_steps": ["Continue current practices"],
                },
            )

    # Flag files where debt is heavily concentrated (10+ items each).
    critical_hotspots = [h for h in hotspots if h["debt_count"] >= 10]
    if critical_hotspots:
        predictions.append(
            {
                "type": "hotspot_concentration",
                "severity": "medium",
                "description": (
                    f"{len(critical_hotspots)} files have 10+ debt items. "
                    "Concentrated debt often indicates need for refactoring."
                ),
                "affected_files": [h["file"] for h in critical_hotspots[:3]],
                "prevention_steps": [
                    "Prioritize refactoring hotspot files",
                    "Consider splitting large files",
                    "Review why debt accumulates in these areas",
                ],
            },
        )

    # Flag accumulation of HACK/XXX shortcut markers.
    if current.by_type.get("hack", 0) >= 5:
        predictions.append(
            {
                "type": "hack_accumulation",
                "severity": "high",
                "description": (
                    f"{current.by_type['hack']} HACK/XXX markers detected. "
                    "These often indicate shortcuts that need proper solutions."
                ),
                "prevention_steps": [
                    "Convert hacks to tracked issues",
                    "Estimate effort to replace each hack",
                    "Prioritize hacks in critical paths",
                ],
            },
        )

    return predictions
|
|
643
|
-
|
|
644
|
-
def _generate_recommendations(
    self,
    current: DebtSnapshot,
    trajectory: DebtTrajectory | None,
    hotspots: list[dict[str, Any]],
) -> list[str]:
    """Produce a short, actionable list of next steps.

    Combines severity counts, the growth trajectory (when history
    exists), the worst hotspot file, and TODO volume into
    human-readable recommendations.
    """
    recs: list[str] = []

    # Severity-driven advice.
    critical_total = current.by_severity.get("critical", 0)
    high_total = current.by_severity.get("high", 0)
    if critical_total > 0:
        recs.append(f"🚨 {critical_total} CRITICAL debt items need immediate attention")
    if high_total > 3:
        recs.append(f"⚠️ {high_total} HIGH severity items - schedule for next sprint")

    # Trajectory-driven advice when debt is still growing.
    if trajectory is not None and trajectory.trend in ("increasing", "exploding"):
        recs.append(
            f"📈 Debt is {trajectory.trend} ({trajectory.change_percent:+.1f}%) - "
            "consider adding cleanup tasks to each sprint",
        )

    # Call out the single worst hotspot when it carries enough items.
    if hotspots:
        worst = hotspots[0]
        if worst["debt_count"] >= 5:
            recs.append(
                f"🔥 Top hotspot: {worst['file']} "
                f"({worst['debt_count']} items) - candidate for refactoring",
            )

    # TODO-volume advice.
    if current.by_type.get("todo", 0) > 20:
        recs.append(
            f"📝 {current.by_type['todo']} TODOs - consider converting to tracked issues",
        )

    # Remind the user what persistent history enables.
    if trajectory is not None:
        recs.append("📊 Trajectory analysis enabled - track progress over time")
    else:
        recs.append("💡 Run regularly to build historical data for trajectory analysis")

    return recs
|
|
696
|
-
|
|
697
|
-
def _calculate_memory_benefit(
    self,
    history: list[DebtSnapshot],
    trajectory: DebtTrajectory | None,
) -> dict[str, Any]:
    """Summarize what persistent memory contributes to this analysis.

    Returns a dict describing whether history exists, how many
    snapshots are stored, and whether trajectory analysis was possible.
    """
    # Without stored snapshots there is nothing to compare against.
    if not history:
        return {
            "history_available": False,
            "value_statement": (
                "No historical data yet. Run regularly to enable trajectory analysis."
            ),
            "trajectory_enabled": False,
        }

    # One-line summary of the current trend, when a trajectory exists.
    if trajectory is not None:
        insight = f"Current trend: {trajectory.trend} ({trajectory.change_percent:+.1f}%)"
    else:
        insight = "Building history..."

    return {
        "history_available": True,
        "snapshots_stored": len(history),
        # history is non-empty here, so the first snapshot is the earliest.
        "earliest_record": history[0].date,
        "trajectory_enabled": trajectory is not None,
        "value_statement": (
            f"Persistent memory enables trajectory analysis with {len(history)} historical snapshots. "
            "Without memory, you'd only see a point-in-time count—no trends, no predictions."
        ),
        "insight": insight,
    }
|