empathy-framework 4.6.6-py3-none-any.whl → 4.7.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (273)
  1. empathy_framework-4.7.1.dist-info/METADATA +690 -0
  2. empathy_framework-4.7.1.dist-info/RECORD +379 -0
  3. {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.1.dist-info}/top_level.txt +1 -2
  4. empathy_healthcare_plugin/monitors/monitoring/__init__.py +9 -9
  5. empathy_llm_toolkit/agent_factory/__init__.py +6 -6
  6. empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +7 -10
  7. empathy_llm_toolkit/agents_md/__init__.py +22 -0
  8. empathy_llm_toolkit/agents_md/loader.py +218 -0
  9. empathy_llm_toolkit/agents_md/parser.py +271 -0
  10. empathy_llm_toolkit/agents_md/registry.py +307 -0
  11. empathy_llm_toolkit/commands/__init__.py +51 -0
  12. empathy_llm_toolkit/commands/context.py +375 -0
  13. empathy_llm_toolkit/commands/loader.py +301 -0
  14. empathy_llm_toolkit/commands/models.py +231 -0
  15. empathy_llm_toolkit/commands/parser.py +371 -0
  16. empathy_llm_toolkit/commands/registry.py +429 -0
  17. empathy_llm_toolkit/config/__init__.py +8 -8
  18. empathy_llm_toolkit/config/unified.py +3 -7
  19. empathy_llm_toolkit/context/__init__.py +22 -0
  20. empathy_llm_toolkit/context/compaction.py +455 -0
  21. empathy_llm_toolkit/context/manager.py +434 -0
  22. empathy_llm_toolkit/hooks/__init__.py +24 -0
  23. empathy_llm_toolkit/hooks/config.py +306 -0
  24. empathy_llm_toolkit/hooks/executor.py +289 -0
  25. empathy_llm_toolkit/hooks/registry.py +302 -0
  26. empathy_llm_toolkit/hooks/scripts/__init__.py +39 -0
  27. empathy_llm_toolkit/hooks/scripts/evaluate_session.py +201 -0
  28. empathy_llm_toolkit/hooks/scripts/first_time_init.py +285 -0
  29. empathy_llm_toolkit/hooks/scripts/pre_compact.py +207 -0
  30. empathy_llm_toolkit/hooks/scripts/session_end.py +183 -0
  31. empathy_llm_toolkit/hooks/scripts/session_start.py +163 -0
  32. empathy_llm_toolkit/hooks/scripts/suggest_compact.py +225 -0
  33. empathy_llm_toolkit/learning/__init__.py +30 -0
  34. empathy_llm_toolkit/learning/evaluator.py +438 -0
  35. empathy_llm_toolkit/learning/extractor.py +514 -0
  36. empathy_llm_toolkit/learning/storage.py +560 -0
  37. empathy_llm_toolkit/providers.py +4 -11
  38. empathy_llm_toolkit/security/__init__.py +17 -17
  39. empathy_llm_toolkit/utils/tokens.py +2 -5
  40. empathy_os/__init__.py +202 -70
  41. empathy_os/cache_monitor.py +5 -3
  42. empathy_os/cli/__init__.py +11 -55
  43. empathy_os/cli/__main__.py +29 -15
  44. empathy_os/cli/commands/inspection.py +21 -12
  45. empathy_os/cli/commands/memory.py +4 -12
  46. empathy_os/cli/commands/profiling.py +198 -0
  47. empathy_os/cli/commands/utilities.py +27 -7
  48. empathy_os/cli.py +28 -57
  49. empathy_os/cli_unified.py +525 -1164
  50. empathy_os/cost_tracker.py +9 -3
  51. empathy_os/dashboard/server.py +200 -2
  52. empathy_os/hot_reload/__init__.py +7 -7
  53. empathy_os/hot_reload/config.py +6 -7
  54. empathy_os/hot_reload/integration.py +35 -35
  55. empathy_os/hot_reload/reloader.py +57 -57
  56. empathy_os/hot_reload/watcher.py +28 -28
  57. empathy_os/hot_reload/websocket.py +2 -2
  58. empathy_os/memory/__init__.py +11 -4
  59. empathy_os/memory/claude_memory.py +1 -1
  60. empathy_os/memory/cross_session.py +8 -12
  61. empathy_os/memory/edges.py +6 -6
  62. empathy_os/memory/file_session.py +770 -0
  63. empathy_os/memory/graph.py +30 -30
  64. empathy_os/memory/nodes.py +6 -6
  65. empathy_os/memory/short_term.py +15 -9
  66. empathy_os/memory/unified.py +606 -140
  67. empathy_os/meta_workflows/agent_creator.py +3 -9
  68. empathy_os/meta_workflows/cli_meta_workflows.py +113 -53
  69. empathy_os/meta_workflows/form_engine.py +6 -18
  70. empathy_os/meta_workflows/intent_detector.py +64 -24
  71. empathy_os/meta_workflows/models.py +3 -1
  72. empathy_os/meta_workflows/pattern_learner.py +13 -31
  73. empathy_os/meta_workflows/plan_generator.py +55 -47
  74. empathy_os/meta_workflows/session_context.py +2 -3
  75. empathy_os/meta_workflows/workflow.py +20 -51
  76. empathy_os/models/cli.py +2 -2
  77. empathy_os/models/tasks.py +1 -2
  78. empathy_os/models/telemetry.py +4 -1
  79. empathy_os/models/token_estimator.py +3 -1
  80. empathy_os/monitoring/alerts.py +938 -9
  81. empathy_os/monitoring/alerts_cli.py +346 -183
  82. empathy_os/orchestration/execution_strategies.py +12 -29
  83. empathy_os/orchestration/pattern_learner.py +20 -26
  84. empathy_os/orchestration/real_tools.py +6 -15
  85. empathy_os/platform_utils.py +2 -1
  86. empathy_os/plugins/__init__.py +2 -2
  87. empathy_os/plugins/base.py +64 -64
  88. empathy_os/plugins/registry.py +32 -32
  89. empathy_os/project_index/index.py +49 -15
  90. empathy_os/project_index/models.py +1 -2
  91. empathy_os/project_index/reports.py +1 -1
  92. empathy_os/project_index/scanner.py +1 -0
  93. empathy_os/redis_memory.py +10 -7
  94. empathy_os/resilience/__init__.py +1 -1
  95. empathy_os/resilience/health.py +10 -10
  96. empathy_os/routing/__init__.py +7 -7
  97. empathy_os/routing/chain_executor.py +37 -37
  98. empathy_os/routing/classifier.py +36 -36
  99. empathy_os/routing/smart_router.py +40 -40
  100. empathy_os/routing/{wizard_registry.py → workflow_registry.py} +47 -47
  101. empathy_os/scaffolding/__init__.py +8 -8
  102. empathy_os/scaffolding/__main__.py +1 -1
  103. empathy_os/scaffolding/cli.py +28 -28
  104. empathy_os/socratic/__init__.py +3 -19
  105. empathy_os/socratic/ab_testing.py +25 -36
  106. empathy_os/socratic/blueprint.py +38 -38
  107. empathy_os/socratic/cli.py +34 -20
  108. empathy_os/socratic/collaboration.py +30 -28
  109. empathy_os/socratic/domain_templates.py +9 -1
  110. empathy_os/socratic/embeddings.py +17 -13
  111. empathy_os/socratic/engine.py +135 -70
  112. empathy_os/socratic/explainer.py +70 -60
  113. empathy_os/socratic/feedback.py +24 -19
  114. empathy_os/socratic/forms.py +15 -10
  115. empathy_os/socratic/generator.py +51 -35
  116. empathy_os/socratic/llm_analyzer.py +25 -23
  117. empathy_os/socratic/mcp_server.py +99 -159
  118. empathy_os/socratic/session.py +19 -13
  119. empathy_os/socratic/storage.py +98 -67
  120. empathy_os/socratic/success.py +38 -27
  121. empathy_os/socratic/visual_editor.py +51 -39
  122. empathy_os/socratic/web_ui.py +99 -66
  123. empathy_os/telemetry/cli.py +3 -1
  124. empathy_os/telemetry/usage_tracker.py +1 -3
  125. empathy_os/test_generator/__init__.py +3 -3
  126. empathy_os/test_generator/cli.py +28 -28
  127. empathy_os/test_generator/generator.py +64 -66
  128. empathy_os/test_generator/risk_analyzer.py +11 -11
  129. empathy_os/vscode_bridge 2.py +173 -0
  130. empathy_os/vscode_bridge.py +173 -0
  131. empathy_os/workflows/__init__.py +212 -120
  132. empathy_os/workflows/batch_processing.py +8 -24
  133. empathy_os/workflows/bug_predict.py +1 -1
  134. empathy_os/workflows/code_review.py +20 -5
  135. empathy_os/workflows/code_review_pipeline.py +13 -8
  136. empathy_os/workflows/keyboard_shortcuts/workflow.py +6 -2
  137. empathy_os/workflows/manage_documentation.py +1 -0
  138. empathy_os/workflows/orchestrated_health_check.py +6 -11
  139. empathy_os/workflows/orchestrated_release_prep.py +3 -3
  140. empathy_os/workflows/pr_review.py +18 -10
  141. empathy_os/workflows/progressive/README 2.md +454 -0
  142. empathy_os/workflows/progressive/__init__ 2.py +92 -0
  143. empathy_os/workflows/progressive/__init__.py +2 -12
  144. empathy_os/workflows/progressive/cli 2.py +242 -0
  145. empathy_os/workflows/progressive/cli.py +14 -37
  146. empathy_os/workflows/progressive/core 2.py +488 -0
  147. empathy_os/workflows/progressive/core.py +12 -12
  148. empathy_os/workflows/progressive/orchestrator 2.py +701 -0
  149. empathy_os/workflows/progressive/orchestrator.py +166 -144
  150. empathy_os/workflows/progressive/reports 2.py +528 -0
  151. empathy_os/workflows/progressive/reports.py +22 -31
  152. empathy_os/workflows/progressive/telemetry 2.py +280 -0
  153. empathy_os/workflows/progressive/telemetry.py +8 -14
  154. empathy_os/workflows/progressive/test_gen 2.py +514 -0
  155. empathy_os/workflows/progressive/test_gen.py +29 -48
  156. empathy_os/workflows/progressive/workflow 2.py +628 -0
  157. empathy_os/workflows/progressive/workflow.py +31 -70
  158. empathy_os/workflows/release_prep.py +21 -6
  159. empathy_os/workflows/release_prep_crew.py +1 -0
  160. empathy_os/workflows/secure_release.py +13 -6
  161. empathy_os/workflows/security_audit.py +8 -3
  162. empathy_os/workflows/test_coverage_boost_crew.py +3 -2
  163. empathy_os/workflows/test_maintenance_crew.py +1 -0
  164. empathy_os/workflows/test_runner.py +16 -12
  165. empathy_software_plugin/SOFTWARE_PLUGIN_README.md +25 -703
  166. empathy_software_plugin/cli.py +0 -122
  167. patterns/README.md +119 -0
  168. patterns/__init__.py +95 -0
  169. patterns/behavior.py +298 -0
  170. patterns/code_review_memory.json +441 -0
  171. patterns/core.py +97 -0
  172. patterns/debugging.json +3763 -0
  173. patterns/empathy.py +268 -0
  174. patterns/health_check_memory.json +505 -0
  175. patterns/input.py +161 -0
  176. patterns/memory_graph.json +8 -0
  177. patterns/refactoring_memory.json +1113 -0
  178. patterns/registry.py +663 -0
  179. patterns/security_memory.json +8 -0
  180. patterns/structural.py +415 -0
  181. patterns/validation.py +194 -0
  182. coach_wizards/__init__.py +0 -45
  183. coach_wizards/accessibility_wizard.py +0 -91
  184. coach_wizards/api_wizard.py +0 -91
  185. coach_wizards/base_wizard.py +0 -209
  186. coach_wizards/cicd_wizard.py +0 -91
  187. coach_wizards/code_reviewer_README.md +0 -60
  188. coach_wizards/code_reviewer_wizard.py +0 -180
  189. coach_wizards/compliance_wizard.py +0 -91
  190. coach_wizards/database_wizard.py +0 -91
  191. coach_wizards/debugging_wizard.py +0 -91
  192. coach_wizards/documentation_wizard.py +0 -91
  193. coach_wizards/generate_wizards.py +0 -347
  194. coach_wizards/localization_wizard.py +0 -173
  195. coach_wizards/migration_wizard.py +0 -91
  196. coach_wizards/monitoring_wizard.py +0 -91
  197. coach_wizards/observability_wizard.py +0 -91
  198. coach_wizards/performance_wizard.py +0 -91
  199. coach_wizards/prompt_engineering_wizard.py +0 -661
  200. coach_wizards/refactoring_wizard.py +0 -91
  201. coach_wizards/scaling_wizard.py +0 -90
  202. coach_wizards/security_wizard.py +0 -92
  203. coach_wizards/testing_wizard.py +0 -91
  204. empathy_framework-4.6.6.dist-info/METADATA +0 -1597
  205. empathy_framework-4.6.6.dist-info/RECORD +0 -410
  206. empathy_llm_toolkit/wizards/__init__.py +0 -43
  207. empathy_llm_toolkit/wizards/base_wizard.py +0 -364
  208. empathy_llm_toolkit/wizards/customer_support_wizard.py +0 -190
  209. empathy_llm_toolkit/wizards/healthcare_wizard.py +0 -378
  210. empathy_llm_toolkit/wizards/patient_assessment_README.md +0 -64
  211. empathy_llm_toolkit/wizards/patient_assessment_wizard.py +0 -193
  212. empathy_llm_toolkit/wizards/technology_wizard.py +0 -209
  213. empathy_os/wizard_factory_cli.py +0 -170
  214. empathy_software_plugin/wizards/__init__.py +0 -42
  215. empathy_software_plugin/wizards/advanced_debugging_wizard.py +0 -395
  216. empathy_software_plugin/wizards/agent_orchestration_wizard.py +0 -511
  217. empathy_software_plugin/wizards/ai_collaboration_wizard.py +0 -503
  218. empathy_software_plugin/wizards/ai_context_wizard.py +0 -441
  219. empathy_software_plugin/wizards/ai_documentation_wizard.py +0 -503
  220. empathy_software_plugin/wizards/base_wizard.py +0 -288
  221. empathy_software_plugin/wizards/book_chapter_wizard.py +0 -519
  222. empathy_software_plugin/wizards/code_review_wizard.py +0 -604
  223. empathy_software_plugin/wizards/debugging/__init__.py +0 -50
  224. empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +0 -414
  225. empathy_software_plugin/wizards/debugging/config_loaders.py +0 -446
  226. empathy_software_plugin/wizards/debugging/fix_applier.py +0 -469
  227. empathy_software_plugin/wizards/debugging/language_patterns.py +0 -385
  228. empathy_software_plugin/wizards/debugging/linter_parsers.py +0 -470
  229. empathy_software_plugin/wizards/debugging/verification.py +0 -369
  230. empathy_software_plugin/wizards/enhanced_testing_wizard.py +0 -537
  231. empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +0 -816
  232. empathy_software_plugin/wizards/multi_model_wizard.py +0 -501
  233. empathy_software_plugin/wizards/pattern_extraction_wizard.py +0 -422
  234. empathy_software_plugin/wizards/pattern_retriever_wizard.py +0 -400
  235. empathy_software_plugin/wizards/performance/__init__.py +0 -9
  236. empathy_software_plugin/wizards/performance/bottleneck_detector.py +0 -221
  237. empathy_software_plugin/wizards/performance/profiler_parsers.py +0 -278
  238. empathy_software_plugin/wizards/performance/trajectory_analyzer.py +0 -429
  239. empathy_software_plugin/wizards/performance_profiling_wizard.py +0 -305
  240. empathy_software_plugin/wizards/prompt_engineering_wizard.py +0 -425
  241. empathy_software_plugin/wizards/rag_pattern_wizard.py +0 -461
  242. empathy_software_plugin/wizards/security/__init__.py +0 -32
  243. empathy_software_plugin/wizards/security/exploit_analyzer.py +0 -290
  244. empathy_software_plugin/wizards/security/owasp_patterns.py +0 -241
  245. empathy_software_plugin/wizards/security/vulnerability_scanner.py +0 -604
  246. empathy_software_plugin/wizards/security_analysis_wizard.py +0 -322
  247. empathy_software_plugin/wizards/security_learning_wizard.py +0 -740
  248. empathy_software_plugin/wizards/tech_debt_wizard.py +0 -726
  249. empathy_software_plugin/wizards/testing/__init__.py +0 -27
  250. empathy_software_plugin/wizards/testing/coverage_analyzer.py +0 -459
  251. empathy_software_plugin/wizards/testing/quality_analyzer.py +0 -525
  252. empathy_software_plugin/wizards/testing/test_suggester.py +0 -533
  253. empathy_software_plugin/wizards/testing_wizard.py +0 -274
  254. wizards/__init__.py +0 -82
  255. wizards/admission_assessment_wizard.py +0 -644
  256. wizards/care_plan.py +0 -321
  257. wizards/clinical_assessment.py +0 -769
  258. wizards/discharge_planning.py +0 -77
  259. wizards/discharge_summary_wizard.py +0 -468
  260. wizards/dosage_calculation.py +0 -497
  261. wizards/incident_report_wizard.py +0 -454
  262. wizards/medication_reconciliation.py +0 -85
  263. wizards/nursing_assessment.py +0 -171
  264. wizards/patient_education.py +0 -654
  265. wizards/quality_improvement.py +0 -705
  266. wizards/sbar_report.py +0 -324
  267. wizards/sbar_wizard.py +0 -608
  268. wizards/shift_handoff_wizard.py +0 -535
  269. wizards/soap_note_wizard.py +0 -679
  270. wizards/treatment_plan.py +0 -15
  271. {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.1.dist-info}/WHEEL +0 -0
  272. {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.1.dist-info}/entry_points.txt +0 -0
  273. {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.1.dist-info}/licenses/LICENSE +0 -0
empathy_software_plugin/wizards/ai_context_wizard.py
@@ -1,441 +0,0 @@
- """AI Context Window Management Wizard - Level 4 Anticipatory Empathy
-
- Alerts developers when context window usage patterns will become problematic.
-
- In our experience building AI Nurse Florence with complex multi-step agents,
- context window management became critical. This wizard learned to detect
- when context strategies that work today will fail tomorrow as complexity grows.
-
- Copyright 2025 Smart AI Memory, LLC
- Licensed under Fair Source 0.9
- """
-
- import os
- import sys
- from typing import Any
-
- sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..", "src"))
-
- from empathy_os.plugins import BaseWizard
-
-
- class AIContextWindowWizard(BaseWizard):
-     """Level 4 Anticipatory: Predicts context window issues before they occur.
-
-     What We Learned:
-     - Context needs grow non-linearly with feature complexity
-     - Naive concatenation fails at ~60% of window capacity
-     - Chunking strategies need planning before you hit limits
-     - Early refactoring prevents emergency rewrites
-     """
-
-     def __init__(self):
-         super().__init__(
-             name="AI Context Window Management Wizard",
-             domain="software",
-             empathy_level=4,
-             category="ai_development",
-         )
-
-     def get_required_context(self) -> list[str]:
-         """Required context for analysis"""
-         return [
-             "ai_calls", # List of AI API calls in codebase
-             "context_sources", # Where context comes from (DB, files, etc.)
-             "ai_provider", # openai, anthropic, etc.
-             "model_name", # gpt-4, claude-3, etc.
-         ]
-
-     async def analyze(self, context: dict[str, Any]) -> dict[str, Any]:
-         """Analyze context window usage and predict future issues.
-
-         In our experience: Context limits sneak up on you. By the time
-         you hit the limit, you're forced into emergency refactoring.
-         """
-         self.validate_context(context)
-
-         ai_calls = context["ai_calls"]
-         context_sources = context["context_sources"]
-         ai_provider = context.get("ai_provider", "unknown")
-         model_name = context.get("model_name", "unknown")
-
-         # Get model limits
-         model_limits = self._get_model_limits(ai_provider, model_name)
-
-         # Current issues
-         issues = await self._analyze_current_usage(ai_calls, context_sources, model_limits)
-
-         # Level 4: Predict future problems
-         predictions = await self._predict_context_issues(
-             ai_calls,
-             context_sources,
-             model_limits,
-             context,
-         )
-
-         recommendations = self._generate_recommendations(issues, predictions)
-         patterns = self._extract_patterns(issues, predictions)
-
-         return {
-             "issues": issues,
-             "predictions": predictions,
-             "recommendations": recommendations,
-             "patterns": patterns,
-             "confidence": 0.85,
-             "metadata": {
-                 "wizard": self.name,
-                 "empathy_level": self.empathy_level,
-                 "ai_calls_analyzed": len(ai_calls),
-                 "model_limit_tokens": model_limits.get("max_tokens", "unknown"),
-             },
-         }
-
-     async def _analyze_current_usage(
-         self,
-         ai_calls: list[dict],
-         context_sources: list[dict],
-         model_limits: dict,
-     ) -> list[dict[str, Any]]:
-         """Analyze current context window usage"""
-         issues = []
-         max_tokens = model_limits.get("max_tokens", 128000)
-
-         for call in ai_calls:
-             estimated_tokens = self._estimate_context_tokens(call, context_sources)
-             usage_percent = (estimated_tokens / max_tokens) * 100
-
-             # Current issues
-             if usage_percent > 80:
-                 issues.append(
-                     {
-                         "severity": "warning",
-                         "type": "high_context_usage",
-                         "call_location": call.get("location", "unknown"),
-                         "message": (
-                             f"Context window at {usage_percent:.0f}% capacity. "
-                             "In our experience, operations above 80% become unreliable."
-                         ),
-                         "estimated_tokens": estimated_tokens,
-                         "suggestion": "Implement context pruning or chunking strategy",
-                     },
-                 )
-
-             # Check for naive concatenation
-             if self._uses_naive_concatenation(call):
-                 issues.append(
-                     {
-                         "severity": "info",
-                         "type": "naive_concatenation",
-                         "call_location": call.get("location", "unknown"),
-                         "message": (
-                             "Detected simple string concatenation for context building. "
-                             "This pattern fails unpredictably as data grows."
-                         ),
-                         "suggestion": "Use structured context builders with size limits",
-                     },
-                 )
-
-             # Check for missing token counting
-             if not self._has_token_counting(call):
-                 issues.append(
-                     {
-                         "severity": "info",
-                         "type": "missing_token_tracking",
-                         "call_location": call.get("location", "unknown"),
-                         "message": (
-                             "No token counting detected. In our experience, blind context "
-                             "injection leads to unpredictable failures."
-                         ),
-                         "suggestion": "Add token estimation before AI calls",
-                     },
-                 )
-
-         return issues
-
-     async def _predict_context_issues(
-         self,
-         ai_calls: list[dict],
-         context_sources: list[dict],
-         model_limits: dict,
-         full_context: dict[str, Any],
-     ) -> list[dict[str, Any]]:
-         """Level 4: Predict future context window problems.
-
-         Based on our experience with multi-agent systems that started simple
-         and grew complex.
-         """
-         predictions = []
-         max_tokens = model_limits.get("max_tokens", 128000)
-
-         # Pattern 1: Context growth trajectory
-         growth_rate = self._estimate_context_growth_rate(ai_calls, full_context)
-         current_avg = self._calculate_avg_context_size(ai_calls, context_sources)
-
-         if growth_rate > 1.2: # 20% growth
-             projected_size = current_avg * (growth_rate**3) # 3 months out
-             projected_percent = (projected_size / max_tokens) * 100
-
-             if projected_percent > 70:
-                 predictions.append(
-                     {
-                         "type": "context_capacity_limit",
-                         "alert": (
-                             f"Context usage growing at {(growth_rate - 1) * 100:.0f}% rate. "
-                             f"Current average: {current_avg:.0f} tokens. "
-                             "In our experience, this trajectory leads to context window "
-                             "limits. Alert: Implement chunking strategy before you hit the wall."
-                         ),
-                         "probability": "high",
-                         "impact": "high",
-                         "prevention_steps": [
-                             "Design semantic chunking strategy (split by meaning, not char count)",
-                             "Implement context prioritization (keep most relevant)",
-                             "Add context summarization for older messages",
-                             "Consider retrieval-augmented generation (RAG) pattern",
-                             "Implement token budget system with hard limits",
-                         ],
-                         "reasoning": (
-                             f"Growth trajectory: {current_avg:.0f} → {projected_size:.0f} tokens. "
-                             "We've seen this pattern require emergency refactoring. "
-                             "Proactive design prevents crisis."
-                         ),
-                     },
-                 )
-
-         # Pattern 2: Multi-turn conversation growth
-         multi_turn_calls = [c for c in ai_calls if self._is_multi_turn(c)]
-         if len(multi_turn_calls) > 3:
-             predictions.append(
-                 {
-                     "type": "conversation_memory_burden",
-                     "alert": (
-                         f"Detected {len(multi_turn_calls)} multi-turn conversations. "
-                         "In our experience, conversation history grows faster than expected. "
-                         "Alert: Implement conversation pruning before memory becomes unwieldy."
-                     ),
-                     "probability": "medium-high",
-                     "impact": "medium",
-                     "prevention_steps": [
-                         "Implement sliding window (keep last N messages)",
-                         "Add conversation summarization (compress old context)",
-                         "Design conversation checkpointing (restart with summary)",
-                         "Create context budget per conversation turn",
-                     ],
-                     "reasoning": (
-                         "Multi-turn conversations accumulate context linearly. "
-                         "Without pruning, they hit limits within 10-20 turns. "
-                         "We learned to design for this early."
-                     ),
-                 },
-             )
-
-         # Pattern 3: Dynamic context sources
-         dynamic_sources = [s for s in context_sources if s.get("type") == "dynamic"]
-         if len(dynamic_sources) > 2:
-             predictions.append(
-                 {
-                     "type": "dynamic_context_unpredictability",
-                     "alert": (
-                         f"Found {len(dynamic_sources)} dynamic context sources "
-                         "(database queries, API calls, file reads). "
-                         "In our experience, dynamic context size is unpredictable. "
-                         "Alert: Add size constraints before data growth causes failures."
-                     ),
-                     "probability": "high",
-                     "impact": "high",
-                     "prevention_steps": [
-                         "Add LIMIT clauses to all database queries for context",
-                         "Implement pagination for large result sets",
-                         "Add size validation before context injection",
-                         "Create fallback behavior when context exceeds budget",
-                         "Log context size metrics for monitoring",
-                     ],
-                     "reasoning": (
-                         "Dynamic sources return variable amounts of data. "
-                         "User has 10 records today, 10,000 tomorrow. "
-                         "We've seen this break production systems."
-                     ),
-                 },
-             )
-
-         # Pattern 4: Lack of context strategy
-         if not self._has_context_strategy(ai_calls):
-             predictions.append(
-                 {
-                     "type": "missing_context_architecture",
-                     "alert": (
-                         "No centralized context management detected. "
-                         "In our experience, ad-hoc context building becomes unmaintainable "
-                         "as AI integration grows. Alert: Design context architecture now "
-                         "while refactoring is still manageable."
-                     ),
-                     "probability": "medium",
-                     "impact": "high",
-                     "prevention_steps": [
-                         "Create ContextBuilder abstraction",
-                         "Implement context templates with variable injection",
-                         "Design context caching layer (reuse expensive computations)",
-                         "Add context versioning (track what context produced what output)",
-                         "Build context testing framework",
-                     ],
-                     "reasoning": (
-                         "We refactored context management 3 times before finding the right "
-                         "pattern. Starting with good architecture saves months of rework."
-                     ),
-                 },
-             )
-
-         # Pattern 5: Cost implications
-         total_estimated_tokens = sum(
-             self._estimate_context_tokens(c, context_sources) for c in ai_calls
-         )
-         if total_estimated_tokens > 50000: # Arbitrary threshold
-             predictions.append(
-                 {
-                     "type": "context_cost_scaling",
-                     "alert": (
-                         f"Estimated {total_estimated_tokens:,} tokens across all calls. "
-                         "In our experience, context costs scale faster than expected. "
-                         "Alert: Optimize context efficiency before costs compound."
-                     ),
-                     "probability": "high",
-                     "impact": "medium",
-                     "prevention_steps": [
-                         "Implement context caching (reuse system prompts)",
-                         "Remove redundant context (deduplicate instructions)",
-                         "Use prompt compression techniques",
-                         "Consider smaller models for simple tasks",
-                         "Add cost monitoring and alerting",
-                     ],
-                     "reasoning": (
-                         "Context costs are proportional to token count. "
-                         "We've reduced costs 40-60% through context optimization."
-                     ),
-                 },
-             )
-
-         return predictions
-
-     def _generate_recommendations(self, issues: list[dict], predictions: list[dict]) -> list[str]:
-         """Generate actionable recommendations"""
-         recommendations = []
-
-         # Prioritize high-impact predictions
-         high_impact = sorted(
-             predictions,
-             key=lambda p: {"high": 3, "medium": 2, "low": 1}.get(str(p.get("impact", "")), 0),
-             reverse=True,
-         )
-
-         for pred in high_impact[:2]: # Top 2
-             recommendations.append(f"[ALERT] {pred['alert']}")
-             recommendations.append(" Immediate actions:")
-             for step in pred["prevention_steps"][:3]:
-                 recommendations.append(f" - {step}")
-             recommendations.append("")
-
-         return recommendations
-
-     def _extract_patterns(
-         self,
-         issues: list[dict],
-         predictions: list[dict],
-     ) -> list[dict[str, Any]]:
-         """Extract cross-domain patterns"""
-         patterns = []
-
-         if any(p["type"] == "dynamic_context_unpredictability" for p in predictions):
-             patterns.append(
-                 {
-                     "pattern_type": "unbounded_dynamic_data",
-                     "description": (
-                         "When system depends on external data sources with unbounded size, "
-                         "implement constraints before data growth causes failures"
-                     ),
-                     "domain_agnostic": True,
-                     "applicable_to": [
-                         "AI context management",
-                         "API responses",
-                         "Database queries",
-                         "File processing",
-                         "Healthcare record retrieval",
-                     ],
-                     "detection": "Identify dynamic data sources without size limits",
-                     "prevention": "Add LIMIT, pagination, size validation",
-                 },
-             )
-
-         return patterns
-
-     # Helper methods
-
-     def _get_model_limits(self, provider: str, model: str) -> dict:
-         """Get token limits for AI model"""
-         limits = {
-             "openai": {
-                 "gpt-4": {"max_tokens": 8192},
-                 "gpt-4-32k": {"max_tokens": 32768},
-                 "gpt-4-turbo": {"max_tokens": 128000},
-             },
-             "anthropic": {
-                 "claude-3-opus": {"max_tokens": 200000},
-                 "claude-3-sonnet": {"max_tokens": 200000},
-                 "claude-3-haiku": {"max_tokens": 200000},
-             },
-         }
-
-         return limits.get(provider, {}).get(model, {"max_tokens": 128000})
-
-     def _estimate_context_tokens(self, call: dict, context_sources: list[dict]) -> int:
-         """Estimate tokens for an AI call"""
-         # Simplified: ~4 chars per token
-         base_prompt = int(call.get("prompt_size", 1000))
-         dynamic_context = sum(
-             int(source.get("estimated_size", 0))
-             for source in context_sources
-             if source.get("call_id") == call.get("id")
-         )
-         return (base_prompt + dynamic_context) // 4
-
-     def _uses_naive_concatenation(self, call: dict) -> bool:
-         """Check if call uses naive string concatenation"""
-         # Heuristic: look for patterns like f"prompt + {data}"
-         code = call.get("code_snippet", "")
-         return "+" in code or "concat" in code.lower()
-
-     def _has_token_counting(self, call: dict) -> bool:
-         """Check if call includes token counting"""
-         code = call.get("code_snippet", "")
-         return "token" in code.lower() and ("count" in code.lower() or "len" in code.lower())
-
-     def _estimate_context_growth_rate(self, ai_calls: list[dict], full_context: dict) -> float:
-         """Estimate how fast context is growing"""
-         # Simplified: check version history or feature count
-         version_history = full_context.get("version_history", [])
-         if len(version_history) > 1:
-             # Rough heuristic
-             return 1.3 # 30% growth
-         return 1.1 # Default 10% growth
-
-     def _calculate_avg_context_size(
-         self,
-         ai_calls: list[dict],
-         context_sources: list[dict],
-     ) -> float:
-         """Calculate average context size"""
-         if not ai_calls:
-             return 0
-         total = sum(self._estimate_context_tokens(call, context_sources) for call in ai_calls)
-         return total / len(ai_calls)
-
-     def _is_multi_turn(self, call: dict) -> bool:
-         """Check if call is part of multi-turn conversation"""
-         return call.get("conversation_id") is not None
-
-     def _has_context_strategy(self, ai_calls: list[dict]) -> bool:
-         """Check if codebase has centralized context management"""
-         # Heuristic: look for context builder classes
-         code_snippets = [call.get("code_snippet", "") for call in ai_calls]
-         return any(
-             "ContextBuilder" in code or "context_manager" in code.lower() for code in code_snippets
-         )
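
For readers assessing what 4.7.1 drops here: a minimal sketch of how the deleted AIContextWindowWizard was invoked, inferred from the removed source above. The sample ai_calls and context_sources records are hypothetical, and BaseWizard.validate_context (imported from empathy_os.plugins) is assumed to accept a dict containing the keys listed in get_required_context().

import asyncio

from empathy_software_plugin.wizards.ai_context_wizard import AIContextWindowWizard  # removed in 4.7.1


async def main() -> None:
    wizard = AIContextWindowWizard()

    # Hypothetical inputs, shaped after the keys analyze() and its helpers read
    # (prompt_size, code_snippet, conversation_id, call_id, estimated_size, ...).
    context = {
        "ai_calls": [
            {
                "id": "call-1",
                "location": "app/chat.py:42",
                "prompt_size": 48_000,  # characters; _estimate_context_tokens assumes ~4 chars per token
                "code_snippet": "prompt = system + history",  # "+" trips the naive-concatenation heuristic
                "conversation_id": "thread-7",  # marks the call as multi-turn
            },
        ],
        "context_sources": [
            {"call_id": "call-1", "type": "dynamic", "estimated_size": 120_000},
        ],
        "ai_provider": "openai",
        "model_name": "gpt-4",  # 8,192-token limit in _get_model_limits
    }

    result = await wizard.analyze(context)
    for issue in result["issues"]:
        print(issue["severity"], issue["type"], "->", issue["suggestion"])
    for prediction in result["predictions"]:
        print(prediction["type"], prediction["probability"], prediction["impact"])


asyncio.run(main())

With these inputs the wizard would report a high_context_usage warning (42,000 estimated tokens against gpt-4's 8,192-token limit) plus the naive-concatenation and missing-token-tracking findings, and a single missing_context_architecture prediction, since no ContextBuilder or context_manager usage appears in the snippets.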