empathy_framework-3.5.6-py3-none-any.whl → empathy_framework-3.7.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agents/compliance_anticipation_agent.py +113 -118
- agents/compliance_db.py +339 -0
- agents/epic_integration_wizard.py +37 -48
- agents/notifications.py +291 -0
- agents/trust_building_behaviors.py +66 -85
- coach_wizards/__init__.py +11 -12
- coach_wizards/accessibility_wizard.py +12 -12
- coach_wizards/api_wizard.py +12 -12
- coach_wizards/base_wizard.py +26 -20
- coach_wizards/cicd_wizard.py +15 -13
- coach_wizards/compliance_wizard.py +12 -12
- coach_wizards/database_wizard.py +12 -12
- coach_wizards/debugging_wizard.py +12 -12
- coach_wizards/documentation_wizard.py +12 -12
- coach_wizards/generate_wizards.py +1 -2
- coach_wizards/localization_wizard.py +21 -14
- coach_wizards/migration_wizard.py +12 -12
- coach_wizards/monitoring_wizard.py +12 -12
- coach_wizards/observability_wizard.py +12 -12
- coach_wizards/performance_wizard.py +12 -12
- coach_wizards/prompt_engineering_wizard.py +22 -25
- coach_wizards/refactoring_wizard.py +12 -12
- coach_wizards/scaling_wizard.py +12 -12
- coach_wizards/security_wizard.py +12 -12
- coach_wizards/testing_wizard.py +12 -12
- {empathy_framework-3.5.6.dist-info → empathy_framework-3.7.0.dist-info}/METADATA +234 -30
- empathy_framework-3.7.0.dist-info/RECORD +105 -0
- empathy_healthcare_plugin/__init__.py +1 -2
- empathy_llm_toolkit/__init__.py +5 -6
- empathy_llm_toolkit/claude_memory.py +14 -15
- empathy_llm_toolkit/code_health.py +27 -19
- empathy_llm_toolkit/contextual_patterns.py +11 -12
- empathy_llm_toolkit/core.py +43 -49
- empathy_llm_toolkit/git_pattern_extractor.py +16 -12
- empathy_llm_toolkit/levels.py +6 -13
- empathy_llm_toolkit/pattern_confidence.py +14 -18
- empathy_llm_toolkit/pattern_resolver.py +10 -12
- empathy_llm_toolkit/pattern_summary.py +13 -11
- empathy_llm_toolkit/providers.py +27 -38
- empathy_llm_toolkit/session_status.py +18 -20
- empathy_llm_toolkit/state.py +20 -21
- empathy_os/__init__.py +72 -73
- empathy_os/cli.py +193 -98
- empathy_os/cli_unified.py +68 -41
- empathy_os/config.py +31 -31
- empathy_os/coordination.py +48 -54
- empathy_os/core.py +90 -99
- empathy_os/cost_tracker.py +20 -23
- empathy_os/discovery.py +9 -11
- empathy_os/emergence.py +20 -21
- empathy_os/exceptions.py +18 -30
- empathy_os/feedback_loops.py +27 -30
- empathy_os/levels.py +31 -34
- empathy_os/leverage_points.py +27 -28
- empathy_os/logging_config.py +11 -12
- empathy_os/monitoring.py +27 -27
- empathy_os/pattern_library.py +29 -28
- empathy_os/persistence.py +30 -34
- empathy_os/platform_utils.py +46 -47
- empathy_os/redis_config.py +14 -15
- empathy_os/redis_memory.py +53 -56
- empathy_os/templates.py +12 -11
- empathy_os/trust_building.py +44 -36
- empathy_os/workflow_commands.py +123 -31
- empathy_software_plugin/__init__.py +1 -2
- empathy_software_plugin/cli.py +32 -25
- empathy_software_plugin/plugin.py +4 -8
- empathy_framework-3.5.6.dist-info/RECORD +0 -103
- {empathy_framework-3.5.6.dist-info → empathy_framework-3.7.0.dist-info}/WHEEL +0 -0
- {empathy_framework-3.5.6.dist-info → empathy_framework-3.7.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-3.5.6.dist-info → empathy_framework-3.7.0.dist-info}/licenses/LICENSE +0 -0
- {empathy_framework-3.5.6.dist-info → empathy_framework-3.7.0.dist-info}/top_level.txt +0 -0
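Almost all of the per-file hunks below are mechanical style fixes rather than behavioral changes, consistent with an automated linter/formatter pass (the tool is not named in the diff; Ruff rules D212, PLW1510, and COM812 would be a plausible match): docstring summaries move onto the opening quotes, `subprocess.run` calls gain an explicit `check=False`, multi-line calls gain trailing commas, and redundant `pass` statements after docstrings are removed. A condensed before/after sketch of the recurring edits:

```python
import subprocess


class Before:
    """
    Docstring summary on its own line (old style).
    """

    def staged_diff(self) -> str:
        result = subprocess.run(
            ["git", "diff", "--cached"],
            capture_output=True,
            text=True,
            timeout=10
        )
        return result.stdout


class After:
    """Docstring summary moved onto the opening quotes (D212-style fix)."""

    def staged_diff(self) -> str:
        result = subprocess.run(
            ["git", "diff", "--cached"],
            check=False,  # explicit no-raise on non-zero exit (PLW1510-style fix)
            capture_output=True,
            text=True,
            timeout=10,  # trailing comma added to multi-line calls (COM812-style fix)
        )
        return result.stdout
```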
empathy_llm_toolkit/git_pattern_extractor.py CHANGED

@@ -1,5 +1,4 @@
-"""
-Git Pattern Extractor
+"""Git Pattern Extractor

 Automatically detects bug fixes from git commits and creates
 draft pattern entries for review.
@@ -32,8 +31,7 @@ logger = logging.getLogger(__name__)


 class GitPatternExtractor:
-    """
-    Extracts bug fix patterns from git commits.
+    """Extracts bug fix patterns from git commits.

     Analyzes commit messages and diffs to detect common
     fix patterns, then creates draft pattern files.
@@ -104,14 +102,14 @@ class GitPatternExtractor:
         }

     def extract_from_recent_commits(self, num_commits: int = 1) -> list[dict[str, Any]]:
-        """
-        Extract patterns from recent git commits.
+        """Extract patterns from recent git commits.

         Args:
             num_commits: Number of recent commits to analyze

         Returns:
             List of detected pattern dicts
+
         """
         patterns = []

@@ -138,11 +136,11 @@ class GitPatternExtractor:
         return patterns

     def extract_from_staged(self) -> list[dict[str, Any]]:
-        """
-        Extract patterns from currently staged changes.
+        """Extract patterns from currently staged changes.

         Returns:
             List of detected pattern dicts
+
         """
         diff = self._get_staged_diff()
         if not diff:
@@ -158,14 +156,14 @@ class GitPatternExtractor:
         return self._analyze_diff(diff, commit_info)

     def save_pattern(self, pattern: dict[str, Any]) -> Path | None:
-        """
-        Save a detected pattern as a draft for review.
+        """Save a detected pattern as a draft for review.

         Args:
             pattern: Pattern dict from extraction

         Returns:
             Path to saved file, or None if failed
+
         """
         self.debugging_dir.mkdir(parents=True, exist_ok=True)

@@ -201,6 +199,7 @@ class GitPatternExtractor:
         try:
             result = subprocess.run(
                 ["git", "log", "-1", "--format=%H%n%s%n%an%n%aI", ref],
+                check=False,
                 capture_output=True,
                 text=True,
                 timeout=5,
@@ -226,6 +225,7 @@ class GitPatternExtractor:
         try:
             result = subprocess.run(
                 ["git", "diff", ref1, ref2],
+                check=False,
                 capture_output=True,
                 text=True,
                 timeout=10,
@@ -239,6 +239,7 @@ class GitPatternExtractor:
         try:
             result = subprocess.run(
                 ["git", "diff", "--cached"],
+                check=False,
                 capture_output=True,
                 text=True,
                 timeout=10,
@@ -252,6 +253,7 @@ class GitPatternExtractor:
         try:
             result = subprocess.run(
                 ["git", "config", key],
+                check=False,
                 capture_output=True,
                 text=True,
                 timeout=5,
@@ -286,7 +288,9 @@ class GitPatternExtractor:
                 # Process previous file
                 if current_file and added_lines:
                     file_patterns = self._detect_fix_patterns(
-                        current_file, added_lines, commit_info
+                        current_file,
+                        added_lines,
+                        commit_info,
                     )
                     patterns.extend(file_patterns)

@@ -334,7 +338,7 @@ class GitPatternExtractor:
                     "matches_count": len(matches),
                     "author": commit_info.get("author", "unknown"),
                     "date": commit_info.get("date", datetime.now().isoformat()),
-                }
+                },
             )

         return detected
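One note on the `check=False` arguments added above: `subprocess.run` already defaults to `check=False`, so these edits do not change behavior; they only make explicit that a non-zero git exit status is handled by inspecting the result rather than by catching `CalledProcessError`. A minimal sketch:

```python
import subprocess

# With check=False (the default), a failing command reports its status
# via returncode instead of raising CalledProcessError.
result = subprocess.run(
    ["git", "config", "user.name"],
    check=False,
    capture_output=True,
    text=True,
    timeout=5,
)
if result.returncode != 0:
    print("lookup failed:", result.stderr.strip())
else:
    print("user.name =", result.stdout.strip())
```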
empathy_llm_toolkit/levels.py
CHANGED
@@ -1,5 +1,4 @@
-"""
-Empathy Level Definitions
+"""Empathy Level Definitions

 Defines behavior for each of the 5 empathy levels.

@@ -11,8 +10,7 @@ from enum import IntEnum


 class EmpathyLevel(IntEnum):
-    """
-    The 5 levels of AI-human collaboration empathy.
+    """The 5 levels of AI-human collaboration empathy.

     Each level builds on previous levels.
     """
@@ -38,7 +36,6 @@ class EmpathyLevel(IntEnum):
     @classmethod
     def get_system_prompt(cls, level: int) -> str:
         """Get system prompt for operating at specific level"""
-
         base = """You are an AI assistant using the Empathy Framework for collaboration.

 Your responses should be:
@@ -117,8 +114,7 @@ Pattern contribution:

     @classmethod
     def get_temperature_recommendation(cls, level: int) -> float:
-        """
-        Get recommended temperature for each level.
+        """Get recommended temperature for each level.

         Higher levels benefit from lower temperature (more focused).
         """
@@ -133,8 +129,7 @@ Pattern contribution:

     @classmethod
     def get_required_context(cls, level: int) -> dict[str, bool]:
-        """
-        Get context requirements for each level.
+        """Get context requirements for each level.

         Returns dict of {context_type: required}
         """
@@ -175,8 +170,7 @@ Pattern contribution:

     @classmethod
     def get_max_tokens_recommendation(cls, level: int) -> int:
-        """
-        Get recommended max_tokens for each level.
+        """Get recommended max_tokens for each level.

         Higher levels often need longer responses.
         """
@@ -190,8 +184,7 @@ Pattern contribution:

     @classmethod
     def should_use_json_mode(cls, level: int) -> bool:
-        """
-        Determine if JSON mode is beneficial for level.
+        """Determine if JSON mode is beneficial for level.

         Levels 4-5 benefit from structured output.
         """
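Taken together, the classmethods touched above form the per-level configuration surface of `EmpathyLevel`. A hypothetical caller, assuming only the signatures visible in this diff (the enum's member names and concrete return values are not shown):

```python
from empathy_llm_toolkit.levels import EmpathyLevel

level = 3  # illustrative; the docstrings above describe levels 1-5

prompt = EmpathyLevel.get_system_prompt(level)
temperature = EmpathyLevel.get_temperature_recommendation(level)
max_tokens = EmpathyLevel.get_max_tokens_recommendation(level)
context_needs = EmpathyLevel.get_required_context(level)  # {context_type: required}
use_json = EmpathyLevel.should_use_json_mode(level)  # True for levels 4-5 per the docstring
```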
empathy_llm_toolkit/pattern_confidence.py CHANGED

@@ -1,5 +1,4 @@
-"""
-Pattern Confidence Scoring
+"""Pattern Confidence Scoring

 Tracks how often stored fixes resolve similar issues,
 building confidence scores over time.
@@ -85,8 +84,7 @@ class PatternUsageStats:


 class PatternConfidenceTracker:
-    """
-    Tracks pattern usage and calculates confidence scores.
+    """Tracks pattern usage and calculates confidence scores.

     Stores usage data in patterns/confidence/usage_stats.json
     """
@@ -151,8 +149,7 @@ class PatternConfidenceTracker:
         return self._stats[pattern_id]

     def record_suggestion(self, pattern_id: str) -> None:
-        """
-        Record that a pattern was suggested to the user.
+        """Record that a pattern was suggested to the user.

         Call this when a pattern is shown as a potential fix.
         """
@@ -168,13 +165,13 @@ class PatternConfidenceTracker:
         successful: bool = True,
         notes: str | None = None,
     ) -> None:
-        """
-        Record that a pattern fix was applied.
+        """Record that a pattern fix was applied.

         Args:
             pattern_id: The pattern that was applied
             successful: Whether the fix resolved the issue
             notes: Optional feedback notes
+
         """
         stats = self._get_or_create_stats(pattern_id)
         stats.times_applied += 1
@@ -191,7 +188,7 @@ class PatternConfidenceTracker:
                 "date": datetime.now().isoformat(),
                 "successful": successful,
                 "notes": notes,
-            }
+            },
         )

         self._save()
@@ -203,11 +200,11 @@ class PatternConfidenceTracker:
         )

     def get_pattern_stats(self, pattern_id: str) -> dict[str, Any]:
-        """
-        Get usage statistics for a pattern.
+        """Get usage statistics for a pattern.

         Returns:
             Dict with usage stats and calculated scores
+
         """
         stats = self._get_or_create_stats(pattern_id)
         return {
@@ -230,14 +227,14 @@ class PatternConfidenceTracker:
         return [self.get_pattern_stats(pid) for pid in self._stats]

     def get_top_patterns(self, limit: int = 10) -> list[dict[str, Any]]:
-        """
-        Get top patterns by confidence score.
+        """Get top patterns by confidence score.

         Args:
             limit: Maximum patterns to return

         Returns:
             List of pattern stats, sorted by confidence
+
         """
         self._ensure_loaded()
         all_stats = self.get_all_stats()
@@ -249,14 +246,14 @@ class PatternConfidenceTracker:
         return sorted_stats[:limit]

     def get_stale_patterns(self, days: int = 90) -> list[dict[str, Any]]:
-        """
-        Get patterns that haven't been used recently.
+        """Get patterns that haven't been used recently.

         Args:
             days: Number of days to consider stale

         Returns:
             List of stale pattern stats
+
         """
         self._ensure_loaded()
         stale = []
@@ -275,8 +272,7 @@ class PatternConfidenceTracker:
         return stale

     def update_pattern_summary(self) -> bool:
-        """
-        Update the patterns_summary.md with confidence scores.
+        """Update the patterns_summary.md with confidence scores.

         This adds a confidence section to the generated summary.
         """
@@ -297,7 +293,7 @@ class PatternConfidenceTracker:
             icon = "🟢" if score >= 0.8 else "🟡" if score >= 0.5 else "🔴"
             confidence_section.append(
                 f"- {icon} **{p['pattern_id']}**: {score:.0%} confidence "
-                f"({p['times_applied']} applied, {p['times_successful']} successful)"
+                f"({p['times_applied']} applied, {p['times_successful']} successful)",
             )

         confidence_section.append("")
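The tracker's public surface visible in this diff suggests a suggest → apply → score loop. A hypothetical usage sketch: the constructor arguments are not shown in these hunks, the name of the apply-recording method is a guess from its "pattern fix was applied" docstring, and the example pattern ID is borrowed from pattern_resolver's docstring; only the stats keys referenced in `update_pattern_summary` above are from the source:

```python
from empathy_llm_toolkit.pattern_confidence import PatternConfidenceTracker

tracker = PatternConfidenceTracker()  # assumed no-arg constructor

pattern_id = "bug_20251212_3c5b9951"  # illustrative ID
tracker.record_suggestion(pattern_id)  # pattern shown as a potential fix
tracker.record_application(  # method name guessed from its docstring
    pattern_id,
    successful=True,
    notes="stored fix resolved the issue",
)

for p in tracker.get_top_patterns(limit=5):
    print(p["pattern_id"], p["times_applied"], p["times_successful"])
```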
empathy_llm_toolkit/pattern_resolver.py CHANGED

@@ -1,5 +1,4 @@
-"""
-Pattern Resolution Module
+"""Pattern Resolution Module

 Provides CLI workflow for resolving investigating bug patterns
 by adding root cause, fix, and resolution time.
@@ -40,8 +39,7 @@ logger = logging.getLogger(__name__)


 class PatternResolver:
-    """
-    Resolves investigating bug patterns with root cause and fix information.
+    """Resolves investigating bug patterns with root cause and fix information.

     Searches through pattern directories to find matching bug IDs
     and updates them with resolution details.
@@ -52,14 +50,14 @@ class PatternResolver:
         self._debugging_dirs = ["debugging", "debugging_demo", "repo_test/debugging"]

     def find_bug(self, bug_id: str) -> tuple[Path | None, dict[str, Any] | None]:
-        """
-        Find a bug pattern by ID.
+        """Find a bug pattern by ID.

         Args:
             bug_id: The bug ID to find (e.g., "bug_20251212_3c5b9951")

         Returns:
             Tuple of (file_path, pattern_data) or (None, None) if not found
+
         """
         for debug_dir in self._debugging_dirs:
             dir_path = self.patterns_dir / debug_dir
@@ -88,11 +86,11 @@ class PatternResolver:
         return None, None

     def list_investigating(self) -> list[dict[str, Any]]:
-        """
-        List all bugs with status 'investigating'.
+        """List all bugs with status 'investigating'.

         Returns:
             List of bug patterns that need resolution
+
         """
         investigating = []

@@ -122,8 +120,7 @@ class PatternResolver:
         resolution_time_minutes: int = 0,
         resolved_by: str = "@developer",
     ) -> bool:
-        """
-        Resolve a bug pattern by updating its fields.
+        """Resolve a bug pattern by updating its fields.

         Args:
             bug_id: The bug ID to resolve
@@ -135,6 +132,7 @@ class PatternResolver:

         Returns:
             True if successfully resolved, False otherwise
+
         """
         file_path, pattern = self.find_bug(bug_id)

@@ -165,11 +163,11 @@ class PatternResolver:
         return False

     def regenerate_summary(self) -> bool:
-        """
-        Regenerate the patterns_summary.md file.
+        """Regenerate the patterns_summary.md file.

         Returns:
             True if successful, False otherwise
+
         """
         try:
             from empathy_llm_toolkit.pattern_summary import PatternSummaryGenerator
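A hypothetical walkthrough of the `PatternResolver` API as it appears after these edits; `find_bug` and `list_investigating` signatures are from the hunks above, while the no-argument constructor is an assumption:

```python
from empathy_llm_toolkit.pattern_resolver import PatternResolver

resolver = PatternResolver()  # constructor args not visible in this diff

# Bugs still marked 'investigating' are awaiting root cause and fix details.
for bug in resolver.list_investigating():
    print(bug)

# Look up a specific draft pattern by its ID.
file_path, pattern = resolver.find_bug("bug_20251212_3c5b9951")
if pattern is not None:
    print(f"found {file_path}")
```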
empathy_llm_toolkit/pattern_summary.py CHANGED

@@ -1,5 +1,4 @@
-"""
-Pattern Summary Generator
+"""Pattern Summary Generator

 Generates a markdown summary of stored patterns for inclusion in CLAUDE.md.
 This enables Claude Code sessions to have context about historical patterns.
@@ -29,8 +28,7 @@ logger = logging.getLogger(__name__)


 class PatternSummaryGenerator:
-    """
-    Generates markdown summaries of stored patterns.
+    """Generates markdown summaries of stored patterns.

     Scans the patterns directory and produces a concise summary
     suitable for inclusion in CLAUDE.md via @import.
@@ -119,11 +117,11 @@ class PatternSummaryGenerator:
         return snapshots

     def generate_markdown(self) -> str:
-        """
-        Generate a markdown summary of all patterns.
+        """Generate a markdown summary of all patterns.

         Returns:
             Markdown string suitable for CLAUDE.md inclusion
+
         """
         if not any([self._bug_patterns, self._security_decisions, self._tech_debt_history]):
             self.load_all_patterns()
@@ -157,7 +155,7 @@ class PatternSummaryGenerator:
                 "- **Security**: Check team decisions for false positives",
                 "- **Tech Debt**: Consider debt trajectory when planning refactoring work",
                 "",
-            ]
+            ],
         )

         return "\n".join(lines)
@@ -247,7 +245,9 @@ class PatternSummaryGenerator:

         # Sort by date
         sorted_snapshots = sorted(
-            self._tech_debt_history, key=lambda s: s.get("date", ""), reverse=True
+            self._tech_debt_history,
+            key=lambda s: s.get("date", ""),
+            reverse=True,
         )

         # Current state (most recent)
@@ -295,11 +295,11 @@ class PatternSummaryGenerator:
         return lines

     def write_to_file(self, output_path: str) -> None:
-        """
-        Write the markdown summary to a file.
+        """Write the markdown summary to a file.

         Args:
             output_path: Path to write the summary
+
         """
         output = Path(output_path)
         output.parent.mkdir(parents=True, exist_ok=True)
@@ -326,7 +326,9 @@ def main():
         help="Output file path (default: ./.claude/patterns_summary.md)",
     )
     parser.add_argument(
-        "--print", action="store_true", help="Print to stdout instead of writing to file"
+        "--print",
+        action="store_true",
+        help="Print to stdout instead of writing to file",
     )

     args = parser.parse_args()
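For orientation, the generator edited above can also be driven directly; `generate_markdown` and `write_to_file` are the signatures shown in this diff, while the no-argument constructor is an assumption:

```python
from empathy_llm_toolkit.pattern_summary import PatternSummaryGenerator

generator = PatternSummaryGenerator()  # constructor args not visible in this diff
markdown = generator.generate_markdown()  # CLAUDE.md-ready markdown string
generator.write_to_file("./.claude/patterns_summary.md")  # default path per main() above
print(markdown)
```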
empathy_llm_toolkit/providers.py
CHANGED
@@ -1,5 +1,4 @@
-"""
-LLM Provider Adapters
+"""LLM Provider Adapters

 Unified interface for different LLM providers (OpenAI, Anthropic, local models).

@@ -24,8 +23,7 @@ class LLMResponse:


 class BaseLLMProvider(ABC):
-    """
-    Base class for all LLM providers.
+    """Base class for all LLM providers.

     Provides unified interface regardless of backend.
     """
@@ -43,8 +41,7 @@ class BaseLLMProvider(ABC):
         max_tokens: int = 1024,
         **kwargs,
     ) -> LLMResponse:
-        """
-        Generate response from LLM.
+        """Generate response from LLM.

         Args:
             messages: List of {"role": "user/assistant", "content": "..."}
@@ -55,17 +52,15 @@ class BaseLLMProvider(ABC):

         Returns:
             LLMResponse with standardized format
+
         """
-        pass

     @abstractmethod
     def get_model_info(self) -> dict[str, Any]:
         """Get information about the model being used"""
-        pass

     def estimate_tokens(self, text: str) -> int:
-        """
-        Estimate token count for text.
+        """Estimate token count for text.

         Rough approximation: ~4 chars per token
         """
@@ -73,8 +68,7 @@ class BaseLLMProvider(ABC):


 class AnthropicProvider(BaseLLMProvider):
-    """
-    Anthropic (Claude) provider with enhanced features.
+    """Anthropic (Claude) provider with enhanced features.

     Supports Claude 3 family models with advanced capabilities:
     - Extended context windows (200K tokens)
@@ -100,7 +94,7 @@ class AnthropicProvider(BaseLLMProvider):
         if not api_key or not api_key.strip():
             raise ValueError(
                 "API key is required for Anthropic provider. "
-                "Provide via api_key parameter or ANTHROPIC_API_KEY environment variable"
+                "Provide via api_key parameter or ANTHROPIC_API_KEY environment variable",
             )

         # Lazy import to avoid requiring anthropic if not used
@@ -110,7 +104,7 @@ class AnthropicProvider(BaseLLMProvider):
             self.client = anthropic.Anthropic(api_key=api_key)
         except ImportError as e:
             raise ImportError(
-                "anthropic package required. Install with: pip install anthropic"
+                "anthropic package required. Install with: pip install anthropic",
             ) from e

     async def generate(
@@ -121,15 +115,13 @@ class AnthropicProvider(BaseLLMProvider):
         max_tokens: int = 1024,
         **kwargs,
     ) -> LLMResponse:
-        """
-        Generate response using Anthropic API with enhanced features.
+        """Generate response using Anthropic API with enhanced features.

         Claude-specific enhancements:
         - Prompt caching for repeated system prompts (90% cost reduction)
         - Extended context (200K tokens) for large codebase analysis
         - Thinking mode for complex reasoning tasks
         """
-
         # Build kwargs for Anthropic
         api_kwargs = {
             "model": self.model,
@@ -145,7 +137,7 @@ class AnthropicProvider(BaseLLMProvider):
                     "type": "text",
                     "text": system_prompt,
                     "cache_control": {"type": "ephemeral"},  # Cache for 5 minutes
-                }
+                },
             ]
         elif system_prompt:
             api_kwargs["system"] = system_prompt
@@ -202,10 +194,12 @@ class AnthropicProvider(BaseLLMProvider):
         )

     async def analyze_large_codebase(
-        self, codebase_files: list[dict[str, str]], analysis_prompt: str, **kwargs
+        self,
+        codebase_files: list[dict[str, str]],
+        analysis_prompt: str,
+        **kwargs,
     ) -> LLMResponse:
-        """
-        Analyze large codebases using Claude's 200K context window.
+        """Analyze large codebases using Claude's 200K context window.

         Claude-specific feature: Can process entire repositories in one call.

@@ -216,10 +210,11 @@ class AnthropicProvider(BaseLLMProvider):

         Returns:
             LLMResponse with analysis results
+
         """
         # Build context from all files
         file_context = "\n\n".join(
-            [f"# File: {file['path']}\n{file['content']}" for file in codebase_files]
+            [f"# File: {file['path']}\n{file['content']}" for file in codebase_files],
         )

         # Create system prompt with caching for file context
@@ -287,8 +282,7 @@ class AnthropicProvider(BaseLLMProvider):


 class OpenAIProvider(BaseLLMProvider):
-    """
-    OpenAI provider.
+    """OpenAI provider.

     Supports GPT-4, GPT-3.5, and other OpenAI models.
     """
@@ -301,7 +295,7 @@ class OpenAIProvider(BaseLLMProvider):
         if not api_key or not api_key.strip():
             raise ValueError(
                 "API key is required for OpenAI provider. "
-                "Provide via api_key parameter or OPENAI_API_KEY environment variable"
+                "Provide via api_key parameter or OPENAI_API_KEY environment variable",
             )

         # Lazy import
@@ -321,7 +315,6 @@ class OpenAIProvider(BaseLLMProvider):
         **kwargs,
     ) -> LLMResponse:
         """Generate response using OpenAI API"""
-
         # Add system prompt if provided
         if system_prompt:
             messages = [{"role": "system", "content": system_prompt}] + messages
@@ -373,8 +366,7 @@ class OpenAIProvider(BaseLLMProvider):


 class GeminiProvider(BaseLLMProvider):
-    """
-    Google Gemini provider with cost tracking integration.
+    """Google Gemini provider with cost tracking integration.

     Supports Gemini models:
     - gemini-2.0-flash-exp: Fast, cheap tier (1M context)
@@ -395,7 +387,7 @@ class GeminiProvider(BaseLLMProvider):
         if not api_key or not api_key.strip():
             raise ValueError(
                 "API key is required for Gemini provider. "
-                "Provide via api_key parameter or GOOGLE_API_KEY environment variable"
+                "Provide via api_key parameter or GOOGLE_API_KEY environment variable",
             )

         # Lazy import to avoid requiring google-generativeai if not used
@@ -407,7 +399,7 @@ class GeminiProvider(BaseLLMProvider):
             self.client = genai.GenerativeModel(model)
         except ImportError as e:
             raise ImportError(
-                "google-generativeai package required. Install with: pip install google-generativeai"
+                "google-generativeai package required. Install with: pip install google-generativeai",
             ) from e

     async def generate(
@@ -418,8 +410,7 @@ class GeminiProvider(BaseLLMProvider):
         max_tokens: int = 1024,
         **kwargs,
     ) -> LLMResponse:
-        """
-        Generate response using Google Gemini API.
+        """Generate response using Google Gemini API.

         Gemini-specific features:
         - Large context windows (1M-2M tokens)
@@ -454,7 +445,7 @@ class GeminiProvider(BaseLLMProvider):
         response = await loop.run_in_executor(
             None,
             lambda: model.generate_content(
-                gemini_messages,
+                gemini_messages,  # type: ignore[arg-type]
                 generation_config=generation_config,
             ),
         )
@@ -507,10 +498,9 @@ class GeminiProvider(BaseLLMProvider):
         """Determine tier from model name."""
         if "flash" in self.model.lower():
             return "cheap"
-
+        if "2.5" in self.model or "ultra" in self.model.lower():
             return "premium"
-
-        return "capable"
+        return "capable"

     def get_model_info(self) -> dict[str, Any]:
         """Get Gemini model information"""
@@ -550,8 +540,7 @@ class GeminiProvider(BaseLLMProvider):


 class LocalProvider(BaseLLMProvider):
-    """
-    Local model provider (Ollama, LM Studio, etc.).
+    """Local model provider (Ollama, LM Studio, etc.).

     For running models locally.
     """