empathy-framework 3.8.3__py3-none-any.whl → 3.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/METADATA +67 -7
  2. {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/RECORD +50 -39
  3. {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/top_level.txt +0 -4
  4. empathy_os/.empathy/costs.json +60 -0
  5. empathy_os/.empathy/discovery_stats.json +15 -0
  6. empathy_os/.empathy/workflow_runs.json +45 -0
  7. empathy_os/cli.py +372 -13
  8. empathy_os/cli_unified.py +111 -0
  9. empathy_os/config/xml_config.py +45 -3
  10. empathy_os/config.py +46 -2
  11. empathy_os/memory/control_panel.py +128 -8
  12. empathy_os/memory/long_term.py +26 -4
  13. empathy_os/memory/short_term.py +110 -0
  14. empathy_os/models/token_estimator.py +25 -0
  15. empathy_os/pattern_library.py +81 -8
  16. empathy_os/patterns/debugging/all_patterns.json +81 -0
  17. empathy_os/patterns/debugging/workflow_20260107_1770825e.json +77 -0
  18. empathy_os/patterns/refactoring_memory.json +89 -0
  19. empathy_os/telemetry/__init__.py +11 -0
  20. empathy_os/telemetry/cli.py +451 -0
  21. empathy_os/telemetry/usage_tracker.py +475 -0
  22. {test_generator → empathy_os/test_generator}/generator.py +1 -0
  23. empathy_os/tier_recommender.py +422 -0
  24. empathy_os/workflows/base.py +223 -23
  25. empathy_os/workflows/config.py +50 -5
  26. empathy_os/workflows/tier_tracking.py +408 -0
  27. {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/WHEEL +0 -0
  28. {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/entry_points.txt +0 -0
  29. {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/licenses/LICENSE +0 -0
  30. {hot_reload → empathy_os/hot_reload}/README.md +0 -0
  31. {hot_reload → empathy_os/hot_reload}/__init__.py +0 -0
  32. {hot_reload → empathy_os/hot_reload}/config.py +0 -0
  33. {hot_reload → empathy_os/hot_reload}/integration.py +0 -0
  34. {hot_reload → empathy_os/hot_reload}/reloader.py +0 -0
  35. {hot_reload → empathy_os/hot_reload}/watcher.py +0 -0
  36. {hot_reload → empathy_os/hot_reload}/websocket.py +0 -0
  37. {scaffolding → empathy_os/scaffolding}/README.md +0 -0
  38. {scaffolding → empathy_os/scaffolding}/__init__.py +0 -0
  39. {scaffolding → empathy_os/scaffolding}/__main__.py +0 -0
  40. {scaffolding → empathy_os/scaffolding}/cli.py +0 -0
  41. {test_generator → empathy_os/test_generator}/__init__.py +0 -0
  42. {test_generator → empathy_os/test_generator}/__main__.py +0 -0
  43. {test_generator → empathy_os/test_generator}/cli.py +0 -0
  44. {test_generator → empathy_os/test_generator}/risk_analyzer.py +0 -0
  45. {workflow_patterns → empathy_os/workflow_patterns}/__init__.py +0 -0
  46. {workflow_patterns → empathy_os/workflow_patterns}/behavior.py +0 -0
  47. {workflow_patterns → empathy_os/workflow_patterns}/core.py +0 -0
  48. {workflow_patterns → empathy_os/workflow_patterns}/output.py +0 -0
  49. {workflow_patterns → empathy_os/workflow_patterns}/registry.py +0 -0
  50. {workflow_patterns → empathy_os/workflow_patterns}/structural.py +0 -0
@@ -126,6 +126,9 @@ class PatternLibrary:
126
126
  agent_id: ID of contributing agent
127
127
  pattern: Pattern to contribute
128
128
 
129
+ Raises:
130
+ ValueError: If agent_id is empty or pattern.id already exists
131
+
129
132
  Example:
130
133
  >>> pattern = Pattern(
131
134
  ... id="pat_002",
@@ -138,6 +141,16 @@ class PatternLibrary:
138
141
  >>> library.contribute_pattern("agent_1", pattern)
139
142
 
140
143
  """
144
+ # Validate inputs
145
+ if not agent_id or not agent_id.strip():
146
+ raise ValueError("agent_id cannot be empty")
147
+
148
+ if pattern.id in self.patterns:
149
+ raise ValueError(
150
+ f"Pattern '{pattern.id}' already exists. "
151
+ f"Use a different ID or remove the existing pattern first."
152
+ )
153
+
141
154
  # Store pattern
142
155
  self.patterns[pattern.id] = pattern
143
156
 
@@ -170,6 +183,10 @@ class PatternLibrary:
170
183
  Returns:
171
184
  List of PatternMatch objects, sorted by relevance
172
185
 
186
+ Raises:
187
+ ValueError: If agent_id is empty, min_confidence out of range, or limit < 1
188
+ TypeError: If context is not a dictionary
189
+
173
190
  Example:
174
191
  >>> context = {
175
192
  ... "user_role": "developer",
@@ -179,6 +196,19 @@ class PatternLibrary:
179
196
  >>> matches = library.query_patterns("debug_agent", context, min_confidence=0.7)
180
197
 
181
198
  """
199
+ # Validate inputs
200
+ if not agent_id or not agent_id.strip():
201
+ raise ValueError("agent_id cannot be empty")
202
+
203
+ if not isinstance(context, dict):
204
+ raise TypeError(f"context must be dict, got {type(context).__name__}")
205
+
206
+ if not 0.0 <= min_confidence <= 1.0:
207
+ raise ValueError(f"min_confidence must be 0-1, got {min_confidence}")
208
+
209
+ if limit < 1:
210
+ raise ValueError(f"limit must be positive, got {limit}")
211
+
182
212
  matches: list[PatternMatch] = []
183
213
 
184
214
  for pattern in self.patterns.values():
@@ -226,10 +256,16 @@ class PatternLibrary:
226
256
  pattern_id: ID of pattern that was used
227
257
  success: Whether using the pattern was successful
228
258
 
259
+ Raises:
260
+ ValueError: If pattern_id does not exist
261
+
229
262
  """
230
263
  pattern = self.patterns.get(pattern_id)
231
- if pattern:
232
- pattern.record_usage(success)
264
+ if not pattern:
265
+ raise ValueError(
266
+ f"Pattern '{pattern_id}' not found. Cannot record outcome."
267
+ )
268
+ pattern.record_usage(success)
233
269
 
234
270
  def link_patterns(self, pattern_id_1: str, pattern_id_2: str):
235
271
  """Create a link between related patterns
@@ -240,7 +276,21 @@ class PatternLibrary:
240
276
  pattern_id_1: First pattern ID
241
277
  pattern_id_2: Second pattern ID
242
278
 
279
+ Raises:
280
+ ValueError: If either pattern ID doesn't exist or IDs are the same
281
+
243
282
  """
283
+ # Validate patterns exist
284
+ if pattern_id_1 not in self.patterns:
285
+ raise ValueError(f"Pattern '{pattern_id_1}' does not exist")
286
+
287
+ if pattern_id_2 not in self.patterns:
288
+ raise ValueError(f"Pattern '{pattern_id_2}' does not exist")
289
+
290
+ if pattern_id_1 == pattern_id_2:
291
+ raise ValueError("Cannot link a pattern to itself")
292
+
293
+ # Create bidirectional link
244
294
  if pattern_id_1 in self.pattern_graph:
245
295
  if pattern_id_2 not in self.pattern_graph[pattern_id_1]:
246
296
  self.pattern_graph[pattern_id_1].append(pattern_id_2)
@@ -249,27 +299,39 @@ class PatternLibrary:
249
299
  if pattern_id_1 not in self.pattern_graph[pattern_id_2]:
250
300
  self.pattern_graph[pattern_id_2].append(pattern_id_1)
251
301
 
252
- def get_related_patterns(self, pattern_id: str, depth: int = 1) -> list[Pattern]:
302
+ def get_related_patterns(
303
+ self,
304
+ pattern_id: str,
305
+ depth: int = 1,
306
+ _visited: set[str] | None = None
307
+ ) -> list[Pattern]:
253
308
  """Get patterns related to a given pattern
254
309
 
255
310
  Args:
256
311
  pattern_id: Source pattern ID
257
312
  depth: How many hops to traverse (1 = immediate neighbors)
313
+ _visited: Internal tracking to prevent cycles (do not use directly)
258
314
 
259
315
  Returns:
260
- List of related patterns
316
+ List of related patterns (no duplicates, cycle-safe)
261
317
 
262
318
  """
319
+ # Initialize visited set on first call
320
+ if _visited is None:
321
+ _visited = {pattern_id}
322
+
263
323
  if depth <= 0 or pattern_id not in self.pattern_graph:
264
324
  return []
265
325
 
266
326
  related_ids = set(self.pattern_graph[pattern_id])
267
327
 
268
328
  if depth > 1:
269
- # Traverse deeper
329
+ # Traverse deeper (avoiding cycles)
270
330
  for related_id in list(related_ids):
271
- deeper = self.get_related_patterns(related_id, depth - 1)
272
- related_ids.update(p.id for p in deeper)
331
+ if related_id not in _visited:
332
+ _visited.add(related_id)
333
+ deeper = self.get_related_patterns(related_id, depth - 1, _visited)
334
+ related_ids.update(p.id for p in deeper)
273
335
 
274
336
  # Remove source pattern
275
337
  related_ids.discard(pattern_id)
@@ -355,8 +417,19 @@ class PatternLibrary:
355
417
  ) -> tuple[float, list[str]]:
356
418
  """Calculate how relevant a pattern is to current context
357
419
 
420
+ Args:
421
+ pattern: Pattern to evaluate
422
+ context: Current context to match against
423
+
358
424
  Returns:
359
- (relevance_score, matching_factors)
425
+ tuple: (relevance_score, matching_factors)
426
+ - relevance_score (float): 0.0-1.0 relevance score
427
+ - matching_factors (list[str]): Human-readable reasons for match
428
+
429
+ Algorithm:
430
+ - 50% weight: Context key/value matches
431
+ - 30% weight: Tag matches
432
+ - 20% weight: Pattern success rate boost
360
433
 
361
434
  """
362
435
  relevance = 0.0
@@ -0,0 +1,81 @@
1
+ {
2
+ "patterns": [
3
+ {
4
+ "pattern_id": "workflow_20260107_1770825e",
5
+ "bug_type": "refactoring",
6
+ "status": "resolved",
7
+ "root_cause": "Workflow: refactor-plan - Prioritize tech debt based on trajectory and impact",
8
+ "fix": "Completed via refactor-plan workflow",
9
+ "resolved_by": "@empathy_framework",
10
+ "resolved_at": "2026-01-07",
11
+ "files_affected": [],
12
+ "source": "workflow_tracking",
13
+ "tier_progression": {
14
+ "methodology": "AI-ADDIE",
15
+ "recommended_tier": "CHEAP",
16
+ "starting_tier": "PREMIUM",
17
+ "successful_tier": "PREMIUM",
18
+ "total_attempts": 3,
19
+ "tier_history": [
20
+ {
21
+ "tier": "CHEAP",
22
+ "attempts": 1,
23
+ "success": {
24
+ "attempt": 1,
25
+ "quality_gates_passed": [
26
+ "execution",
27
+ "output"
28
+ ]
29
+ }
30
+ },
31
+ {
32
+ "tier": "CAPABLE",
33
+ "attempts": 2,
34
+ "success": {
35
+ "attempt": 2,
36
+ "quality_gates_passed": [
37
+ "execution",
38
+ "output"
39
+ ]
40
+ }
41
+ },
42
+ {
43
+ "tier": "PREMIUM",
44
+ "attempts": 1,
45
+ "success": {
46
+ "attempt": 1,
47
+ "quality_gates_passed": [
48
+ "execution",
49
+ "output"
50
+ ]
51
+ }
52
+ }
53
+ ],
54
+ "cost_breakdown": {
55
+ "total_cost": 0.021,
56
+ "cost_if_always_premium": 0.104,
57
+ "savings_percent": 80.0
58
+ },
59
+ "quality_metrics": {
60
+ "tests_passed": true,
61
+ "health_score_before": 73,
62
+ "health_score_after": 73
63
+ },
64
+ "xml_protocol_compliance": {
65
+ "prompt_used_xml": true,
66
+ "response_used_xml": true,
67
+ "all_sections_present": true,
68
+ "test_evidence_provided": true,
69
+ "false_complete_avoided": true
70
+ }
71
+ },
72
+ "workflow_metadata": {
73
+ "workflow_name": "refactor-plan",
74
+ "workflow_id": "1770825e-b645-4c66-b4e1-87b9463b0082",
75
+ "duration_seconds": 3.24,
76
+ "started_at": "2026-01-07T06:30:40.696987",
77
+ "completed_at": "2026-01-07T06:30:43.935316"
78
+ }
79
+ }
80
+ ]
81
+ }
@@ -0,0 +1,77 @@
1
+ {
2
+ "pattern_id": "workflow_20260107_1770825e",
3
+ "bug_type": "refactoring",
4
+ "status": "resolved",
5
+ "root_cause": "Workflow: refactor-plan - Prioritize tech debt based on trajectory and impact",
6
+ "fix": "Completed via refactor-plan workflow",
7
+ "resolved_by": "@empathy_framework",
8
+ "resolved_at": "2026-01-07",
9
+ "files_affected": [],
10
+ "source": "workflow_tracking",
11
+ "tier_progression": {
12
+ "methodology": "AI-ADDIE",
13
+ "recommended_tier": "CHEAP",
14
+ "starting_tier": "PREMIUM",
15
+ "successful_tier": "PREMIUM",
16
+ "total_attempts": 3,
17
+ "tier_history": [
18
+ {
19
+ "tier": "CHEAP",
20
+ "attempts": 1,
21
+ "success": {
22
+ "attempt": 1,
23
+ "quality_gates_passed": [
24
+ "execution",
25
+ "output"
26
+ ]
27
+ }
28
+ },
29
+ {
30
+ "tier": "CAPABLE",
31
+ "attempts": 2,
32
+ "success": {
33
+ "attempt": 2,
34
+ "quality_gates_passed": [
35
+ "execution",
36
+ "output"
37
+ ]
38
+ }
39
+ },
40
+ {
41
+ "tier": "PREMIUM",
42
+ "attempts": 1,
43
+ "success": {
44
+ "attempt": 1,
45
+ "quality_gates_passed": [
46
+ "execution",
47
+ "output"
48
+ ]
49
+ }
50
+ }
51
+ ],
52
+ "cost_breakdown": {
53
+ "total_cost": 0.021,
54
+ "cost_if_always_premium": 0.104,
55
+ "savings_percent": 80.0
56
+ },
57
+ "quality_metrics": {
58
+ "tests_passed": true,
59
+ "health_score_before": 73,
60
+ "health_score_after": 73
61
+ },
62
+ "xml_protocol_compliance": {
63
+ "prompt_used_xml": true,
64
+ "response_used_xml": true,
65
+ "all_sections_present": true,
66
+ "test_evidence_provided": true,
67
+ "false_complete_avoided": true
68
+ }
69
+ },
70
+ "workflow_metadata": {
71
+ "workflow_name": "refactor-plan",
72
+ "workflow_id": "1770825e-b645-4c66-b4e1-87b9463b0082",
73
+ "duration_seconds": 3.24,
74
+ "started_at": "2026-01-07T06:30:40.696987",
75
+ "completed_at": "2026-01-07T06:30:43.935316"
76
+ }
77
+ }
@@ -0,0 +1,89 @@
1
+ {
2
+ "version": "1.0",
3
+ "updated_at": "2026-01-07T06:30:43.930352",
4
+ "node_count": 5,
5
+ "edge_count": 0,
6
+ "nodes": [
7
+ {
8
+ "id": "refactoring_analysis_20260107063043_663dbce03854",
9
+ "type": "pattern",
10
+ "name": "refactor:workflows/bug_predict.py",
11
+ "description": "Found 1 refactoring opportunities:\n - 1 medium impact\n\nTop categories:\n - other: 1",
12
+ "source_wizard": "refactoring_crew",
13
+ "source_file": "",
14
+ "source_line": null,
15
+ "severity": "",
16
+ "confidence": 1.0,
17
+ "metadata": {},
18
+ "tags": [],
19
+ "created_at": "2026-01-07T06:30:43.460922",
20
+ "updated_at": "2026-01-07T06:30:43.460924",
21
+ "status": "open"
22
+ },
23
+ {
24
+ "id": "refactoring_analysis_20260107063043_fc92406de1d5",
25
+ "type": "pattern",
26
+ "name": "refactor:monitoring/alerts_cli.py",
27
+ "description": "Found 1 refactoring opportunities:\n - 1 medium impact\n\nTop categories:\n - other: 1",
28
+ "source_wizard": "refactoring_crew",
29
+ "source_file": "",
30
+ "source_line": null,
31
+ "severity": "",
32
+ "confidence": 1.0,
33
+ "metadata": {},
34
+ "tags": [],
35
+ "created_at": "2026-01-07T06:30:43.593291",
36
+ "updated_at": "2026-01-07T06:30:43.593291",
37
+ "status": "open"
38
+ },
39
+ {
40
+ "id": "refactoring_analysis_20260107063043_5cce2d4230a4",
41
+ "type": "pattern",
42
+ "name": "refactor:memory/edges.py",
43
+ "description": "Found 1 refactoring opportunities:\n - 1 medium impact\n\nTop categories:\n - other: 1",
44
+ "source_wizard": "refactoring_crew",
45
+ "source_file": "",
46
+ "source_line": null,
47
+ "severity": "",
48
+ "confidence": 1.0,
49
+ "metadata": {},
50
+ "tags": [],
51
+ "created_at": "2026-01-07T06:30:43.682805",
52
+ "updated_at": "2026-01-07T06:30:43.682806",
53
+ "status": "open"
54
+ },
55
+ {
56
+ "id": "refactoring_analysis_20260107063043_0ef136ce11fb",
57
+ "type": "pattern",
58
+ "name": "refactor:workflows/refactor_plan.py",
59
+ "description": "Found 1 refactoring opportunities:\n - 1 medium impact\n\nTop categories:\n - other: 1",
60
+ "source_wizard": "refactoring_crew",
61
+ "source_file": "",
62
+ "source_line": null,
63
+ "severity": "",
64
+ "confidence": 1.0,
65
+ "metadata": {},
66
+ "tags": [],
67
+ "created_at": "2026-01-07T06:30:43.796584",
68
+ "updated_at": "2026-01-07T06:30:43.796586",
69
+ "status": "open"
70
+ },
71
+ {
72
+ "id": "refactoring_analysis_20260107063043_3c6673a7bb17",
73
+ "type": "pattern",
74
+ "name": "refactor:workflows/new_sample_workflow1.py",
75
+ "description": "Found 1 refactoring opportunities:\n - 1 medium impact\n\nTop categories:\n - other: 1",
76
+ "source_wizard": "refactoring_crew",
77
+ "source_file": "",
78
+ "source_line": null,
79
+ "severity": "",
80
+ "confidence": 1.0,
81
+ "metadata": {},
82
+ "tags": [],
83
+ "created_at": "2026-01-07T06:30:43.930183",
84
+ "updated_at": "2026-01-07T06:30:43.930183",
85
+ "status": "open"
86
+ }
87
+ ],
88
+ "edges": []
89
+ }
@@ -0,0 +1,11 @@
1
+ """Telemetry tracking for Empathy Framework.
2
+
3
+ Privacy-first, local-only usage tracking to measure actual cost savings.
4
+
5
+ Copyright 2025 Smart-AI-Memory
6
+ Licensed under Fair Source License 0.9
7
+ """
8
+
9
+ from .usage_tracker import UsageTracker
10
+
11
+ __all__ = ["UsageTracker"]