empathy-framework 3.8.3-py3-none-any.whl → 3.9.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/METADATA +67 -7
  2. {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/RECORD +50 -39
  3. {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/top_level.txt +0 -4
  4. empathy_os/.empathy/costs.json +60 -0
  5. empathy_os/.empathy/discovery_stats.json +15 -0
  6. empathy_os/.empathy/workflow_runs.json +45 -0
  7. empathy_os/cli.py +372 -13
  8. empathy_os/cli_unified.py +111 -0
  9. empathy_os/config/xml_config.py +45 -3
  10. empathy_os/config.py +46 -2
  11. empathy_os/memory/control_panel.py +128 -8
  12. empathy_os/memory/long_term.py +26 -4
  13. empathy_os/memory/short_term.py +110 -0
  14. empathy_os/models/token_estimator.py +25 -0
  15. empathy_os/pattern_library.py +81 -8
  16. empathy_os/patterns/debugging/all_patterns.json +81 -0
  17. empathy_os/patterns/debugging/workflow_20260107_1770825e.json +77 -0
  18. empathy_os/patterns/refactoring_memory.json +89 -0
  19. empathy_os/telemetry/__init__.py +11 -0
  20. empathy_os/telemetry/cli.py +451 -0
  21. empathy_os/telemetry/usage_tracker.py +475 -0
  22. {test_generator → empathy_os/test_generator}/generator.py +1 -0
  23. empathy_os/tier_recommender.py +422 -0
  24. empathy_os/workflows/base.py +223 -23
  25. empathy_os/workflows/config.py +50 -5
  26. empathy_os/workflows/tier_tracking.py +408 -0
  27. {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/WHEEL +0 -0
  28. {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/entry_points.txt +0 -0
  29. {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/licenses/LICENSE +0 -0
  30. {hot_reload → empathy_os/hot_reload}/README.md +0 -0
  31. {hot_reload → empathy_os/hot_reload}/__init__.py +0 -0
  32. {hot_reload → empathy_os/hot_reload}/config.py +0 -0
  33. {hot_reload → empathy_os/hot_reload}/integration.py +0 -0
  34. {hot_reload → empathy_os/hot_reload}/reloader.py +0 -0
  35. {hot_reload → empathy_os/hot_reload}/watcher.py +0 -0
  36. {hot_reload → empathy_os/hot_reload}/websocket.py +0 -0
  37. {scaffolding → empathy_os/scaffolding}/README.md +0 -0
  38. {scaffolding → empathy_os/scaffolding}/__init__.py +0 -0
  39. {scaffolding → empathy_os/scaffolding}/__main__.py +0 -0
  40. {scaffolding → empathy_os/scaffolding}/cli.py +0 -0
  41. {test_generator → empathy_os/test_generator}/__init__.py +0 -0
  42. {test_generator → empathy_os/test_generator}/__main__.py +0 -0
  43. {test_generator → empathy_os/test_generator}/cli.py +0 -0
  44. {test_generator → empathy_os/test_generator}/risk_analyzer.py +0 -0
  45. {workflow_patterns → empathy_os/workflow_patterns}/__init__.py +0 -0
  46. {workflow_patterns → empathy_os/workflow_patterns}/behavior.py +0 -0
  47. {workflow_patterns → empathy_os/workflow_patterns}/core.py +0 -0
  48. {workflow_patterns → empathy_os/workflow_patterns}/output.py +0 -0
  49. {workflow_patterns → empathy_os/workflow_patterns}/registry.py +0 -0
  50. {workflow_patterns → empathy_os/workflow_patterns}/structural.py +0 -0
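Note that items 22 and 30–50 are renames: the previously top-level packages hot_reload, scaffolding, test_generator, and workflow_patterns now ship inside the empathy_os package, and top_level.txt drops the four old entries. A minimal sketch of what that means for imports, assuming only the module paths shown in the file list above (no symbols inside those modules are visible in this diff):

# empathy-framework 3.8.3 — packages installed at the top level
#     from hot_reload import reloader
#     from scaffolding import cli as scaffolding_cli

# empathy-framework 3.9.1 — same modules, now under the empathy_os namespace
from empathy_os.hot_reload import reloader
from empathy_os.scaffolding import cli as scaffolding_cli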
empathy_os/workflows/tier_tracking.py (new file)
@@ -0,0 +1,408 @@
"""
Automatic tier recommendation and progression tracking for workflows.

Integrates TierRecommender into workflows to:
1. Auto-suggest optimal tier at workflow start
2. Track tier progression during execution
3. Save tier progression data automatically

Copyright 2025 Smart-AI-Memory
Licensed under Fair Source License 0.9
"""

import json
import uuid
from dataclasses import asdict, dataclass
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional

from empathy_os.logging_config import get_logger

logger = get_logger(__name__)


@dataclass
class TierAttempt:
    """Record of a single tier attempt."""

    tier: str
    attempt: int
    success: bool
    quality_gate_failed: Optional[str] = None
    quality_gates_passed: Optional[List[str]] = None


@dataclass
class WorkflowTierProgression:
    """Track tier progression for a workflow run."""

    workflow_name: str
    workflow_id: str
    bug_description: str
    files_affected: List[str]
    bug_type: str

    # Tier progression
    recommended_tier: str
    starting_tier: str
    successful_tier: str
    total_attempts: int
    tier_history: List[Dict[str, Any]]

    # Costs
    total_cost: float
    cost_if_always_premium: float
    savings_percent: float

    # Quality
    tests_passed: bool
    error_occurred: bool

    # Metadata
    started_at: str
    completed_at: str
    duration_seconds: float

    # Optional fields must come last
    error_message: Optional[str] = None


class WorkflowTierTracker:
    """
    Automatically track tier progression for workflow runs.

    Usage in BaseWorkflow:
        tracker = WorkflowTierTracker(workflow_name, description)
        tracker.show_recommendation(files_affected)
        # ... run workflow ...
        tracker.save_progression(result)
    """

    TIER_COSTS = {
        "cheap": 0.030,
        "capable": 0.090,
        "premium": 0.450,
    }

    def __init__(
        self,
        workflow_name: str,
        workflow_description: str,
        patterns_dir: Optional[Path] = None,
    ):
        """
        Initialize tier tracker for a workflow.

        Args:
            workflow_name: Name of the workflow
            workflow_description: Description/purpose of workflow
            patterns_dir: Directory to save tier progression patterns
        """
        self.workflow_name = workflow_name
        self.workflow_description = workflow_description
        self.workflow_id = str(uuid.uuid4())
        self.started_at = datetime.now()

        if patterns_dir is None:
            patterns_dir = Path.cwd() / "patterns" / "debugging"
        self.patterns_dir = Path(patterns_dir)
        self.patterns_dir.mkdir(parents=True, exist_ok=True)

        self.recommended_tier: Optional[str] = None
        self.starting_tier: Optional[str] = None
        self.tier_attempts: List[TierAttempt] = []

    def show_recommendation(
        self,
        files_affected: Optional[List[str]] = None,
        show_ui: bool = True,
    ) -> str:
        """
        Show tier recommendation at workflow start.

        Args:
            files_affected: Files involved in this workflow run
            show_ui: Whether to print recommendation to console

        Returns:
            Recommended tier (CHEAP, CAPABLE, or PREMIUM)
        """
        try:
            from empathy_os.tier_recommender import TierRecommender

            recommender = TierRecommender()
            result = recommender.recommend(
                bug_description=self.workflow_description,
                files_affected=files_affected or [],
            )

            self.recommended_tier = result.tier

            if show_ui:
                self._print_recommendation(result)

            return result.tier

        except Exception as e:
            logger.debug(f"Could not get tier recommendation: {e}")
            # Fallback to CHEAP if recommendation fails
            self.recommended_tier = "CHEAP"
            return "CHEAP"

    def _print_recommendation(self, result):
        """Print tier recommendation to console."""
        from rich.console import Console
        from rich.panel import Panel

        console = Console()

        confidence_color = "green" if result.confidence > 0.7 else "yellow"

        message = f"""[bold]Workflow:[/bold] {self.workflow_name}
[bold]Description:[/bold] {self.workflow_description}

[bold cyan]💡 Tier Recommendation[/bold cyan]
📍 Recommended: [bold]{result.tier}[/bold]
🎯 Confidence: [{confidence_color}]{result.confidence * 100:.0f}%[/{confidence_color}]
💰 Expected Cost: ${result.expected_cost:.3f}
🔄 Expected Attempts: {result.expected_attempts:.1f}

[dim]Reasoning: {result.reasoning}[/dim]"""

        if result.fallback_used:
            message += "\n\n[yellow]⚠️ Using default - limited historical data[/yellow]"
        else:
            message += f"\n\n[green]✅ Based on {result.similar_patterns_count} similar patterns[/green]"

        console.print(Panel(message, title="🎯 Auto Tier Recommendation", border_style="cyan"))

    def record_tier_attempt(
        self,
        tier: str,
        attempt: int,
        success: bool,
        quality_gate_failed: Optional[str] = None,
        quality_gates_passed: Optional[List[str]] = None,
    ):
        """Record a tier attempt during workflow execution."""
        self.tier_attempts.append(
            TierAttempt(
                tier=tier,
                attempt=attempt,
                success=success,
                quality_gate_failed=quality_gate_failed,
                quality_gates_passed=quality_gates_passed,
            )
        )

    def save_progression(
        self,
        workflow_result: Any,
        files_affected: Optional[List[str]] = None,
        bug_type: str = "workflow_run",
    ) -> Optional[Path]:
        """
        Save tier progression data after workflow completion.

        Args:
            workflow_result: WorkflowResult from workflow execution
            files_affected: Files processed by workflow
            bug_type: Type of issue being addressed

        Returns:
            Path to saved pattern file, or None if save failed
        """
        try:
            completed_at = datetime.now()
            duration = (completed_at - self.started_at).total_seconds()

            # Determine successful tier from workflow result
            successful_tier = self._determine_successful_tier(workflow_result)
            self.starting_tier = self.starting_tier or successful_tier

            # Build tier history from stages
            tier_history = self._build_tier_history(workflow_result)

            # Calculate costs
            total_cost = (
                workflow_result.cost_report.get("total", 0)
                if isinstance(workflow_result.cost_report, dict)
                else sum(stage.cost for stage in workflow_result.stages)
            )
            cost_if_premium = self._estimate_premium_cost(workflow_result)
            savings_percent = (
                ((cost_if_premium - total_cost) / cost_if_premium * 100)
                if cost_if_premium > 0
                else 0
            )

            # Create progression record
            progression = {
                "pattern_id": f"workflow_{datetime.now().strftime('%Y%m%d')}_{self.workflow_id[:8]}",
                "bug_type": bug_type,
                "status": "resolved" if workflow_result.error is None else "failed",
                "root_cause": f"Workflow: {self.workflow_name} - {self.workflow_description}",
                "fix": f"Completed via {self.workflow_name} workflow",
                "resolved_by": "@empathy_framework",
                "resolved_at": completed_at.strftime("%Y-%m-%d"),
                "files_affected": files_affected or [],
                "source": "workflow_tracking",

                "tier_progression": {
                    "methodology": "AI-ADDIE",
                    "recommended_tier": self.recommended_tier or self.starting_tier,
                    "starting_tier": self.starting_tier,
                    "successful_tier": successful_tier,
                    "total_attempts": len(tier_history),
                    "tier_history": tier_history,
                    "cost_breakdown": {
                        "total_cost": round(total_cost, 3),
                        "cost_if_always_premium": round(cost_if_premium, 3),
                        "savings_percent": round(savings_percent, 1),
                    },
                    "quality_metrics": {
                        "tests_passed": workflow_result.error is None,
                        "health_score_before": 73,  # Default
                        "health_score_after": 73,
                    },
                    "xml_protocol_compliance": {
                        "prompt_used_xml": True,
                        "response_used_xml": True,
                        "all_sections_present": True,
                        "test_evidence_provided": True,
                        "false_complete_avoided": workflow_result.error is None,
                    },
                },

                "workflow_metadata": {
                    "workflow_name": self.workflow_name,
                    "workflow_id": self.workflow_id,
                    "duration_seconds": round(duration, 2),
                    "started_at": self.started_at.isoformat(),
                    "completed_at": completed_at.isoformat(),
                },
            }

            # Save to individual pattern file
            pattern_file = self.patterns_dir / f"{progression['pattern_id']}.json"
            with open(pattern_file, "w") as f:
                json.dump(progression, f, indent=2)

            logger.info(f"💾 Saved tier progression: {pattern_file}")

            # Also update consolidated patterns file
            self._update_consolidated_patterns(progression)

            return pattern_file

        except Exception as e:
            logger.warning(f"Failed to save tier progression: {e}")
            return None

    def _determine_successful_tier(self, workflow_result: Any) -> str:
        """Determine which tier successfully completed the workflow."""
        if not workflow_result.stages:
            return "CHEAP"

        # Use the highest tier that was actually used
        tiers_used = [
            stage.tier.value if hasattr(stage.tier, "value") else str(stage.tier).lower()
            for stage in workflow_result.stages
        ]

        if "premium" in tiers_used:
            return "PREMIUM"
        elif "capable" in tiers_used:
            return "CAPABLE"
        else:
            return "CHEAP"

    def _build_tier_history(self, workflow_result: Any) -> List[Dict[str, Any]]:
        """Build tier history from workflow stages."""
        tier_groups: Dict[str, List[Any]] = {}

        # Group stages by tier
        for stage in workflow_result.stages:
            tier = stage.tier.value if hasattr(stage.tier, "value") else str(stage.tier).lower()
            tier_upper = tier.upper()
            if tier_upper not in tier_groups:
                tier_groups[tier_upper] = []
            tier_groups[tier_upper].append(stage)

        # Build history entries
        history = []
        for tier, stages in tier_groups.items():
            # Check if any stage failed
            failures = []
            success_stage = None

            for i, stage in enumerate(stages, 1):
                if hasattr(stage, "error") and stage.error:
                    failures.append({"attempt": i, "quality_gate_failed": "execution"})
                else:
                    success_stage = i

            entry = {
                "tier": tier,
                "attempts": len(stages),
            }

            if failures:
                entry["failures"] = failures

            if success_stage:
                entry["success"] = {
                    "attempt": success_stage,
                    "quality_gates_passed": ["execution", "output"],
                }

            history.append(entry)

        return history

    def _estimate_premium_cost(self, workflow_result: Any) -> float:
        """Estimate what the cost would be if all stages used PREMIUM tier."""
        _total_tokens = sum(
            (stage.input_tokens or 0) + (stage.output_tokens or 0)
            for stage in workflow_result.stages
        )

        # Calculate actual cost from stages
        actual_cost = sum(stage.cost for stage in workflow_result.stages)

        # Rough estimate: PREMIUM tier is ~15x more expensive than CHEAP
        return actual_cost * 5  # Conservative multiplier

    def _update_consolidated_patterns(self, progression: Dict[str, Any]):
        """Update the consolidated patterns.json file."""
        consolidated_file = self.patterns_dir / "all_patterns.json"

        try:
            if consolidated_file.exists():
                with open(consolidated_file) as f:
                    data = json.load(f)
                if "patterns" not in data:
                    data = {"patterns": []}
            else:
                data = {"patterns": []}

            # Add new progression
            data["patterns"].append(progression)

            # Save updated file
            with open(consolidated_file, "w") as f:
                json.dump(data, f, indent=2)

        except Exception as e:
            logger.warning(f"Could not update consolidated patterns: {e}")


def auto_recommend_tier(
    workflow_name: str,
    workflow_description: str,
    files_affected: Optional[List[str]] = None,
) -> str:
    """
    Quick helper to get tier recommendation without tracker.

    Args:
        workflow_name: Name of workflow
        workflow_description: What the workflow does
        files_affected: Files involved

    Returns:
        Recommended tier
    """
    tracker = WorkflowTierTracker(workflow_name, workflow_description)
    return tracker.show_recommendation(files_affected, show_ui=False)
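The class docstring above already sketches the intended call pattern inside BaseWorkflow. For reference, a minimal standalone sketch of that flow, assuming empathy-framework 3.9.1 is installed; _Stage and _Result are hypothetical stand-ins for the real stage/WorkflowResult objects (defined elsewhere in empathy_os.workflows and not shown in this diff), shaped only to match the attributes this module reads (stages, error, cost_report, and per-stage tier, cost, input_tokens, output_tokens, error):

from dataclasses import dataclass, field
from typing import List, Optional

from empathy_os.workflows.tier_tracking import WorkflowTierTracker


@dataclass
class _Stage:  # hypothetical stand-in for a workflow stage record
    tier: str = "cheap"
    cost: float = 0.03
    input_tokens: int = 1200
    output_tokens: int = 400
    error: Optional[str] = None


@dataclass
class _Result:  # hypothetical stand-in for WorkflowResult
    stages: List[_Stage] = field(default_factory=lambda: [_Stage()])
    error: Optional[str] = None
    cost_report: dict = field(default_factory=lambda: {"total": 0.03})


tracker = WorkflowTierTracker("bug-fix", "Fix crash in payment parser")
tracker.show_recommendation(files_affected=["payments/parser.py"])  # prints the panel, returns a tier

# ... run the workflow, recording attempts as tiers are tried ...
tracker.record_tier_attempt(tier="CHEAP", attempt=1, success=True,
                            quality_gates_passed=["tests"])

pattern_file = tracker.save_progression(_Result(), files_affected=["payments/parser.py"])
print(pattern_file)  # path to the saved tier-progression JSON, or None if saving failed

auto_recommend_tier is the lighter-weight entry point when only the recommendation is needed: it builds a throwaway tracker and returns the tier string without printing the panel.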