empathy-framework 3.8.3-py3-none-any.whl → 3.9.1-py3-none-any.whl
This diff shows the changes between publicly released versions of the package, as published to a supported registry, and is provided for informational purposes only.
- {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/METADATA +67 -7
- {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/RECORD +50 -39
- {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/top_level.txt +0 -4
- empathy_os/.empathy/costs.json +60 -0
- empathy_os/.empathy/discovery_stats.json +15 -0
- empathy_os/.empathy/workflow_runs.json +45 -0
- empathy_os/cli.py +372 -13
- empathy_os/cli_unified.py +111 -0
- empathy_os/config/xml_config.py +45 -3
- empathy_os/config.py +46 -2
- empathy_os/memory/control_panel.py +128 -8
- empathy_os/memory/long_term.py +26 -4
- empathy_os/memory/short_term.py +110 -0
- empathy_os/models/token_estimator.py +25 -0
- empathy_os/pattern_library.py +81 -8
- empathy_os/patterns/debugging/all_patterns.json +81 -0
- empathy_os/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- empathy_os/patterns/refactoring_memory.json +89 -0
- empathy_os/telemetry/__init__.py +11 -0
- empathy_os/telemetry/cli.py +451 -0
- empathy_os/telemetry/usage_tracker.py +475 -0
- {test_generator → empathy_os/test_generator}/generator.py +1 -0
- empathy_os/tier_recommender.py +422 -0
- empathy_os/workflows/base.py +223 -23
- empathy_os/workflows/config.py +50 -5
- empathy_os/workflows/tier_tracking.py +408 -0
- {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/WHEEL +0 -0
- {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/entry_points.txt +0 -0
- {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/licenses/LICENSE +0 -0
- {hot_reload → empathy_os/hot_reload}/README.md +0 -0
- {hot_reload → empathy_os/hot_reload}/__init__.py +0 -0
- {hot_reload → empathy_os/hot_reload}/config.py +0 -0
- {hot_reload → empathy_os/hot_reload}/integration.py +0 -0
- {hot_reload → empathy_os/hot_reload}/reloader.py +0 -0
- {hot_reload → empathy_os/hot_reload}/watcher.py +0 -0
- {hot_reload → empathy_os/hot_reload}/websocket.py +0 -0
- {scaffolding → empathy_os/scaffolding}/README.md +0 -0
- {scaffolding → empathy_os/scaffolding}/__init__.py +0 -0
- {scaffolding → empathy_os/scaffolding}/__main__.py +0 -0
- {scaffolding → empathy_os/scaffolding}/cli.py +0 -0
- {test_generator → empathy_os/test_generator}/__init__.py +0 -0
- {test_generator → empathy_os/test_generator}/__main__.py +0 -0
- {test_generator → empathy_os/test_generator}/cli.py +0 -0
- {test_generator → empathy_os/test_generator}/risk_analyzer.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/__init__.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/behavior.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/core.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/output.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/registry.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/structural.py +0 -0
@@ -0,0 +1,422 @@
+"""
+Real-time tier recommendation system for cascading workflows.
+
+This module provides intelligent tier selection based on historical patterns,
+bug types, and file analysis. It can be used programmatically or via CLI.
+
+Usage:
+    from empathy_os import TierRecommender
+
+    recommender = TierRecommender()
+    tier = recommender.recommend(
+        bug_description="integration test failure with import error",
+        files_affected=["tests/integration/test_foo.py"]
+    )
+
+    print(f"Recommended tier: {tier.tier}")
+    print(f"Confidence: {tier.confidence}")
+    print(f"Expected cost: ${tier.expected_cost}")
+"""
+
+from dataclasses import dataclass
+from pathlib import Path
+from typing import List, Optional, Dict, Tuple
+import json
+from collections import defaultdict
+
+
+@dataclass
+class TierRecommendationResult:
+    """Result of tier recommendation."""
+    tier: str  # CHEAP, CAPABLE, or PREMIUM
+    confidence: float  # 0.0-1.0
+    reasoning: str
+    expected_cost: float
+    expected_attempts: float
+    similar_patterns_count: int
+    fallback_used: bool = False
+
+
+class TierRecommender:
+    """
+    Intelligent tier recommendation system.
+
+    Learns from historical patterns to recommend optimal starting tier
+    for new bugs based on:
+    - Bug type/description
+    - Files affected
+    - Historical success rates
+    - Cost optimization
+    """
+
+    def __init__(
+        self,
+        patterns_dir: Optional[Path] = None,
+        confidence_threshold: float = 0.7
+    ):
+        """
+        Initialize tier recommender.
+
+        Args:
+            patterns_dir: Directory containing pattern JSON files.
+                Defaults to patterns/debugging/
+            confidence_threshold: Minimum confidence for non-default recommendations
+
+        Raises:
+            ValueError: If confidence_threshold is out of valid range
+        """
+        # Pattern 4: Range validation
+        if not 0.0 <= confidence_threshold <= 1.0:
+            raise ValueError(f"confidence_threshold must be between 0.0 and 1.0, got {confidence_threshold}")
+
+        if patterns_dir is None:
+            patterns_dir = Path(__file__).parent.parent.parent / "patterns" / "debugging"
+
+        self.patterns_dir = Path(patterns_dir)
+        self.confidence_threshold = confidence_threshold
+        self.patterns = self._load_patterns()
+
+        # Build indexes for fast lookup
+        self._build_indexes()
+
+    def _load_patterns(self) -> List[Dict]:
+        """Load all enhanced patterns with tier_progression data."""
+        patterns = []
+
+        if not self.patterns_dir.exists():
+            return patterns
+
+        for file_path in self.patterns_dir.glob("*.json"):
+            try:
+                with open(file_path) as f:
+                    data = json.load(f)
+
+                # Check if this is an enhanced pattern
+                if isinstance(data, dict) and "tier_progression" in data:
+                    patterns.append(data)
+                # Or if it's a patterns array
+                elif isinstance(data, dict) and "patterns" in data:
+                    for pattern in data["patterns"]:
+                        if "tier_progression" in pattern:
+                            patterns.append(pattern)
+            except (json.JSONDecodeError, KeyError):
+                continue
+
+        return patterns
+
+    def _build_indexes(self):
+        """Build indexes for fast pattern lookup."""
+        self.bug_type_index: Dict[str, List[Dict]] = defaultdict(list)
+        self.file_pattern_index: Dict[str, List[Dict]] = defaultdict(list)
+
+        for pattern in self.patterns:
+            # Index by bug type
+            bug_type = pattern.get("bug_type", "unknown")
+            self.bug_type_index[bug_type].append(pattern)
+
+            # Index by file patterns
+            files = pattern.get("files_affected", [])
+            for file in files:
+                # Extract file pattern (e.g., "tests/" from "tests/test_foo.py")
+                parts = Path(file).parts
+                if parts:
+                    self.file_pattern_index[parts[0]].append(pattern)
+
+    def recommend(
+        self,
+        bug_description: str,
+        files_affected: Optional[List[str]] = None,
+        complexity_hint: Optional[int] = None
+    ) -> TierRecommendationResult:
+        """
+        Recommend optimal starting tier for a new bug.
+
+        Args:
+            bug_description: Description of the bug/task
+            files_affected: List of files involved (optional)
+            complexity_hint: Manual complexity score 1-10 (optional)
+
+        Returns:
+            TierRecommendationResult with tier, confidence, and reasoning
+
+        Raises:
+            ValueError: If bug_description is empty or complexity_hint out of range
+            TypeError: If files_affected is not a list
+        """
+        # Pattern 1: String ID validation
+        if not bug_description or not bug_description.strip():
+            raise ValueError("bug_description cannot be empty")
+
+        # Pattern 5: Type validation
+        if files_affected is not None and not isinstance(files_affected, list):
+            raise TypeError(f"files_affected must be list, got {type(files_affected).__name__}")
+
+        # Pattern 4: Range validation for complexity_hint
+        if complexity_hint is not None and not (1 <= complexity_hint <= 10):
+            raise ValueError(f"complexity_hint must be between 1 and 10, got {complexity_hint}")
+
+        # Step 1: Match bug type from description
+        bug_type = self._classify_bug_type(bug_description)
+
+        # Step 2: Find similar patterns
+        similar_patterns = self._find_similar_patterns(
+            bug_type=bug_type,
+            files_affected=files_affected or []
+        )
+
+        # Step 3: If no similar patterns, use fallback logic
+        if not similar_patterns:
+            return self._fallback_recommendation(
+                bug_description=bug_description,
+                complexity_hint=complexity_hint
+            )
+
+        # Step 4: Analyze tier distribution in similar patterns
+        tier_analysis = self._analyze_tier_distribution(similar_patterns)
+
+        # Step 5: Select tier with highest success rate
+        recommended_tier, confidence = self._select_tier(tier_analysis)
+
+        # Step 6: Calculate expected cost and attempts
+        cost_estimate = self._estimate_cost(similar_patterns, recommended_tier)
+
+        return TierRecommendationResult(
+            tier=recommended_tier,
+            confidence=confidence,
+            reasoning=self._generate_reasoning(
+                bug_type=bug_type,
+                tier=recommended_tier,
+                confidence=confidence,
+                similar_count=len(similar_patterns)
+            ),
+            expected_cost=cost_estimate["avg_cost"],
+            expected_attempts=cost_estimate["avg_attempts"],
+            similar_patterns_count=len(similar_patterns),
+            fallback_used=False
+        )
+
+    def _classify_bug_type(self, description: str) -> str:
+        """Classify bug type from description using keyword matching."""
+        desc_lower = description.lower()
+
+        # Define bug type keywords
+        bug_type_keywords = {
+            "integration_error": ["integration", "import", "module", "package"],
+            "type_mismatch": ["type", "annotation", "mypy", "typing"],
+            "import_error": ["import", "module", "cannot import", "no module"],
+            "syntax_error": ["syntax", "invalid syntax", "parse error"],
+            "runtime_error": ["runtime", "exception", "traceback"],
+            "test_failure": ["test fail", "assertion", "pytest"],
+        }
+
+        for bug_type, keywords in bug_type_keywords.items():
+            if any(kw in desc_lower for kw in keywords):
+                return bug_type
+
+        return "unknown"
+
+    def _find_similar_patterns(
+        self,
+        bug_type: str,
+        files_affected: List[str]
+    ) -> List[Dict]:
+        """Find patterns similar to current bug.
+
+        Raises:
+            TypeError: If files_affected is not a list
+        """
+        # Pattern 5: Type validation
+        if not isinstance(files_affected, list):
+            raise TypeError(f"files_affected must be list, got {type(files_affected).__name__}")
+
+        similar = []
+
+        # Match by bug type
+        similar.extend(self.bug_type_index.get(bug_type, []))
+
+        # Match by file patterns
+        if files_affected:
+            for file in files_affected:
+                parts = Path(file).parts
+                if parts:
+                    file_matches = self.file_pattern_index.get(parts[0], [])
+                    # Add only if not already in similar list
+                    for pattern in file_matches:
+                        if pattern not in similar:
+                            similar.append(pattern)
+
+        return similar
+
+    def _analyze_tier_distribution(
+        self,
+        patterns: List[Dict]
+    ) -> Dict[str, Dict]:
+        """Analyze tier success rates from similar patterns."""
+        tier_stats: Dict[str, Dict] = defaultdict(lambda: {
+            "count": 0,
+            "total_cost": 0.0,
+            "total_attempts": 0
+        })
+
+        for pattern in patterns:
+            tp = pattern["tier_progression"]
+            successful_tier = tp["successful_tier"]
+
+            stats = tier_stats[successful_tier]
+            stats["count"] += 1
+            stats["total_cost"] += tp["cost_breakdown"]["total_cost"]
+            stats["total_attempts"] += tp["total_attempts"]
+
+        # Calculate averages
+        for tier, stats in tier_stats.items():
+            count = stats["count"]
+            stats["success_rate"] = count / len(patterns)
+            stats["avg_cost"] = stats["total_cost"] / count
+            stats["avg_attempts"] = stats["total_attempts"] / count
+
+        return dict(tier_stats)
+
+    def _select_tier(
+        self,
+        tier_analysis: Dict[str, Dict]
+    ) -> Tuple[str, float]:
+        """Select best tier based on success rate and cost."""
+        if not tier_analysis:
+            return "CHEAP", 0.5
+
+        # Sort by success rate
+        sorted_tiers = sorted(
+            tier_analysis.items(),
+            key=lambda x: x[1]["success_rate"],
+            reverse=True
+        )
+
+        best_tier, stats = sorted_tiers[0]
+        confidence = stats["success_rate"]
+
+        return best_tier, confidence
+
+    def _estimate_cost(
+        self,
+        patterns: List[Dict],
+        tier: str
+    ) -> Dict[str, float]:
+        """Estimate cost and attempts for recommended tier."""
+        matching = [
+            p for p in patterns
+            if p["tier_progression"]["successful_tier"] == tier
+        ]
+
+        if not matching:
+            # Default estimates by tier
+            defaults = {
+                "CHEAP": {"avg_cost": 0.030, "avg_attempts": 1.5},
+                "CAPABLE": {"avg_cost": 0.150, "avg_attempts": 2.5},
+                "PREMIUM": {"avg_cost": 0.450, "avg_attempts": 1.0},
+            }
+            return defaults.get(tier, defaults["CHEAP"])
+
+        total_cost = sum(
+            p["tier_progression"]["cost_breakdown"]["total_cost"]
+            for p in matching
+        )
+        total_attempts = sum(
+            p["tier_progression"]["total_attempts"]
+            for p in matching
+        )
+
+        return {
+            "avg_cost": total_cost / len(matching),
+            "avg_attempts": total_attempts / len(matching)
+        }
+
+    def _fallback_recommendation(
+        self,
+        bug_description: str,
+        complexity_hint: Optional[int]
+    ) -> TierRecommendationResult:
+        """Provide fallback recommendation when no historical data available."""
+
+        # Use complexity hint if provided
+        if complexity_hint is not None:
+            if complexity_hint <= 3:
+                tier = "CHEAP"
+                cost = 0.030
+            elif complexity_hint <= 7:
+                tier = "CAPABLE"
+                cost = 0.150
+            else:
+                tier = "PREMIUM"
+                cost = 0.450
+
+            return TierRecommendationResult(
+                tier=tier,
+                confidence=0.6,
+                reasoning=f"Based on complexity score {complexity_hint}/10",
+                expected_cost=cost,
+                expected_attempts=2.0,
+                similar_patterns_count=0,
+                fallback_used=True
+            )
+
+        # Default: start with CHEAP tier (conservative)
+        return TierRecommendationResult(
+            tier="CHEAP",
+            confidence=0.5,
+            reasoning="No historical data - defaulting to CHEAP tier (conservative approach)",
+            expected_cost=0.030,
+            expected_attempts=1.5,
+            similar_patterns_count=0,
+            fallback_used=True
+        )
+
+    def _generate_reasoning(
+        self,
+        bug_type: str,
+        tier: str,
+        confidence: float,
+        similar_count: int
+    ) -> str:
+        """Generate human-readable reasoning for recommendation."""
+        percent = int(confidence * 100)
+
+        if similar_count == 0:
+            return "No historical data - defaulting to CHEAP tier"
+        elif similar_count == 1:
+            return f"1 similar bug ({bug_type}) resolved at {tier} tier"
+        else:
+            return (
+                f"{percent}% of {similar_count} similar bugs ({bug_type}) "
+                f"resolved at {tier} tier"
+            )
+
+    def get_stats(self) -> Dict:
+        """Get overall statistics about pattern learning."""
+        if not self.patterns:
+            return {
+                "total_patterns": 0,
+                "message": "No patterns loaded"
+            }
+
+        # Calculate tier distribution
+        tier_dist = defaultdict(int)
+        bug_type_dist = defaultdict(int)
+        total_savings = 0.0
+
+        for pattern in self.patterns:
+            tp = pattern["tier_progression"]
+            tier_dist[tp["successful_tier"]] += 1
+            bug_type_dist[pattern["bug_type"]] += 1
+            total_savings += tp["cost_breakdown"]["savings_percent"]
+
+        return {
+            "total_patterns": len(self.patterns),
+            "tier_distribution": dict(tier_dist),
+            "bug_type_distribution": dict(bug_type_dist),
+            "avg_savings_percent": round(total_savings / len(self.patterns), 1),
+            "patterns_by_tier": {
+                "CHEAP": tier_dist.get("CHEAP", 0),
+                "CAPABLE": tier_dist.get("CAPABLE", 0),
+                "PREMIUM": tier_dist.get("PREMIUM", 0),
+            }
+        }
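For orientation, here is a minimal sketch of how the TierRecommender added in this release might be exercised. The seeded pattern file, its field values, the test path, and the bug descriptions below are illustrative assumptions chosen to match the keys the loader reads (bug_type, files_affected, tier_progression, cost_breakdown); they are not data shipped with the wheel.

import json
import tempfile
from pathlib import Path

# Module path per the file listing above; the module docstring also shows
# "from empathy_os import TierRecommender".
from empathy_os.tier_recommender import TierRecommender

# Hypothetical "enhanced pattern" with the keys _load_patterns() and
# _analyze_tier_distribution() expect.
pattern = {
    "bug_type": "integration_error",
    "files_affected": ["tests/integration/test_payments.py"],
    "tier_progression": {
        "successful_tier": "CHEAP",
        "total_attempts": 1,
        "cost_breakdown": {"total_cost": 0.021, "savings_percent": 85.0},
    },
}

with tempfile.TemporaryDirectory() as tmp:
    patterns_dir = Path(tmp)
    (patterns_dir / "example_pattern.json").write_text(json.dumps(pattern))

    recommender = TierRecommender(patterns_dir=patterns_dir)

    # Historical match: "integration" classifies the bug as integration_error,
    # which hits the seeded pattern and recommends its successful tier.
    result = recommender.recommend(
        bug_description="integration test failure with import error",
        files_affected=["tests/integration/test_payments.py"],
    )
    print(result.tier, result.confidence, result.expected_cost)  # CHEAP 1.0 0.021
    print(result.reasoning)

    # No matching history: recommend() falls back to the complexity-based default.
    fallback = recommender.recommend("flaky UI rendering glitch", complexity_hint=8)
    print(fallback.tier, fallback.fallback_used)  # PREMIUM True

    print(recommender.get_stats())  # tier/bug-type distribution and avg savings

Under these assumptions the first call reports CHEAP with confidence 1.0 because the single seeded pattern is the only history for that bug type; the second call never reaches the pattern index and instead uses the fallback tiers (CHEAP for complexity 1-3, CAPABLE for 4-7, PREMIUM for 8-10).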