empathy-framework 3.9.1-py3-none-any.whl → 3.9.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. {empathy_framework-3.9.1.dist-info → empathy_framework-3.9.2.dist-info}/METADATA +1 -1
  2. {empathy_framework-3.9.1.dist-info → empathy_framework-3.9.2.dist-info}/RECORD +40 -43
  3. empathy_healthcare_plugin/monitors/monitoring/__init__.py +9 -9
  4. empathy_llm_toolkit/agent_factory/__init__.py +6 -6
  5. empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +4 -1
  6. empathy_llm_toolkit/agent_factory/framework.py +2 -1
  7. empathy_llm_toolkit/config/__init__.py +8 -8
  8. empathy_llm_toolkit/security/__init__.py +17 -17
  9. empathy_os/adaptive/__init__.py +3 -3
  10. empathy_os/cli.py +5 -8
  11. empathy_os/cli_unified.py +1 -1
  12. empathy_os/memory/__init__.py +30 -30
  13. empathy_os/memory/control_panel.py +3 -1
  14. empathy_os/memory/long_term.py +3 -1
  15. empathy_os/models/__init__.py +48 -48
  16. empathy_os/monitoring/__init__.py +7 -7
  17. empathy_os/optimization/__init__.py +3 -3
  18. empathy_os/pattern_library.py +2 -7
  19. empathy_os/plugins/__init__.py +6 -6
  20. empathy_os/resilience/__init__.py +5 -5
  21. empathy_os/scaffolding/cli.py +1 -1
  22. empathy_os/telemetry/cli.py +56 -13
  23. empathy_os/telemetry/usage_tracker.py +2 -5
  24. empathy_os/tier_recommender.py +36 -76
  25. empathy_os/trust/__init__.py +7 -7
  26. empathy_os/validation/__init__.py +3 -3
  27. empathy_os/workflow_patterns/output.py +1 -1
  28. empathy_os/workflow_patterns/structural.py +4 -4
  29. empathy_os/workflows/code_review_pipeline.py +1 -5
  30. empathy_os/workflows/dependency_check.py +1 -5
  31. empathy_os/workflows/keyboard_shortcuts/__init__.py +5 -5
  32. empathy_os/workflows/tier_tracking.py +39 -29
  33. empathy_software_plugin/cli.py +1 -3
  34. empathy_software_plugin/wizards/code_review_wizard.py +1 -3
  35. empathy_software_plugin/wizards/debugging/__init__.py +4 -4
  36. empathy_software_plugin/wizards/security/__init__.py +6 -6
  37. empathy_os/.empathy/costs.json +0 -60
  38. empathy_os/.empathy/discovery_stats.json +0 -15
  39. empathy_os/.empathy/workflow_runs.json +0 -45
  40. {empathy_framework-3.9.1.dist-info → empathy_framework-3.9.2.dist-info}/WHEEL +0 -0
  41. {empathy_framework-3.9.1.dist-info → empathy_framework-3.9.2.dist-info}/entry_points.txt +0 -0
  42. {empathy_framework-3.9.1.dist-info → empathy_framework-3.9.2.dist-info}/licenses/LICENSE +0 -0
  43. {empathy_framework-3.9.1.dist-info → empathy_framework-3.9.2.dist-info}/top_level.txt +0 -0
@@ -12,64 +12,64 @@ Licensed under Fair Source License 0.9
  from .empathy_executor import EmpathyLLMExecutor
  from .executor import ExecutionContext, LLMExecutor, LLMResponse, MockLLMExecutor
  from .fallback import (
- DEFAULT_FALLBACK_POLICY,
- DEFAULT_RETRY_POLICY,
- CircuitBreaker,
- CircuitBreakerState,
- FallbackPolicy,
- FallbackStep,
- FallbackStrategy,
- ResilientExecutor,
- RetryPolicy,
+ DEFAULT_FALLBACK_POLICY,
+ DEFAULT_RETRY_POLICY,
+ CircuitBreaker,
+ CircuitBreakerState,
+ FallbackPolicy,
+ FallbackStep,
+ FallbackStrategy,
+ ResilientExecutor,
+ RetryPolicy,
  )
  from .provider_config import (
- ProviderConfig,
- ProviderMode,
- configure_provider_cli,
- configure_provider_interactive,
- get_provider_config,
- reset_provider_config,
- set_provider_config,
+ ProviderConfig,
+ ProviderMode,
+ configure_provider_cli,
+ configure_provider_interactive,
+ get_provider_config,
+ reset_provider_config,
+ set_provider_config,
  )
  from .registry import (
- MODEL_REGISTRY,
- ModelInfo,
- ModelProvider,
- ModelTier,
- get_all_models,
- get_model,
- get_pricing_for_model,
+ MODEL_REGISTRY,
+ ModelInfo,
+ ModelProvider,
+ ModelTier,
+ get_all_models,
+ get_model,
+ get_pricing_for_model,
  )
  from .tasks import (
- CAPABLE_TASKS,
- CHEAP_TASKS,
- PREMIUM_TASKS,
- TASK_TIER_MAP,
- TaskInfo,
- TaskType,
- get_all_tasks,
- get_tasks_for_tier,
- get_tier_for_task,
- is_known_task,
- normalize_task_type,
+ CAPABLE_TASKS,
+ CHEAP_TASKS,
+ PREMIUM_TASKS,
+ TASK_TIER_MAP,
+ TaskInfo,
+ TaskType,
+ get_all_tasks,
+ get_tasks_for_tier,
+ get_tier_for_task,
+ is_known_task,
+ normalize_task_type,
  )
  from .telemetry import (
- LLMCallRecord,
- TelemetryAnalytics,
- TelemetryBackend,
- TelemetryStore,
- WorkflowRunRecord,
- WorkflowStageRecord,
- get_telemetry_store,
- log_llm_call,
- log_workflow_run,
+ LLMCallRecord,
+ TelemetryAnalytics,
+ TelemetryBackend,
+ TelemetryStore,
+ WorkflowRunRecord,
+ WorkflowStageRecord,
+ get_telemetry_store,
+ log_llm_call,
+ log_workflow_run,
  )
  from .validation import (
- ConfigValidator,
- ValidationError,
- ValidationResult,
- validate_config,
- validate_yaml_file,
+ ConfigValidator,
+ ValidationError,
+ ValidationResult,
+ validate_config,
+ validate_yaml_file,
  )

  __all__ = [
@@ -25,13 +25,13 @@ from empathy_os.agent_monitoring import AgentMetrics, AgentMonitor, TeamMetrics

  # Import telemetry classes
  from empathy_os.models.telemetry import (
- LLMCallRecord,
- TelemetryAnalytics,
- TelemetryStore,
- WorkflowRunRecord,
- get_telemetry_store,
- log_llm_call,
- log_workflow_run,
+ LLMCallRecord,
+ TelemetryAnalytics,
+ TelemetryStore,
+ WorkflowRunRecord,
+ get_telemetry_store,
+ log_llm_call,
+ log_workflow_run,
  )

  __all__ = [
@@ -7,9 +7,9 @@ Licensed under Fair Source License 0.9
  """

  from empathy_os.optimization.context_optimizer import (
- CompressionLevel,
- ContextOptimizer,
- optimize_xml_prompt,
+ CompressionLevel,
+ ContextOptimizer,
+ optimize_xml_prompt,
  )

  __all__ = [
@@ -262,9 +262,7 @@ class PatternLibrary:
  """
  pattern = self.patterns.get(pattern_id)
  if not pattern:
- raise ValueError(
- f"Pattern '{pattern_id}' not found. Cannot record outcome."
- )
+ raise ValueError(f"Pattern '{pattern_id}' not found. Cannot record outcome.")
  pattern.record_usage(success)

  def link_patterns(self, pattern_id_1: str, pattern_id_2: str):
@@ -300,10 +298,7 @@ class PatternLibrary:
  self.pattern_graph[pattern_id_2].append(pattern_id_1)

  def get_related_patterns(
- self,
- pattern_id: str,
- depth: int = 1,
- _visited: set[str] | None = None
+ self, pattern_id: str, depth: int = 1, _visited: set[str] | None = None
  ) -> list[Pattern]:
  """Get patterns related to a given pattern

@@ -7,12 +7,12 @@ Licensed under Fair Source 0.9
  """

  from .base import (
- BasePlugin,
- BaseWizard,
- PluginError,
- PluginLoadError,
- PluginMetadata,
- PluginValidationError,
+ BasePlugin,
+ BaseWizard,
+ PluginError,
+ PluginLoadError,
+ PluginMetadata,
+ PluginValidationError,
  )
  from .registry import PluginRegistry, get_global_registry

@@ -18,11 +18,11 @@ Licensed under Fair Source 0.9
  """

  from .circuit_breaker import (
- CircuitBreaker,
- CircuitOpenError,
- CircuitState,
- circuit_breaker,
- get_circuit_breaker,
+ CircuitBreaker,
+ CircuitOpenError,
+ CircuitState,
+ circuit_breaker,
+ get_circuit_breaker,
  )
  from .fallback import Fallback, fallback, with_fallback
  from .health import HealthCheck, HealthStatus, SystemHealth
@@ -218,7 +218,7 @@ Examples:
  )

  # List patterns command
- list_parser = subparsers.add_parser("list-patterns", help="List available patterns")
+ subparsers.add_parser("list-patterns", help="List available patterns")

  args = parser.parse_args()

@@ -18,6 +18,7 @@ try:
  from rich.panel import Panel
  from rich.table import Table
  from rich.text import Text
+
  RICH_AVAILABLE = True
  except ImportError:
  RICH_AVAILABLE = False
@@ -149,7 +150,9 @@ def cmd_telemetry_show(args: Any) -> int:
  console.print(f"\n[dim]Data location: {tracker.telemetry_dir}[/dim]")
  else:
  # Fallback to plain text
- print(f"\n{'Time':<19} {'Workflow':<20} {'Stage':<15} {'Tier':<10} {'Cost':>10} {'Cache':<10} {'Duration':>10}")
+ print(
+ f"\n{'Time':<19} {'Workflow':<20} {'Stage':<15} {'Tier':<10} {'Cost':>10} {'Cache':<10} {'Duration':>10}"
+ )
  print("-" * 120)
  total_cost = 0.0
  for entry in entries:
@@ -162,7 +165,9 @@ def cmd_telemetry_show(args: Any) -> int:
  duration_ms = entry.get("duration_ms", 0)

  cache_str = "HIT" if cache.get("hit") else "MISS"
- print(f"{ts:<19} {workflow:<20} {stage:<15} {tier:<10} ${cost:>9.4f} {cache_str:<10} {duration_ms:>9}ms")
+ print(
+ f"{ts:<19} {workflow:<20} {stage:<15} {tier:<10} ${cost:>9.4f} {cache_str:<10} {duration_ms:>9}ms"
+ )
  total_cost += cost

  print("-" * 120)
@@ -209,7 +214,9 @@ def cmd_telemetry_savings(args: Any) -> int:
  content_lines.append(f" Actual (tier routing): ${savings['actual_cost']:.2f}")
  content_lines.append("")
  savings_color = "green" if savings["savings"] > 0 else "red"
- content_lines.append(f"[bold {savings_color}]YOUR SAVINGS: ${savings['savings']:.2f} ({savings['savings_percent']:.1f}%)[/bold {savings_color}]")
+ content_lines.append(
+ f"[bold {savings_color}]YOUR SAVINGS: ${savings['savings']:.2f} ({savings['savings_percent']:.1f}%)[/bold {savings_color}]"
+ )
  content_lines.append("")
  content_lines.append(f"Cache savings: ${savings['cache_savings']:.2f}")
  content_lines.append(f"Total calls: {savings['total_calls']}")
@@ -271,7 +278,11 @@ def cmd_telemetry_compare(args: Any) -> int:
  table.add_column("Change", justify="right", style="blue")

  # Total calls
- calls_change = ((stats1["total_calls"] - stats2["total_calls"]) / stats2["total_calls"] * 100) if stats2["total_calls"] > 0 else 0
+ calls_change = (
+ ((stats1["total_calls"] - stats2["total_calls"]) / stats2["total_calls"] * 100)
+ if stats2["total_calls"] > 0
+ else 0
+ )
  table.add_row(
  "Total Calls",
  str(stats1["total_calls"]),
@@ -280,7 +291,11 @@ def cmd_telemetry_compare(args: Any) -> int:
  )

  # Total cost
- cost_change = ((stats1["total_cost"] - stats2["total_cost"]) / stats2["total_cost"] * 100) if stats2["total_cost"] > 0 else 0
+ cost_change = (
+ ((stats1["total_cost"] - stats2["total_cost"]) / stats2["total_cost"] * 100)
+ if stats2["total_cost"] > 0
+ else 0
+ )
  table.add_row(
  "Total Cost",
  f"${stats1['total_cost']:.2f}",
@@ -314,14 +329,28 @@ def cmd_telemetry_compare(args: Any) -> int:
  print("\n" + "=" * 80)
  print("TELEMETRY COMPARISON")
  print("=" * 80)
- print(f"{'Metric':<20} {'Last ' + str(period1_days) + ' days':>20} {'Last ' + str(period2_days) + ' days':>20} {'Change':>15}")
+ print(
+ f"{'Metric':<20} {'Last ' + str(period1_days) + ' days':>20} {'Last ' + str(period2_days) + ' days':>20} {'Change':>15}"
+ )
  print("-" * 80)

- calls_change = ((stats1["total_calls"] - stats2["total_calls"]) / stats2["total_calls"] * 100) if stats2["total_calls"] > 0 else 0
- print(f"{'Total Calls':<20} {stats1['total_calls']:>20} {stats2['total_calls']:>20} {calls_change:>14.1f}%")
+ calls_change = (
+ ((stats1["total_calls"] - stats2["total_calls"]) / stats2["total_calls"] * 100)
+ if stats2["total_calls"] > 0
+ else 0
+ )
+ print(
+ f"{'Total Calls':<20} {stats1['total_calls']:>20} {stats2['total_calls']:>20} {calls_change:>14.1f}%"
+ )

- cost_change = ((stats1["total_cost"] - stats2["total_cost"]) / stats2["total_cost"] * 100) if stats2["total_cost"] > 0 else 0
- print(f"{'Total Cost':<20} ${stats1['total_cost']:>19.2f} ${stats2['total_cost']:>19.2f} {cost_change:>14.1f}%")
+ cost_change = (
+ ((stats1["total_cost"] - stats2["total_cost"]) / stats2["total_cost"] * 100)
+ if stats2["total_cost"] > 0
+ else 0
+ )
+ print(
+ f"{'Total Cost':<20} ${stats1['total_cost']:>19.2f} ${stats2['total_cost']:>19.2f} {cost_change:>14.1f}%"
+ )

  avg1 = stats1["total_cost"] / stats1["total_calls"] if stats1["total_calls"] > 0 else 0
  avg2 = stats2["total_cost"] / stats2["total_calls"] if stats2["total_calls"] > 0 else 0
@@ -329,7 +358,9 @@ def cmd_telemetry_compare(args: Any) -> int:
  print(f"{'Avg Cost/Call':<20} ${avg1:>19.4f} ${avg2:>19.4f} {avg_change:>14.1f}%")

  cache_change = stats1["cache_hit_rate"] - stats2["cache_hit_rate"]
- print(f"{'Cache Hit Rate':<20} {stats1['cache_hit_rate']:>19.1f}% {stats2['cache_hit_rate']:>19.1f}% {cache_change:>14.1f}pp")
+ print(
+ f"{'Cache Hit Rate':<20} {stats1['cache_hit_rate']:>19.1f}% {stats2['cache_hit_rate']:>19.1f}% {cache_change:>14.1f}pp"
+ )

  print("=" * 80)

@@ -398,8 +429,20 @@ def cmd_telemetry_export(args: Any) -> int:
  return 0

  # Get all possible fields
- fieldnames = ["ts", "workflow", "stage", "tier", "model", "provider", "cost",
- "tokens_input", "tokens_output", "cache_hit", "cache_type", "duration_ms"]
+ fieldnames = [
+ "ts",
+ "workflow",
+ "stage",
+ "tier",
+ "model",
+ "provider",
+ "cost",
+ "tokens_input",
+ "tokens_output",
+ "cache_hit",
+ "cache_type",
+ "duration_ms",
+ ]

  if output_file:
  validated_path = _validate_file_path(output_file)
@@ -356,9 +356,7 @@ class UsageTracker:
  by_provider[provider] = by_provider.get(provider, 0.0) + cost

  total_calls = len(entries)
- cache_hit_rate = (
- (cache_hits / total_calls * 100) if total_calls > 0 else 0.0
- )
+ cache_hit_rate = (cache_hits / total_calls * 100) if total_calls > 0 else 0.0

  return {
  "total_calls": total_calls,
@@ -419,8 +417,7 @@ class UsageTracker:

  total_calls = len(entries)
  tier_distribution = {
- tier: round(count / total_calls * 100, 1)
- for tier, count in tier_counts.items()
+ tier: round(count / total_calls * 100, 1) for tier, count in tier_counts.items()
  }

  # Cache savings estimation
@@ -18,16 +18,16 @@ Usage:
  print(f"Expected cost: ${tier.expected_cost}")
  """

- from dataclasses import dataclass
- from pathlib import Path
- from typing import List, Optional, Dict, Tuple
  import json
  from collections import defaultdict
+ from dataclasses import dataclass
+ from pathlib import Path


  @dataclass
  class TierRecommendationResult:
  """Result of tier recommendation."""
+
  tier: str # CHEAP, CAPABLE, or PREMIUM
  confidence: float # 0.0-1.0
  reasoning: str
@@ -49,11 +49,7 @@ class TierRecommender:
  - Cost optimization
  """

- def __init__(
- self,
- patterns_dir: Optional[Path] = None,
- confidence_threshold: float = 0.7
- ):
+ def __init__(self, patterns_dir: Path | None = None, confidence_threshold: float = 0.7):
  """
  Initialize tier recommender.

@@ -67,7 +63,9 @@ class TierRecommender:
  """
  # Pattern 4: Range validation
  if not 0.0 <= confidence_threshold <= 1.0:
- raise ValueError(f"confidence_threshold must be between 0.0 and 1.0, got {confidence_threshold}")
+ raise ValueError(
+ f"confidence_threshold must be between 0.0 and 1.0, got {confidence_threshold}"
+ )

  if patterns_dir is None:
  patterns_dir = Path(__file__).parent.parent.parent / "patterns" / "debugging"
@@ -79,7 +77,7 @@ class TierRecommender:
  # Build indexes for fast lookup
  self._build_indexes()

- def _load_patterns(self) -> List[Dict]:
+ def _load_patterns(self) -> list[dict]:
  """Load all enhanced patterns with tier_progression data."""
  patterns = []

@@ -106,8 +104,8 @@ class TierRecommender:

  def _build_indexes(self):
  """Build indexes for fast pattern lookup."""
- self.bug_type_index: Dict[str, List[Dict]] = defaultdict(list)
- self.file_pattern_index: Dict[str, List[Dict]] = defaultdict(list)
+ self.bug_type_index: dict[str, list[dict]] = defaultdict(list)
+ self.file_pattern_index: dict[str, list[dict]] = defaultdict(list)

  for pattern in self.patterns:
  # Index by bug type
@@ -125,8 +123,8 @@ class TierRecommender:
  def recommend(
  self,
  bug_description: str,
- files_affected: Optional[List[str]] = None,
- complexity_hint: Optional[int] = None
+ files_affected: list[str] | None = None,
+ complexity_hint: int | None = None,
  ) -> TierRecommendationResult:
  """
  Recommend optimal starting tier for a new bug.
@@ -160,15 +158,13 @@ class TierRecommender:

  # Step 2: Find similar patterns
  similar_patterns = self._find_similar_patterns(
- bug_type=bug_type,
- files_affected=files_affected or []
+ bug_type=bug_type, files_affected=files_affected or []
  )

  # Step 3: If no similar patterns, use fallback logic
  if not similar_patterns:
  return self._fallback_recommendation(
- bug_description=bug_description,
- complexity_hint=complexity_hint
+ bug_description=bug_description, complexity_hint=complexity_hint
  )

  # Step 4: Analyze tier distribution in similar patterns
@@ -187,12 +183,12 @@ class TierRecommender:
  bug_type=bug_type,
  tier=recommended_tier,
  confidence=confidence,
- similar_count=len(similar_patterns)
+ similar_count=len(similar_patterns),
  ),
  expected_cost=cost_estimate["avg_cost"],
  expected_attempts=cost_estimate["avg_attempts"],
  similar_patterns_count=len(similar_patterns),
- fallback_used=False
+ fallback_used=False,
  )

  def _classify_bug_type(self, description: str) -> str:
@@ -215,11 +211,7 @@ class TierRecommender:

  return "unknown"

- def _find_similar_patterns(
- self,
- bug_type: str,
- files_affected: List[str]
- ) -> List[Dict]:
+ def _find_similar_patterns(self, bug_type: str, files_affected: list[str]) -> list[dict]:
  """Find patterns similar to current bug.

  Raises:
@@ -247,16 +239,11 @@ class TierRecommender:

  return similar

- def _analyze_tier_distribution(
- self,
- patterns: List[Dict]
- ) -> Dict[str, Dict]:
+ def _analyze_tier_distribution(self, patterns: list[dict]) -> dict[str, dict]:
  """Analyze tier success rates from similar patterns."""
- tier_stats: Dict[str, Dict] = defaultdict(lambda: {
- "count": 0,
- "total_cost": 0.0,
- "total_attempts": 0
- })
+ tier_stats: dict[str, dict] = defaultdict(
+ lambda: {"count": 0, "total_cost": 0.0, "total_attempts": 0}
+ )

  for pattern in patterns:
  tp = pattern["tier_progression"]
@@ -268,7 +255,7 @@ class TierRecommender:
  stats["total_attempts"] += tp["total_attempts"]

  # Calculate averages
- for tier, stats in tier_stats.items():
+ for _tier, stats in tier_stats.items():
  count = stats["count"]
  stats["success_rate"] = count / len(patterns)
  stats["avg_cost"] = stats["total_cost"] / count
@@ -276,19 +263,14 @@ class TierRecommender:
  return dict(tier_stats)


- def _select_tier(
- self,
- tier_analysis: Dict[str, Dict]
- ) -> Tuple[str, float]:
+ def _select_tier(self, tier_analysis: dict[str, dict]) -> tuple[str, float]:
  """Select best tier based on success rate and cost."""
  if not tier_analysis:
  return "CHEAP", 0.5

  # Sort by success rate
  sorted_tiers = sorted(
- tier_analysis.items(),
- key=lambda x: x[1]["success_rate"],
- reverse=True
+ tier_analysis.items(), key=lambda x: x[1]["success_rate"], reverse=True
  )

  best_tier, stats = sorted_tiers[0]
@@ -296,16 +278,9 @@ class TierRecommender:
  return best_tier, confidence


- def _estimate_cost(
- self,
- patterns: List[Dict],
- tier: str
- ) -> Dict[str, float]:
+ def _estimate_cost(self, patterns: list[dict], tier: str) -> dict[str, float]:
  """Estimate cost and attempts for recommended tier."""
- matching = [
- p for p in patterns
- if p["tier_progression"]["successful_tier"] == tier
- ]
+ matching = [p for p in patterns if p["tier_progression"]["successful_tier"] == tier]

  if not matching:
  # Default estimates by tier
@@ -316,24 +291,16 @@ class TierRecommender:
  }
  return defaults.get(tier, defaults["CHEAP"])

- total_cost = sum(
- p["tier_progression"]["cost_breakdown"]["total_cost"]
- for p in matching
- )
- total_attempts = sum(
- p["tier_progression"]["total_attempts"]
- for p in matching
- )
+ total_cost = sum(p["tier_progression"]["cost_breakdown"]["total_cost"] for p in matching)
+ total_attempts = sum(p["tier_progression"]["total_attempts"] for p in matching)

  return {
  "avg_cost": total_cost / len(matching),
- "avg_attempts": total_attempts / len(matching)
+ "avg_attempts": total_attempts / len(matching),
  }

  def _fallback_recommendation(
- self,
- bug_description: str,
- complexity_hint: Optional[int]
+ self, bug_description: str, complexity_hint: int | None
  ) -> TierRecommendationResult:
  """Provide fallback recommendation when no historical data available."""

@@ -356,7 +323,7 @@ class TierRecommender:
  expected_cost=cost,
  expected_attempts=2.0,
  similar_patterns_count=0,
- fallback_used=True
+ fallback_used=True,
  )

  # Default: start with CHEAP tier (conservative)
@@ -367,15 +334,11 @@ class TierRecommender:
  expected_cost=0.030,
  expected_attempts=1.5,
  similar_patterns_count=0,
- fallback_used=True
+ fallback_used=True,
  )

  def _generate_reasoning(
- self,
- bug_type: str,
- tier: str,
- confidence: float,
- similar_count: int
+ self, bug_type: str, tier: str, confidence: float, similar_count: int
  ) -> str:
  """Generate human-readable reasoning for recommendation."""
  percent = int(confidence * 100)
@@ -390,13 +353,10 @@ class TierRecommender:
  f"resolved at {tier} tier"
  )

- def get_stats(self) -> Dict:
+ def get_stats(self) -> dict:
  """Get overall statistics about pattern learning."""
  if not self.patterns:
- return {
- "total_patterns": 0,
- "message": "No patterns loaded"
- }
+ return {"total_patterns": 0, "message": "No patterns loaded"}

  # Calculate tier distribution
  tier_dist = defaultdict(int)
@@ -418,5 +378,5 @@ class TierRecommender:
  "CHEAP": tier_dist.get("CHEAP", 0),
  "CAPABLE": tier_dist.get("CAPABLE", 0),
  "PREMIUM": tier_dist.get("PREMIUM", 0),
- }
+ },
  }
@@ -8,13 +8,13 @@ Transfer: Protect user trust like protecting system stability
  """

  from .circuit_breaker import (
- TrustCircuitBreaker,
- TrustConfig,
- TrustDamageEvent,
- TrustDamageType,
- TrustRecoveryEvent,
- TrustState,
- create_trust_breaker,
+ TrustCircuitBreaker,
+ TrustConfig,
+ TrustDamageEvent,
+ TrustDamageType,
+ TrustRecoveryEvent,
+ TrustState,
+ create_trust_breaker,
  )

  __all__ = [