empathy-framework: empathy_framework-3.9.1-py3-none-any.whl → empathy_framework-3.9.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. {empathy_framework-3.9.1.dist-info → empathy_framework-3.9.3.dist-info}/METADATA +1 -1
  2. {empathy_framework-3.9.1.dist-info → empathy_framework-3.9.3.dist-info}/RECORD +58 -61
  3. empathy_healthcare_plugin/monitors/monitoring/__init__.py +9 -9
  4. empathy_llm_toolkit/agent_factory/__init__.py +6 -6
  5. empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +4 -1
  6. empathy_llm_toolkit/agent_factory/crews/health_check.py +36 -29
  7. empathy_llm_toolkit/agent_factory/framework.py +2 -1
  8. empathy_llm_toolkit/config/__init__.py +8 -8
  9. empathy_llm_toolkit/security/__init__.py +17 -17
  10. empathy_os/__init__.py +1 -1
  11. empathy_os/adaptive/__init__.py +3 -3
  12. empathy_os/cli.py +5 -8
  13. empathy_os/cli_unified.py +86 -2
  14. empathy_os/config.py +7 -4
  15. empathy_os/hot_reload/integration.py +2 -1
  16. empathy_os/hot_reload/watcher.py +8 -4
  17. empathy_os/hot_reload/websocket.py +2 -1
  18. empathy_os/memory/__init__.py +30 -30
  19. empathy_os/memory/control_panel.py +3 -1
  20. empathy_os/memory/long_term.py +3 -1
  21. empathy_os/models/__init__.py +48 -48
  22. empathy_os/monitoring/__init__.py +7 -7
  23. empathy_os/optimization/__init__.py +3 -3
  24. empathy_os/pattern_library.py +2 -7
  25. empathy_os/plugins/__init__.py +6 -6
  26. empathy_os/resilience/__init__.py +5 -5
  27. empathy_os/scaffolding/cli.py +1 -1
  28. empathy_os/telemetry/cli.py +56 -13
  29. empathy_os/telemetry/usage_tracker.py +2 -5
  30. empathy_os/test_generator/generator.py +1 -1
  31. empathy_os/tier_recommender.py +39 -79
  32. empathy_os/trust/__init__.py +7 -7
  33. empathy_os/validation/__init__.py +3 -3
  34. empathy_os/workflow_patterns/output.py +1 -1
  35. empathy_os/workflow_patterns/structural.py +4 -4
  36. empathy_os/workflows/base.py +5 -2
  37. empathy_os/workflows/code_review_pipeline.py +1 -5
  38. empathy_os/workflows/dependency_check.py +1 -5
  39. empathy_os/workflows/keyboard_shortcuts/__init__.py +5 -5
  40. empathy_os/workflows/tier_tracking.py +40 -30
  41. empathy_software_plugin/cli.py +1 -3
  42. empathy_software_plugin/wizards/advanced_debugging_wizard.py +9 -6
  43. empathy_software_plugin/wizards/code_review_wizard.py +1 -3
  44. empathy_software_plugin/wizards/debugging/__init__.py +4 -4
  45. empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +1 -1
  46. empathy_software_plugin/wizards/debugging/config_loaders.py +6 -2
  47. empathy_software_plugin/wizards/debugging/language_patterns.py +4 -2
  48. empathy_software_plugin/wizards/debugging/linter_parsers.py +1 -1
  49. empathy_software_plugin/wizards/performance/profiler_parsers.py +7 -7
  50. empathy_software_plugin/wizards/security/__init__.py +6 -6
  51. empathy_software_plugin/wizards/security/vulnerability_scanner.py +1 -1
  52. empathy_software_plugin/wizards/security_analysis_wizard.py +2 -2
  53. empathy_software_plugin/wizards/testing/quality_analyzer.py +3 -9
  54. empathy_software_plugin/wizards/testing/test_suggester.py +1 -1
  55. empathy_os/.empathy/costs.json +0 -60
  56. empathy_os/.empathy/discovery_stats.json +0 -15
  57. empathy_os/.empathy/workflow_runs.json +0 -45
  58. {empathy_framework-3.9.1.dist-info → empathy_framework-3.9.3.dist-info}/WHEEL +0 -0
  59. {empathy_framework-3.9.1.dist-info → empathy_framework-3.9.3.dist-info}/entry_points.txt +0 -0
  60. {empathy_framework-3.9.1.dist-info → empathy_framework-3.9.3.dist-info}/licenses/LICENSE +0 -0
  61. {empathy_framework-3.9.1.dist-info → empathy_framework-3.9.3.dist-info}/top_level.txt +0 -0

empathy_os/workflows/tier_tracking.py

@@ -12,10 +12,10 @@ Licensed under Fair Source License 0.9
 
 import json
 import uuid
-from dataclasses import asdict, dataclass
+from dataclasses import dataclass
 from datetime import datetime
 from pathlib import Path
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 from empathy_os.logging_config import get_logger
 
@@ -29,8 +29,8 @@ class TierAttempt:
     tier: str
     attempt: int
     success: bool
-    quality_gate_failed: Optional[str] = None
-    quality_gates_passed: Optional[List[str]] = None
+    quality_gate_failed: str | None = None
+    quality_gates_passed: list[str] | None = None
 
 
 @dataclass
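
Note: the recurring change across this release is a typing modernization, from typing.Optional/List/Dict to PEP 604 unions and PEP 585 builtin generics. A minimal sketch of the new spelling (not package code; TierAttemptSketch is a hypothetical mirror of TierAttempt, and annotations like these, when evaluated at class creation as dataclass fields are, require Python 3.10+ or `from __future__ import annotations` on older interpreters):

    from dataclasses import dataclass

    @dataclass
    class TierAttemptSketch:  # hypothetical mirror of TierAttempt
        tier: str
        attempt: int
        success: bool
        quality_gate_failed: str | None = None         # was Optional[str]
        quality_gates_passed: list[str] | None = None  # was Optional[List[str]]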
@@ -40,7 +40,7 @@ class WorkflowTierProgression:
     workflow_name: str
     workflow_id: str
     bug_description: str
-    files_affected: List[str]
+    files_affected: list[str]
     bug_type: str
 
     # Tier progression
@@ -48,7 +48,7 @@ class WorkflowTierProgression:
     starting_tier: str
     successful_tier: str
     total_attempts: int
-    tier_history: List[Dict[str, Any]]
+    tier_history: list[dict[str, Any]]
 
     # Costs
     total_cost: float
@@ -65,7 +65,7 @@ class WorkflowTierProgression:
     duration_seconds: float
 
     # Optional fields must come last
-    error_message: Optional[str] = None
+    error_message: str | None = None
 
 
 class WorkflowTierTracker:
@@ -89,7 +89,7 @@ class WorkflowTierTracker:
         self,
         workflow_name: str,
         workflow_description: str,
-        patterns_dir: Optional[Path] = None,
+        patterns_dir: Path | None = None,
     ):
         """
         Initialize tier tracker for a workflow.
@@ -109,13 +109,13 @@ class WorkflowTierTracker:
         self.patterns_dir = Path(patterns_dir)
         self.patterns_dir.mkdir(parents=True, exist_ok=True)
 
-        self.recommended_tier: Optional[str] = None
-        self.starting_tier: Optional[str] = None
-        self.tier_attempts: List[TierAttempt] = []
+        self.recommended_tier: str | None = None
+        self.starting_tier: str | None = None
+        self.tier_attempts: list[TierAttempt] = []
 
     def show_recommendation(
         self,
-        files_affected: Optional[List[str]] = None,
+        files_affected: list[str] | None = None,
         show_ui: bool = True,
     ) -> str:
         """
@@ -173,7 +173,9 @@ class WorkflowTierTracker:
         if result.fallback_used:
             message += "\n\n[yellow]⚠️ Using default - limited historical data[/yellow]"
         else:
-            message += f"\n\n[green]✅ Based on {result.similar_patterns_count} similar patterns[/green]"
+            message += (
+                f"\n\n[green]✅ Based on {result.similar_patterns_count} similar patterns[/green]"
+            )
 
         console.print(Panel(message, title="🎯 Auto Tier Recommendation", border_style="cyan"))
 
@@ -182,8 +184,8 @@
         tier: str,
         attempt: int,
         success: bool,
-        quality_gate_failed: Optional[str] = None,
-        quality_gates_passed: Optional[List[str]] = None,
+        quality_gate_failed: str | None = None,
+        quality_gates_passed: list[str] | None = None,
     ):
         """Record a tier attempt during workflow execution."""
         self.tier_attempts.append(
@@ -199,9 +201,9 @@
     def save_progression(
         self,
         workflow_result: Any,
-        files_affected: Optional[List[str]] = None,
+        files_affected: list[str] | None = None,
         bug_type: str = "workflow_run",
-    ) -> Optional[Path]:
+    ) -> Path | None:
         """
         Save tier progression data after workflow completion.
 
@@ -225,9 +227,17 @@
         tier_history = self._build_tier_history(workflow_result)
 
         # Calculate costs
-        total_cost = workflow_result.cost_report.get("total", 0) if isinstance(workflow_result.cost_report, dict) else sum(stage.cost for stage in workflow_result.stages)
+        total_cost = (
+            workflow_result.cost_report.get("total", 0)
+            if isinstance(workflow_result.cost_report, dict)
+            else sum(stage.cost for stage in workflow_result.stages)
+        )
         cost_if_premium = self._estimate_premium_cost(workflow_result)
-        savings_percent = ((cost_if_premium - total_cost) / cost_if_premium * 100) if cost_if_premium > 0 else 0
+        savings_percent = (
+            ((cost_if_premium - total_cost) / cost_if_premium * 100)
+            if cost_if_premium > 0
+            else 0
+        )
 
         # Create progression record
         progression = {
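
Note: the re-wrapped savings formula is unchanged. As a worked check, plugging in the actual/baseline costs from the deleted .empathy fixtures at the end of this diff reproduces the stored savings_percent (in the method itself cost_if_premium comes from _estimate_premium_cost; the fixture's baseline_cost is used here purely to exercise the arithmetic):

    total_cost = 0.02087       # actual_cost from the deleted costs.json
    cost_if_premium = 0.13581  # baseline_cost from the same fixture
    savings_percent = (
        ((cost_if_premium - total_cost) / cost_if_premium * 100)
        if cost_if_premium > 0
        else 0
    )
    print(round(savings_percent, 2))  # 84.63, matching workflow_runs.json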
@@ -240,7 +250,6 @@
             "resolved_at": completed_at.strftime("%Y-%m-%d"),
             "files_affected": files_affected or [],
             "source": "workflow_tracking",
-
             "tier_progression": {
                 "methodology": "AI-ADDIE",
                 "recommended_tier": self.recommended_tier or self.starting_tier,
@@ -266,7 +275,6 @@
                     "false_complete_avoided": workflow_result.error is None,
                 },
             },
-
             "workflow_metadata": {
                 "workflow_name": self.workflow_name,
                 "workflow_id": self.workflow_id,
@@ -298,8 +306,10 @@
             return "CHEAP"
 
         # Use the highest tier that was actually used
-        tiers_used = [stage.tier.value if hasattr(stage.tier, 'value') else str(stage.tier).lower()
-                      for stage in workflow_result.stages]
+        tiers_used = [
+            stage.tier.value if hasattr(stage.tier, "value") else str(stage.tier).lower()
+            for stage in workflow_result.stages
+        ]
 
         if "premium" in tiers_used:
             return "PREMIUM"
@@ -308,13 +318,13 @@
         else:
             return "CHEAP"
 
-    def _build_tier_history(self, workflow_result: Any) -> List[Dict[str, Any]]:
+    def _build_tier_history(self, workflow_result: Any) -> list[dict[str, Any]]:
         """Build tier history from workflow stages."""
-        tier_groups: Dict[str, List[Any]] = {}
+        tier_groups: dict[str, list[Any]] = {}
 
         # Group stages by tier
         for stage in workflow_result.stages:
-            tier = stage.tier.value if hasattr(stage.tier, 'value') else str(stage.tier).lower()
+            tier = stage.tier.value if hasattr(stage.tier, "value") else str(stage.tier).lower()
             tier_upper = tier.upper()
             if tier_upper not in tier_groups:
                 tier_groups[tier_upper] = []
@@ -328,7 +338,7 @@
         success_stage = None
 
         for i, stage in enumerate(stages, 1):
-            if hasattr(stage, 'error') and stage.error:
+            if hasattr(stage, "error") and stage.error:
                 failures.append({"attempt": i, "quality_gate_failed": "execution"})
             else:
                 success_stage = i
@@ -359,12 +369,12 @@
         )
 
         # Calculate actual cost from stages
-        actual_cost = sum(stage.cost for stage in workflow_result.stages)
+        actual_cost: float = sum(stage.cost for stage in workflow_result.stages)
 
         # Rough estimate: PREMIUM tier is ~15x more expensive than CHEAP
         return actual_cost * 5  # Conservative multiplier
 
-    def _update_consolidated_patterns(self, progression: Dict[str, Any]):
+    def _update_consolidated_patterns(self, progression: dict[str, Any]):
         """Update the consolidated patterns.json file."""
         consolidated_file = self.patterns_dir / "all_patterns.json"
 
@@ -391,7 +401,7 @@
 def auto_recommend_tier(
     workflow_name: str,
     workflow_description: str,
-    files_affected: Optional[List[str]] = None,
+    files_affected: list[str] | None = None,
 ) -> str:
     """
     Quick helper to get tier recommendation without tracker.

empathy_software_plugin/cli.py

@@ -662,9 +662,7 @@ def scan_command():
         severity_icon = (
             "🔴"
             if issue.severity == "high"
-            else "🟡"
-            if issue.severity == "medium"
-            else "🔵"
+            else "🟡" if issue.severity == "medium" else "🔵"
         )
         print(f" {severity_icon} Line {issue.line_number}: {issue.message}")
         if len(result.issues) > 3:
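
Note: the collapsed ternary chain (here and in code_review_wizard.py below) is purely a formatting change: Python's conditional expression is right-associative, so the one-line and stacked forms parse identically. A quick illustrative check, not package code:

    for severity, expected in [("high", "🔴"), ("medium", "🟡"), ("low", "🔵")]:
        icon = "🔴" if severity == "high" else "🟡" if severity == "medium" else "🔵"
        assert icon == expected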

empathy_software_plugin/wizards/advanced_debugging_wizard.py

@@ -47,8 +47,8 @@ class AdvancedDebuggingWizard(BaseWizard):
         super().__init__()
         self.bug_analyzer = BugRiskAnalyzer()
         self.pattern_library = get_pattern_library()
-        self._name = "Advanced Debugging Wizard"
-        self._level = 4
+        self._name: str = "Advanced Debugging Wizard"
+        self._level: int = 4
 
     @property
     def name(self) -> str:
@@ -126,7 +126,8 @@ class AdvancedDebuggingWizard(BaseWizard):
         # Phase 4: Group by fixability
         fixability_by_linter = {}
         for linter_name, result in linter_results.items():
-            fixability = group_issues_by_fixability(linter_name, result["issues"])
+            issues_for_fixability: list[LintIssue] = result["issues"]  # type: ignore[assignment]
+            fixability = group_issues_by_fixability(linter_name, issues_for_fixability)
             fixability_by_linter[linter_name] = {
                 "auto_fixable": len(fixability["auto_fixable"]),
                 "manual": len(fixability["manual"]),
@@ -138,7 +139,8 @@ class AdvancedDebuggingWizard(BaseWizard):
             logger.info("Applying auto-fixes...")
 
             for linter_name, result in linter_results.items():
-                fixes = apply_fixes(linter_name, result["issues"], dry_run=False, auto_only=True)
+                issues_for_fixing: list[LintIssue] = result["issues"]  # type: ignore[assignment]
+                fixes = apply_fixes(linter_name, issues_for_fixing, dry_run=False, auto_only=True)
 
                 successful = [f for f in fixes if f.success]
                 failed = [f for f in fixes if not f.success]
@@ -155,7 +157,8 @@ class AdvancedDebuggingWizard(BaseWizard):
             logger.info("Verifying fixes...")
 
             for linter_name, result in linter_results.items():
-                verification = verify_fixes(linter_name, project_path, result["issues"])
+                issues_for_verification: list[LintIssue] = result["issues"]  # type: ignore[assignment]
+                verification = verify_fixes(linter_name, project_path, issues_for_verification)
 
                 verification_results[linter_name] = verification.to_dict()
 
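Note: the three hunks above apply one pattern. The values of linter_results are loosely typed dicts, so result["issues"] checks as Any; binding it to an annotated local (with a targeted ignore) gives group_issues_by_fixability, apply_fixes, and verify_fixes a concrete list[LintIssue]. A self-contained sketch of the same narrowing via typing.cast, where this LintIssue is only a placeholder for the class imported from .linter_parsers:

    from dataclasses import dataclass
    from typing import Any, cast

    @dataclass
    class LintIssue:  # placeholder; the real class lives in .linter_parsers
        rule: str
        message: str

    def issues_of(result: dict[str, Any]) -> list[LintIssue]:
        # Annotation-only narrowing, same effect as the inline ignore comments.
        return cast(list[LintIssue], result["issues"])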
@@ -201,7 +204,7 @@ class AdvancedDebuggingWizard(BaseWizard):
         insights = []
 
         # Group issues by language
-        by_language = {}
+        by_language: dict[str, list[LintIssue]] = {}
         for issue in issues:
             lang = issue.linter
             if lang not in by_language:

empathy_software_plugin/wizards/code_review_wizard.py

@@ -561,9 +561,7 @@ class CodeReviewWizard(BaseWizard):
            icon = (
                "⚠️"
                if finding["severity"] == "warning"
-               else "❌"
-               if finding["severity"] == "error"
-               else "ℹ️"
+               else "❌" if finding["severity"] == "error" else "ℹ️"
            )
            lines.append(f"{icon} {finding['file']}:{finding['line']}")
            lines.append(f" Pattern: {finding['pattern_type']} ({finding['pattern_id']})")

empathy_software_plugin/wizards/debugging/__init__.py

@@ -10,10 +10,10 @@ from .bug_risk_analyzer import BugRisk, BugRiskAnalyzer, RiskAssessment
 from .config_loaders import ConfigLoaderFactory, LintConfig, load_config
 from .fix_applier import FixApplierFactory, FixResult, apply_fixes, group_issues_by_fixability
 from .language_patterns import (
-    CrossLanguagePatternLibrary,
-    PatternCategory,
-    UniversalPattern,
-    get_pattern_library,
+    CrossLanguagePatternLibrary,
+    PatternCategory,
+    UniversalPattern,
+    get_pattern_library,
 )
 from .linter_parsers import LinterParserFactory, LintIssue, Severity, parse_linter_output
 from .verification import VerificationResult, compare_issue_lists, run_linter, verify_fixes

empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py

@@ -335,7 +335,7 @@ class BugRiskAnalyzer:
         This is the Level 4 alert format.
         """
         # Count by risk level
-        by_risk = {
+        by_risk: dict[BugRisk, list[RiskAssessment]] = {
             BugRisk.CRITICAL: [],
             BugRisk.HIGH: [],
             BugRisk.MEDIUM: [],

empathy_software_plugin/wizards/debugging/config_loaders.py

@@ -246,15 +246,19 @@ class PylintConfigLoader(BaseConfigLoader):
         """Load from pyproject.toml"""
         try:
             import tomli
+
+            toml_loader = tomli
         except ImportError:
             # Fallback for Python 3.11+
             try:
-                import tomllib as tomli
+                import tomllib
+
+                toml_loader = tomllib
             except ImportError as e:
                 raise ImportError("tomli or tomllib required for pyproject.toml") from e
 
         with open(path, "rb") as f:
-            data = tomli.load(f)
+            data = toml_loader.load(f)
 
         pylint_config = data.get("tool", {}).get("pylint", {})
 
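Note: the loader change drops `import tomllib as tomli`, which re-binds one module's name to another and can trip strict type checkers; routing both branches through a single toml_loader name leaves each import untouched. Both modules expose the same binary-mode load(). A hedged standalone sketch of the same selection logic, with the table lookup assumed from the diff:

    from pathlib import Path

    try:
        import tomli

        toml_loader = tomli
    except ImportError:  # tomllib is stdlib on Python 3.11+
        import tomllib

        toml_loader = tomllib

    def load_pylint_table(path: Path) -> dict:
        # Mirrors the diff's data.get("tool", {}).get("pylint", {})
        with open(path, "rb") as f:
            data = toml_loader.load(f)
        return data.get("tool", {}).get("pylint", {})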

empathy_software_plugin/wizards/debugging/language_patterns.py

@@ -278,6 +278,7 @@ class CrossLanguagePatternLibrary:
             UniversalPattern if found, None otherwise
 
         """
+        pattern: UniversalPattern
         for pattern in self.patterns.values():
             if linter in pattern.language_manifestations:
                 if pattern.language_manifestations[linter] == rule:
@@ -295,11 +296,12 @@
             List of fix steps, or None if not found
 
         """
-        pattern = self.patterns.get(pattern_name)
+        pattern: UniversalPattern | None = self.patterns.get(pattern_name)
        if not pattern:
            return None
 
-        return pattern.language_specific_fixes.get(language)
+        result: list[str] | None = pattern.language_specific_fixes.get(language)
+        return result
 
     def suggest_cross_language_insight(
         self,

empathy_software_plugin/wizards/debugging/linter_parsers.py

@@ -360,7 +360,7 @@ class ClippyParser(BaseLinterParser):
 
         # Pattern: warning: unused variable: `x`
         # --> src/main.rs:5:9
-        current_issue = {}
+        current_issue: dict[str, Any] = {}
 
         for line in output.split("\n"):
             # Check for severity line

empathy_software_plugin/wizards/performance/profiler_parsers.py

@@ -167,20 +167,20 @@ class ChromeDevToolsParser(BaseProfilerParser):
             function_times[name]["call_count"] += 1
 
         # Convert to FunctionProfile
-        total_time = sum(data["total_time"] for data in function_times.values())
+        total_time = sum(stats["total_time"] for stats in function_times.values())
 
-        for func_name, data in function_times.items():
+        for func_name, stats in function_times.items():
             profiles.append(
                 FunctionProfile(
                     function_name=func_name,
                     file_path="",  # Chrome doesn't always provide
                     line_number=0,
-                    total_time=data["total_time"],
-                    self_time=data["total_time"],
-                    call_count=data["call_count"],
-                    cumulative_time=data["total_time"],
+                    total_time=stats["total_time"],
+                    self_time=stats["total_time"],
+                    call_count=stats["call_count"],
+                    cumulative_time=stats["total_time"],
                     percent_total=(
-                        (data["total_time"] / total_time * 100) if total_time > 0 else 0
+                        (stats["total_time"] / total_time * 100) if total_time > 0 else 0
                     ),
                     profiler=self.profiler_name,
                 ),

empathy_software_plugin/wizards/security/__init__.py

@@ -9,12 +9,12 @@ Licensed under Fair Source License 0.9
 from .exploit_analyzer import ExploitAnalyzer
 from .owasp_patterns import OWASPPatternDetector
 from .vulnerability_scanner import (
-    DependencyVulnerability,
-    Severity,
-    Vulnerability,
-    VulnerabilityScanner,
-    VulnerabilityScanReport,
-    VulnerabilityType,
+    DependencyVulnerability,
+    Severity,
+    Vulnerability,
+    VulnerabilityScanner,
+    VulnerabilityScanReport,
+    VulnerabilityType,
 )
 
 __all__ = [

empathy_software_plugin/wizards/security/vulnerability_scanner.py

@@ -225,7 +225,7 @@ class VulnerabilityScanner:
         lines: list[str],
     ) -> list[Vulnerability]:
         """Scan for hardcoded secrets"""
-        vulnerabilities = []
+        vulnerabilities: list[Vulnerability] = []
 
         # Skip certain file types
         if file_path.suffix in [".md", ".txt", ".json", ".xml"]:

empathy_software_plugin/wizards/security_analysis_wizard.py

@@ -168,10 +168,10 @@ class SecurityAnalysisWizard(BaseWizard):
 
     def _group_by_category(self, vulnerabilities: list[dict[str, Any]]) -> dict[str, int]:
         """Group vulnerabilities by OWASP category"""
-        by_category = {}
+        by_category: dict[str, int] = {}
 
         for vuln in vulnerabilities:
-            category = vuln.get("category", "unknown")
+            category: str = str(vuln.get("category", "unknown"))
             by_category[category] = by_category.get(category, 0) + 1
 
         return by_category

empathy_software_plugin/wizards/testing/quality_analyzer.py

@@ -8,7 +8,7 @@ Licensed under Fair Source License 0.9
 """
 
 import re
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from enum import Enum
 from pathlib import Path
 from typing import Any
@@ -37,14 +37,8 @@ class TestFunction:
     assertions_count: int
     execution_time: float | None = None
     is_async: bool = False
-    uses_fixtures: list[str] | None = None
-    issues: list[TestQualityIssue] | None = None
-
-    def __post_init__(self):
-        if self.uses_fixtures is None:
-            self.uses_fixtures = []
-        if self.issues is None:
-            self.issues = []
+    uses_fixtures: list[str] = field(default_factory=list)
+    issues: list[TestQualityIssue] = field(default_factory=list)
 
     @property
     def quality_score(self) -> float:
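
Note: replacing the Optional-plus-__post_init__ idiom with field(default_factory=list) also tightens the field types from list[...] | None to plain list[...], so callers no longer need None checks. The idiom in isolation (illustrative, not package code):

    from dataclasses import dataclass, field

    @dataclass
    class Example:
        tags: list[str] = field(default_factory=list)  # fresh list per instance

    a, b = Example(), Example()
    a.tags.append("x")
    assert b.tags == []  # no shared mutable default, no None sentinel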

empathy_software_plugin/wizards/testing/test_suggester.py

@@ -494,7 +494,7 @@ def {test_name}({params_str}):
         summary.append("=" * 60)
 
         # Group by priority
-        by_priority = {
+        by_priority: dict[TestPriority, list[TestSuggestion]] = {
             TestPriority.CRITICAL: [],
             TestPriority.HIGH: [],
             TestPriority.MEDIUM: [],

empathy_os/.empathy/costs.json

@@ -1,60 +0,0 @@
-{
-  "requests": [
-    {
-      "timestamp": "2026-01-07T06:30:40.722211",
-      "model": "claude-3-5-haiku-20241022",
-      "tier": "cheap",
-      "task_type": "workflow:refactor-plan:scan",
-      "input_tokens": 0,
-      "output_tokens": 572,
-      "actual_cost": 0.002288,
-      "baseline_cost": 0.0429,
-      "savings": 0.040612
-    },
-    {
-      "timestamp": "2026-01-07T06:30:40.723099",
-      "model": "claude-sonnet-4-20250514",
-      "tier": "capable",
-      "task_type": "workflow:refactor-plan:analyze",
-      "input_tokens": 676,
-      "output_tokens": 193,
-      "actual_cost": 0.004923,
-      "baseline_cost": 0.024615,
-      "savings": 0.019692
-    },
-    {
-      "timestamp": "2026-01-07T06:30:43.930646",
-      "model": "claude-sonnet-4-20250514",
-      "tier": "capable",
-      "task_type": "workflow:refactor-plan:prioritize",
-      "input_tokens": 873,
-      "output_tokens": 736,
-      "actual_cost": 0.013659,
-      "baseline_cost": 0.068295,
-      "savings": 0.054636
-    },
-    {
-      "timestamp": "2026-01-07T06:30:43.934470",
-      "model": "claude-opus-4-5-20251101",
-      "tier": "premium",
-      "task_type": "workflow:refactor-plan:plan",
-      "input_tokens": 0,
-      "output_tokens": 0,
-      "actual_cost": 0.0,
-      "baseline_cost": 0.0,
-      "savings": 0.0
-    }
-  ],
-  "daily_totals": {
-    "2026-01-07": {
-      "requests": 4,
-      "input_tokens": 1549,
-      "output_tokens": 1501,
-      "actual_cost": 0.02087,
-      "baseline_cost": 0.13581,
-      "savings": 0.11494
-    }
-  },
-  "created_at": "2026-01-07T06:30:37.244775",
-  "last_updated": "2026-01-07T06:30:43.934483"
-}

empathy_os/.empathy/discovery_stats.json

@@ -1,15 +0,0 @@
-{
-  "command_counts": {
-    "workflow": 1
-  },
-  "tips_shown": [
-    "weekly_sync"
-  ],
-  "total_commands": 1,
-  "patterns_learned": 0,
-  "api_requests": 0,
-  "tech_debt_trend": "unknown",
-  "last_claude_sync": null,
-  "first_run": "2026-01-07T06:30:43.936384",
-  "last_updated": "2026-01-07T06:30:43.936546"
-}

empathy_os/.empathy/workflow_runs.json

@@ -1,45 +0,0 @@
-[
-  {
-    "workflow": "refactor-plan",
-    "provider": "anthropic",
-    "success": true,
-    "started_at": "2026-01-07T06:30:40.702139",
-    "completed_at": "2026-01-07T06:30:43.934850",
-    "duration_ms": 3232,
-    "cost": 0.02087,
-    "baseline_cost": 0.13581,
-    "savings": 0.11493999999999999,
-    "savings_percent": 84.63294308224725,
-    "stages": [
-      {
-        "name": "scan",
-        "tier": "cheap",
-        "skipped": false,
-        "cost": 0.002288,
-        "duration_ms": 20
-      },
-      {
-        "name": "analyze",
-        "tier": "capable",
-        "skipped": false,
-        "cost": 0.004923,
-        "duration_ms": 0
-      },
-      {
-        "name": "prioritize",
-        "tier": "capable",
-        "skipped": false,
-        "cost": 0.013659,
-        "duration_ms": 3207
-      },
-      {
-        "name": "plan",
-        "tier": "premium",
-        "skipped": false,
-        "cost": 0.0,
-        "duration_ms": 3
-      }
-    ],
-    "error": null
-  }
-]
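
Note: the three deleted .empathy JSON files are runtime telemetry fixtures (per-request costs, CLI usage stats, and a workflow run log) that appear to have been packaged into the 3.9.1 wheel by accident; 3.9.3 removes them. One way to verify against the published artifact (stdlib only; the wheel filename is assumed):

    import zipfile

    with zipfile.ZipFile("empathy_framework-3.9.3-py3-none-any.whl") as whl:
        leaked = [name for name in whl.namelist() if "/.empathy/" in name]
    assert not leaked, leaked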