empathy-framework 4.7.0-py3-none-any.whl → 4.8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. empathy_framework-4.8.0.dist-info/METADATA +753 -0
  2. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/RECORD +83 -37
  3. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/WHEEL +1 -1
  4. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/entry_points.txt +2 -1
  5. empathy_os/__init__.py +2 -0
  6. empathy_os/cache/hash_only.py +6 -3
  7. empathy_os/cache/hybrid.py +6 -3
  8. empathy_os/cli/__init__.py +128 -238
  9. empathy_os/cli/__main__.py +5 -33
  10. empathy_os/cli/commands/__init__.py +1 -8
  11. empathy_os/cli/commands/help.py +331 -0
  12. empathy_os/cli/commands/info.py +140 -0
  13. empathy_os/cli/commands/inspect.py +437 -0
  14. empathy_os/cli/commands/metrics.py +92 -0
  15. empathy_os/cli/commands/orchestrate.py +184 -0
  16. empathy_os/cli/commands/patterns.py +207 -0
  17. empathy_os/cli/commands/provider.py +93 -81
  18. empathy_os/cli/commands/setup.py +96 -0
  19. empathy_os/cli/commands/status.py +235 -0
  20. empathy_os/cli/commands/sync.py +166 -0
  21. empathy_os/cli/commands/tier.py +121 -0
  22. empathy_os/cli/commands/workflow.py +574 -0
  23. empathy_os/cli/parsers/__init__.py +62 -0
  24. empathy_os/cli/parsers/help.py +41 -0
  25. empathy_os/cli/parsers/info.py +26 -0
  26. empathy_os/cli/parsers/inspect.py +66 -0
  27. empathy_os/cli/parsers/metrics.py +42 -0
  28. empathy_os/cli/parsers/orchestrate.py +61 -0
  29. empathy_os/cli/parsers/patterns.py +54 -0
  30. empathy_os/cli/parsers/provider.py +40 -0
  31. empathy_os/cli/parsers/setup.py +42 -0
  32. empathy_os/cli/parsers/status.py +47 -0
  33. empathy_os/cli/parsers/sync.py +31 -0
  34. empathy_os/cli/parsers/tier.py +33 -0
  35. empathy_os/cli/parsers/workflow.py +77 -0
  36. empathy_os/cli/utils/__init__.py +1 -0
  37. empathy_os/cli/utils/data.py +242 -0
  38. empathy_os/cli/utils/helpers.py +68 -0
  39. empathy_os/{cli.py → cli_legacy.py} +27 -27
  40. empathy_os/cli_minimal.py +662 -0
  41. empathy_os/cli_router.py +384 -0
  42. empathy_os/cli_unified.py +38 -2
  43. empathy_os/memory/__init__.py +19 -5
  44. empathy_os/memory/short_term.py +14 -404
  45. empathy_os/memory/types.py +437 -0
  46. empathy_os/memory/unified.py +61 -48
  47. empathy_os/models/fallback.py +1 -1
  48. empathy_os/models/provider_config.py +59 -344
  49. empathy_os/models/registry.py +31 -180
  50. empathy_os/monitoring/alerts.py +14 -20
  51. empathy_os/monitoring/alerts_cli.py +24 -7
  52. empathy_os/project_index/__init__.py +2 -0
  53. empathy_os/project_index/index.py +210 -5
  54. empathy_os/project_index/scanner.py +45 -14
  55. empathy_os/project_index/scanner_parallel.py +291 -0
  56. empathy_os/socratic/ab_testing.py +1 -1
  57. empathy_os/vscode_bridge 2.py +173 -0
  58. empathy_os/workflows/__init__.py +31 -2
  59. empathy_os/workflows/base.py +349 -325
  60. empathy_os/workflows/bug_predict.py +8 -0
  61. empathy_os/workflows/builder.py +273 -0
  62. empathy_os/workflows/caching.py +253 -0
  63. empathy_os/workflows/code_review_pipeline.py +1 -0
  64. empathy_os/workflows/history.py +510 -0
  65. empathy_os/workflows/output.py +410 -0
  66. empathy_os/workflows/perf_audit.py +125 -19
  67. empathy_os/workflows/progress.py +324 -22
  68. empathy_os/workflows/progressive/README 2.md +454 -0
  69. empathy_os/workflows/progressive/__init__ 2.py +92 -0
  70. empathy_os/workflows/progressive/cli 2.py +242 -0
  71. empathy_os/workflows/progressive/core 2.py +488 -0
  72. empathy_os/workflows/progressive/orchestrator 2.py +701 -0
  73. empathy_os/workflows/progressive/reports 2.py +528 -0
  74. empathy_os/workflows/progressive/telemetry 2.py +280 -0
  75. empathy_os/workflows/progressive/test_gen 2.py +514 -0
  76. empathy_os/workflows/progressive/workflow 2.py +628 -0
  77. empathy_os/workflows/routing.py +168 -0
  78. empathy_os/workflows/secure_release.py +1 -0
  79. empathy_os/workflows/security_audit.py +190 -0
  80. empathy_os/workflows/security_audit_phase3.py +328 -0
  81. empathy_os/workflows/telemetry_mixin.py +269 -0
  82. empathy_framework-4.7.0.dist-info/METADATA +0 -1598
  83. empathy_os/dashboard/__init__.py +0 -15
  84. empathy_os/dashboard/server.py +0 -941
  85. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/licenses/LICENSE +0 -0
  86. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/top_level.txt +0 -0
empathy_os/cli/commands/status.py
@@ -0,0 +1,235 @@
+"""Status and health check commands for the CLI.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+import asyncio
+
+
+def cmd_status(args):
+    """Session status assistant - prioritized project status report.
+
+    Collects and displays project status including patterns, git context,
+    and health metrics with priority scoring.
+
+    Args:
+        args: Namespace object from argparse with attributes:
+            - patterns_dir (str): Path to patterns directory (default: ./patterns).
+            - project_root (str): Project root directory (default: .).
+            - inactivity (int): Minutes of inactivity before showing status.
+            - full (bool): If True, show all items without limit.
+            - json (bool): If True, output as JSON format.
+            - select (int | None): Select specific item for action prompt.
+            - force (bool): If True, show status even with recent activity.
+
+    Returns:
+        None: Prints prioritized status report or JSON output.
+    """
+    from empathy_llm_toolkit.session_status import SessionStatusCollector
+
+    config = {"inactivity_minutes": args.inactivity}
+    collector = SessionStatusCollector(
+        patterns_dir=args.patterns_dir,
+        project_root=args.project_root,
+        config=config,
+    )
+
+    # Check if should show (unless forced)
+    if not args.force and not collector.should_show():
+        print("No status update needed (recent activity detected).")
+        print("Use --force to show status anyway.")
+        return
+
+    # Collect status
+    status = collector.collect()
+
+    # Handle selection
+    if args.select:
+        prompt = collector.get_action_prompt(status, args.select)
+        if prompt:
+            print(f"\nAction prompt for selection {args.select}:\n")
+            print(prompt)
+        else:
+            print(f"Invalid selection: {args.select}")
+        return
+
+    # Output
+    if args.json:
+        print(collector.format_json(status))
+    else:
+        max_items = None if args.full else 5
+        print()
+        print(collector.format_output(status, max_items=max_items))
+        print()
+
+    # Record interaction
+    collector.record_interaction()
+
+
+def cmd_review(args):
+    """Pattern-based code review against historical bugs.
+
+    Note: This command has been deprecated. The underlying workflow module
+    has been removed. Use 'empathy workflow run bug-predict' instead.
+
+    Args:
+        args: Namespace object from argparse.
+
+    Returns:
+        None: Prints deprecation message.
+    """
+    print("⚠️ The 'review' command has been deprecated.")
+    print()
+    print("The CodeReviewWorkflow module has been removed.")
+    print("Please use one of these alternatives:")
+    print()
+    print(" empathy workflow run bug-predict # Scan for risky patterns")
+    print(" ruff check <files> # Fast linting")
+    print(" bandit -r <path> # Security scanning")
+    print()
+
+
+def cmd_health(args):
+    """Code health assistant - run health checks and auto-fix issues.
+
+    Runs comprehensive health checks including linting, type checking,
+    and formatting with optional auto-fix capability.
+
+    Args:
+        args: Namespace object from argparse with attributes:
+            - check (str | None): Specific check to run (lint/type/format/test).
+            - deep (bool): If True, run comprehensive checks.
+            - fix (bool): If True, auto-fix issues where possible.
+            - threshold (str): Severity threshold for issues.
+            - project_root (str): Project root directory.
+            - patterns_dir (str): Path to patterns directory.
+            - details (bool): If True, show detailed issue list.
+            - compare (str | None): Compare against historical baseline.
+            - export (str | None): Export results to file.
+            - json (bool): If True, output as JSON format.
+
+    Returns:
+        None: Prints health check results and optionally fixes issues.
+    """
+    from empathy_llm_toolkit.code_health import (
+        AutoFixer,
+        CheckCategory,
+        HealthCheckRunner,
+        HealthTrendTracker,
+        format_health_output,
+    )
+
+    runner = HealthCheckRunner(
+        project_root=args.project_root,
+    )
+
+    # Determine what checks to run
+    if args.check:
+        # Run specific check
+        try:
+            category = CheckCategory(args.check)
+            report_future = runner.run_check(category)
+            result = asyncio.run(report_future)
+            # Create a minimal report with just this result
+            from empathy_llm_toolkit.code_health import HealthReport
+
+            report = HealthReport(project_root=args.project_root)
+            report.add_result(result)
+        except ValueError:
+            print(f"Unknown check category: {args.check}")
+            print(f"Available: {', '.join(c.value for c in CheckCategory)}")
+            return
+    elif args.deep:
+        # Run all checks
+        print("Running comprehensive health check...\n")
+        report = asyncio.run(runner.run_all())
+    else:
+        # Run quick checks (default)
+        report = asyncio.run(runner.run_quick())
+
+    # Handle fix mode
+    if args.fix:
+        fixer = AutoFixer()
+
+        if args.dry_run:
+            # Preview only
+            fixes = fixer.preview_fixes(report)
+            if fixes:
+                print("Would fix the following issues:\n")
+                for fix in fixes:
+                    safe_indicator = " (safe)" if fix["safe"] else " (needs confirmation)"
+                    print(f" [{fix['category']}] {fix['file']}")
+                    print(f" {fix['issue']}")
+                    print(f" Command: {fix['fix_command']}{safe_indicator}")
+                    print()
+            else:
+                print("No auto-fixable issues found.")
+            return
+
+        # Apply fixes
+        if args.check:
+            try:
+                category = CheckCategory(args.check)
+                result = asyncio.run(fixer.fix_category(report, category))
+            except ValueError:
+                result = {"fixed": [], "skipped": [], "failed": []}
+        else:
+            result = asyncio.run(fixer.fix_all(report, interactive=args.interactive))
+
+        # Report fix results
+        if result["fixed"]:
+            print(f"✓ Fixed {len(result['fixed'])} issue(s)")
+            for fix in result["fixed"][:5]:
+                print(f" - {fix['file_path']}: {fix['message']}")
+            if len(result["fixed"]) > 5:
+                print(f" ... and {len(result['fixed']) - 5} more")
+
+        if result["skipped"]:
+            if args.interactive:
+                print(f"\n⚠ Skipped {len(result['skipped'])} issue(s) (could not auto-fix)")
+            else:
+                print(
+                    f"\n⚠ Skipped {len(result['skipped'])} issue(s) (use --interactive to review)",
+                )
+
+        if result["failed"]:
+            print(f"\n✗ Failed to fix {len(result['failed'])} issue(s)")
+
+        return
+
+    # Handle trends
+    if args.trends:
+        tracker = HealthTrendTracker(project_root=args.project_root)
+        trends = tracker.get_trends(days=args.trends)
+
+        print(f"📈 Health Trends ({trends['period_days']} days)\n")
+        print(f"Average Score: {trends['average_score']}/100")
+        print(f"Trend: {trends['trend_direction']} ({trends['score_change']:+d})")
+
+        if trends["data_points"]:
+            print("\nRecent scores:")
+            for point in trends["data_points"][:7]:
+                print(f" {point['date']}: {point['score']}/100")
+
+        hotspots = tracker.identify_hotspots()
+        if hotspots:
+            print("\n🔥 Hotspots (files with recurring issues):")
+            for spot in hotspots[:5]:
+                print(f" {spot['file']}: {spot['issue_count']} issues")
+
+        return
+
+    # Output report
+    if args.json:
+        import json
+
+        print(json.dumps(report.to_dict(), indent=2, default=str))
+    else:
+        level = 3 if args.full else (2 if args.details else 1)
+        print(format_health_output(report, level=level))
+
+    # Record to trend history
+    if not args.check: # Only record full runs
+        tracker = HealthTrendTracker(project_root=args.project_root)
+        tracker.record_check(report)
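The new status and health commands are wired up through the CLI parsers added in this release, but the functions can also be driven directly with an argparse Namespace. Below is a minimal sketch, not part of the diff, assuming the attributes documented in cmd_status; the inactivity value is illustrative. Note also that cmd_health reads args.dry_run, args.interactive, args.trends, and args.full, which its docstring does not list.

    # Hypothetical usage sketch (not part of the package diff).
    import argparse

    from empathy_os.cli.commands.status import cmd_status

    args = argparse.Namespace(
        patterns_dir="./patterns",  # documented default
        project_root=".",           # documented default
        inactivity=30,              # illustrative; the real default comes from the parser, not shown here
        full=False,
        json=True,                  # emit JSON instead of the formatted report
        select=None,
        force=True,                 # skip the recent-activity check
    )
    cmd_status(args)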
empathy_os/cli/commands/sync.py
@@ -0,0 +1,166 @@
+"""Sync commands for pattern synchronization.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+import json as json_mod
+from pathlib import Path
+
+from empathy_os.config import _validate_file_path
+from empathy_os.logging_config import get_logger
+
+logger = get_logger(__name__)
+
+
+def cmd_sync_claude(args):
+    """Sync patterns to Claude Code rules directory.
+
+    Converts learned patterns into Claude Code markdown rules.
+
+    Args:
+        args: Namespace object from argparse with attributes:
+            - patterns_dir (str): Source patterns directory.
+            - output_dir (str): Target Claude Code rules directory.
+
+    Returns:
+        int: 0 on success, 1 on failure.
+
+    Raises:
+        ValueError: If output path is invalid or unsafe.
+    """
+    patterns_dir = Path(args.patterns_dir)
+    # Validate output directory path
+    validated_output_dir = _validate_file_path(args.output_dir)
+    output_dir = validated_output_dir
+
+    print("=" * 60)
+    print(" SYNC PATTERNS TO CLAUDE CODE")
+    print("=" * 60 + "\n")
+
+    if not patterns_dir.exists():
+        print(f"✗ Patterns directory not found: {patterns_dir}")
+        print(" Run 'empathy learn --analyze 20' first to learn patterns")
+        return 1
+
+    # Create output directory
+    output_dir.mkdir(parents=True, exist_ok=True)
+
+    synced_count = 0
+    pattern_files = ["debugging.json", "security.json", "tech_debt.json", "inspection.json"]
+
+    for pattern_file in pattern_files:
+        source_path = patterns_dir / pattern_file
+        if not source_path.exists():
+            continue
+
+        try:
+            with open(source_path) as f:
+                data = json_mod.load(f)
+
+            patterns = data.get("patterns", data.get("items", []))
+            if not patterns:
+                continue
+
+            # Generate markdown rule file
+            category = pattern_file.replace(".json", "")
+            rule_content = _generate_claude_rule(category, patterns)
+
+            # Write rule file
+            rule_file = output_dir / f"{category}.md"
+            # Validate rule file path before writing
+            validated_rule_file = _validate_file_path(str(rule_file), allowed_dir=str(output_dir))
+            with open(validated_rule_file, "w") as f:
+                f.write(rule_content)
+
+            print(f" ✓ {category}: {len(patterns)} patterns → {rule_file}")
+            synced_count += len(patterns)
+
+        except (json_mod.JSONDecodeError, OSError) as e:
+            print(f" ✗ Failed to process {pattern_file}: {e}")
+
+    print(f"\n{'─' * 60}")
+    print(f" Total: {synced_count} patterns synced to {output_dir}")
+    print("=" * 60 + "\n")
+
+    if synced_count == 0:
+        print("No patterns to sync. Run 'empathy learn' first.")
+        return 1
+
+    return 0
+
+
+def _generate_claude_rule(category: str, patterns: list) -> str:
+    """Generate a Claude Code rule file from patterns."""
+    lines = [
+        f"# {category.replace('_', ' ').title()} Patterns",
+        "",
+        "Auto-generated from Empathy Framework learned patterns.",
+        f"Total patterns: {len(patterns)}",
+        "",
+        "---",
+        "",
+    ]
+
+    if category == "debugging":
+        lines.extend(
+            [
+                "## Bug Fix Patterns",
+                "",
+                "When debugging similar issues, consider these historical fixes:",
+                "",
+            ],
+        )
+        for p in patterns[:20]: # Limit to 20 most recent
+            bug_type = p.get("bug_type", "unknown")
+            root_cause = p.get("root_cause", "Unknown")
+            fix = p.get("fix", "See commit history")
+            files = p.get("files_affected", [])
+
+            lines.append(f"### {bug_type}")
+            lines.append(f"- **Root cause**: {root_cause}")
+            lines.append(f"- **Fix**: {fix}")
+            if files:
+                lines.append(f"- **Files**: {', '.join(files[:3])}")
+            lines.append("")
+
+    elif category == "security":
+        lines.extend(
+            [
+                "## Security Decisions",
+                "",
+                "Previously reviewed security items:",
+                "",
+            ],
+        )
+        for p in patterns[:20]:
+            decision = p.get("decision", "unknown")
+            reason = p.get("reason", "")
+            lines.append(f"- **{p.get('type', 'unknown')}**: {decision}")
+            if reason:
+                lines.append(f" - Reason: {reason}")
+            lines.append("")
+
+    elif category == "tech_debt":
+        lines.extend(
+            [
+                "## Tech Debt Tracking",
+                "",
+                "Known technical debt items:",
+                "",
+            ],
+        )
+        for p in patterns[:20]:
+            lines.append(f"- {p.get('description', str(p))}")
+
+    else:
+        lines.extend(
+            [
+                f"## {category.title()} Items",
+                "",
+            ],
+        )
+        for p in patterns[:20]:
+            lines.append(f"- {p.get('description', str(p)[:100])}")
+
+    return "\n".join(lines)
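cmd_sync_claude expects each pattern file to hold a top-level "patterns" (or "items") list, which the helper _generate_claude_rule renders into markdown. A minimal sketch, not part of the diff, with illustrative pattern data showing the keys the debugging branch looks up:

    # Hypothetical sketch: the JSON shape _generate_claude_rule reads for "debugging".
    from empathy_os.cli.commands.sync import _generate_claude_rule

    patterns = [
        {
            "bug_type": "race condition",                       # illustrative values
            "root_cause": "shared cache mutated without a lock",
            "fix": "guard writes with a threading.Lock",
            "files_affected": ["empathy_os/cache/hybrid.py"],
        },
    ]
    print(_generate_claude_rule("debugging", patterns))
    # Renders "### race condition" with the root cause, fix, and up to three
    # affected files; only the first 20 patterns are included.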
empathy_os/cli/commands/tier.py
@@ -0,0 +1,121 @@
+"""Tier management commands for intelligent model selection.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+
+def cmd_tier_recommend(args):
+    """Get intelligent tier recommendation for a bug/task.
+
+    Analyzes bug description and historical patterns to recommend
+    the most cost-effective tier (HAIKU/SONNET/OPUS).
+
+    Args:
+        args: Namespace object from argparse with attributes:
+            - description (str): Bug or task description to analyze.
+            - files (str | None): Comma-separated list of affected files.
+            - complexity (str | None): Complexity hint (low/medium/high).
+
+    Returns:
+        None: Prints tier recommendation with confidence and expected cost.
+    """
+    from empathy_os.tier_recommender import TierRecommender
+
+    recommender = TierRecommender()
+
+    # Get recommendation
+    result = recommender.recommend(
+        bug_description=args.description,
+        files_affected=args.files.split(",") if args.files else None,
+        complexity_hint=args.complexity,
+    )
+
+    # Display results
+    print()
+    print("=" * 60)
+    print(" TIER RECOMMENDATION")
+    print("=" * 60)
+    print()
+    print(f" Bug/Task: {args.description}")
+    print()
+    print(f" 📍 Recommended Tier: {result.tier}")
+    print(f" 🎯 Confidence: {result.confidence * 100:.1f}%")
+    print(f" 💰 Expected Cost: ${result.expected_cost:.3f}")
+    print(f" 🔄 Expected Attempts: {result.expected_attempts:.1f}")
+    print()
+    print(" 📊 Reasoning:")
+    print(f" {result.reasoning}")
+    print()
+
+    if result.similar_patterns_count > 0:
+        print(f" ✅ Based on {result.similar_patterns_count} similar patterns")
+    else:
+        print(" ⚠️ No historical data - using conservative default")
+
+    if result.fallback_used:
+        print()
+        print(" 💡 Tip: As more patterns are collected, recommendations")
+        print(" will become more accurate and personalized.")
+
+    print()
+    print("=" * 60)
+    print()
+
+
+def cmd_tier_stats(args):
+    """Show tier pattern learning statistics.
+
+    Displays statistics about collected patterns and tier distribution.
+
+    Args:
+        args: Namespace object from argparse (no additional attributes used).
+
+    Returns:
+        None: Prints tier statistics, savings percentages, and bug type distribution.
+    """
+    from empathy_os.tier_recommender import TierRecommender
+
+    recommender = TierRecommender()
+    stats = recommender.get_stats()
+
+    print()
+    print("=" * 60)
+    print(" TIER PATTERN LEARNING STATS")
+    print("=" * 60)
+    print()
+
+    if stats.get("total_patterns", 0) == 0:
+        print(" No patterns collected yet.")
+        print()
+        print(" 💡 Patterns are automatically collected as you use")
+        print(" cascading workflows with enhanced tracking enabled.")
+        print()
+        print("=" * 60)
+        print()
+        return
+
+    print(f" Total Patterns: {stats['total_patterns']}")
+    print(f" Avg Savings: {stats['avg_savings_percent']}%")
+    print()
+
+    print(" TIER DISTRIBUTION")
+    print(" " + "-" * 40)
+    for tier, count in stats["patterns_by_tier"].items():
+        percent = (count / stats["total_patterns"]) * 100
+        bar = "█" * int(percent / 5)
+        print(f" {tier:10} {count:3} ({percent:5.1f}%) {bar}")
+    print()
+
+    print(" BUG TYPE DISTRIBUTION")
+    print(" " + "-" * 40)
+    sorted_types = sorted(
+        stats["bug_type_distribution"].items(), key=lambda x: x[1], reverse=True
+    )
+    for bug_type, count in sorted_types[:10]:
+        percent = (count / stats["total_patterns"]) * 100
+        print(f" {bug_type:20} {count:3} ({percent:5.1f}%)")
+
+    print()
+    print("=" * 60)
+    print()
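The tier commands wrap TierRecommender from empathy_os.tier_recommender. A minimal sketch, not part of the diff, assuming the argparse attributes documented above; the description, file list, and complexity hint are illustrative:

    # Hypothetical sketch: requesting a tier recommendation, then printing stats.
    import argparse

    from empathy_os.cli.commands.tier import cmd_tier_recommend, cmd_tier_stats

    args = argparse.Namespace(
        description="KeyError when loading stale cache entries",           # illustrative
        files="empathy_os/cache/hash_only.py,empathy_os/cache/hybrid.py",  # comma-separated; split on ","
        complexity="low",                                                   # optional hint: low/medium/high
    )
    cmd_tier_recommend(args)

    cmd_tier_stats(argparse.Namespace())  # no extra attributes are read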