empathy-framework 4.7.1-py3-none-any.whl → 4.8.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/METADATA +65 -2
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/RECORD +73 -52
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/WHEEL +1 -1
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/entry_points.txt +2 -1
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/top_level.txt +0 -1
- empathy_os/__init__.py +2 -0
- empathy_os/cache/hash_only.py +6 -3
- empathy_os/cache/hybrid.py +6 -3
- empathy_os/cli/__init__.py +128 -238
- empathy_os/cli/__main__.py +5 -33
- empathy_os/cli/commands/__init__.py +1 -8
- empathy_os/cli/commands/help.py +331 -0
- empathy_os/cli/commands/info.py +140 -0
- empathy_os/cli/commands/inspect.py +437 -0
- empathy_os/cli/commands/metrics.py +92 -0
- empathy_os/cli/commands/orchestrate.py +184 -0
- empathy_os/cli/commands/patterns.py +207 -0
- empathy_os/cli/commands/provider.py +93 -81
- empathy_os/cli/commands/setup.py +96 -0
- empathy_os/cli/commands/status.py +235 -0
- empathy_os/cli/commands/sync.py +166 -0
- empathy_os/cli/commands/tier.py +121 -0
- empathy_os/cli/commands/workflow.py +574 -0
- empathy_os/cli/parsers/__init__.py +62 -0
- empathy_os/cli/parsers/help.py +41 -0
- empathy_os/cli/parsers/info.py +26 -0
- empathy_os/cli/parsers/inspect.py +66 -0
- empathy_os/cli/parsers/metrics.py +42 -0
- empathy_os/cli/parsers/orchestrate.py +61 -0
- empathy_os/cli/parsers/patterns.py +54 -0
- empathy_os/cli/parsers/provider.py +40 -0
- empathy_os/cli/parsers/setup.py +42 -0
- empathy_os/cli/parsers/status.py +47 -0
- empathy_os/cli/parsers/sync.py +31 -0
- empathy_os/cli/parsers/tier.py +33 -0
- empathy_os/cli/parsers/workflow.py +77 -0
- empathy_os/cli/utils/__init__.py +1 -0
- empathy_os/cli/utils/data.py +242 -0
- empathy_os/cli/utils/helpers.py +68 -0
- empathy_os/{cli.py → cli_legacy.py} +27 -27
- empathy_os/cli_minimal.py +662 -0
- empathy_os/cli_router.py +384 -0
- empathy_os/cli_unified.py +38 -2
- empathy_os/memory/__init__.py +19 -5
- empathy_os/memory/short_term.py +14 -404
- empathy_os/memory/types.py +437 -0
- empathy_os/memory/unified.py +61 -48
- empathy_os/models/fallback.py +1 -1
- empathy_os/models/provider_config.py +59 -344
- empathy_os/models/registry.py +31 -180
- empathy_os/monitoring/alerts.py +14 -20
- empathy_os/monitoring/alerts_cli.py +24 -7
- empathy_os/project_index/__init__.py +2 -0
- empathy_os/project_index/index.py +210 -5
- empathy_os/project_index/scanner.py +45 -14
- empathy_os/project_index/scanner_parallel.py +291 -0
- empathy_os/socratic/ab_testing.py +1 -1
- empathy_os/workflows/__init__.py +31 -2
- empathy_os/workflows/base.py +349 -325
- empathy_os/workflows/bug_predict.py +8 -0
- empathy_os/workflows/builder.py +273 -0
- empathy_os/workflows/caching.py +253 -0
- empathy_os/workflows/code_review_pipeline.py +1 -0
- empathy_os/workflows/history.py +510 -0
- empathy_os/workflows/output.py +410 -0
- empathy_os/workflows/perf_audit.py +125 -19
- empathy_os/workflows/progress.py +324 -22
- empathy_os/workflows/routing.py +168 -0
- empathy_os/workflows/secure_release.py +1 -0
- empathy_os/workflows/security_audit.py +190 -0
- empathy_os/workflows/security_audit_phase3.py +328 -0
- empathy_os/workflows/telemetry_mixin.py +269 -0
- empathy_os/dashboard/__init__.py +0 -15
- empathy_os/dashboard/server.py +0 -941
- patterns/README.md +0 -119
- patterns/__init__.py +0 -95
- patterns/behavior.py +0 -298
- patterns/code_review_memory.json +0 -441
- patterns/core.py +0 -97
- patterns/debugging.json +0 -3763
- patterns/empathy.py +0 -268
- patterns/health_check_memory.json +0 -505
- patterns/input.py +0 -161
- patterns/memory_graph.json +0 -8
- patterns/refactoring_memory.json +0 -1113
- patterns/registry.py +0 -663
- patterns/security_memory.json +0 -8
- patterns/structural.py +0 -415
- patterns/validation.py +0 -194
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/licenses/LICENSE +0 -0
empathy_os/cli/commands/orchestrate.py
@@ -0,0 +1,184 @@
+"""Orchestration commands for meta-workflows.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+import asyncio
+import json
+
+from empathy_os.logging_config import get_logger
+
+logger = get_logger(__name__)
+
+
+def cmd_orchestrate(args):
+    """Run meta-orchestration workflows.
+
+    Orchestrates teams of agents to accomplish complex tasks through
+    intelligent composition patterns.
+
+    Args:
+        args: Namespace object from argparse with attributes:
+            - workflow (str): Orchestration workflow name.
+            - path (str): Target path for orchestration.
+            - mode (str | None): Execution mode (e.g., 'daily', 'weekly', 'release').
+            - json (bool): If True, output as JSON format.
+            - dry_run (bool): If True, show plan without executing.
+            - verbose (bool): If True, show detailed output.
+
+    Returns:
+        int: 0 on success, 1 on failure.
+    """
+    from empathy_os.workflows.orchestrated_health_check import OrchestratedHealthCheckWorkflow
+    from empathy_os.workflows.orchestrated_release_prep import OrchestratedReleasePrepWorkflow
+
+    # Get workflow type
+    workflow_type = args.workflow
+
+    # Only print header in non-JSON mode
+    if not (hasattr(args, "json") and args.json):
+        print()
+        print("=" * 60)
+        print(f" META-ORCHESTRATION: {workflow_type.upper()}")
+        print("=" * 60)
+        print()
+
+    if workflow_type == "release-prep":
+        # Release Preparation workflow
+        path = args.path or "."
+        quality_gates = {}
+
+        # Collect custom quality gates
+        if hasattr(args, "min_coverage") and args.min_coverage is not None:
+            quality_gates["min_coverage"] = args.min_coverage
+        if hasattr(args, "min_quality") and args.min_quality is not None:
+            quality_gates["min_quality_score"] = args.min_quality
+        if hasattr(args, "max_critical") and args.max_critical is not None:
+            quality_gates["max_critical_issues"] = args.max_critical
+
+        # Only print details in non-JSON mode
+        if not (hasattr(args, "json") and args.json):
+            print(f" Project Path: {path}")
+            if quality_gates:
+                print(f" Quality Gates: {quality_gates}")
+            print()
+            print(" 🔍 Parallel Validation Agents:")
+            print(" • Security Auditor (vulnerability scan)")
+            print(" • Test Coverage Analyzer (gap analysis)")
+            print(" • Code Quality Reviewer (best practices)")
+            print(" • Documentation Writer (completeness)")
+            print()
+
+        # Create workflow
+        workflow = OrchestratedReleasePrepWorkflow(
+            quality_gates=quality_gates if quality_gates else None
+        )
+
+        try:
+            # Execute workflow
+            report = asyncio.run(workflow.execute(path=path))
+
+            # Display results
+            if hasattr(args, "json") and args.json:
+                print(json.dumps(report.to_dict(), indent=2))
+            else:
+                print(report.format_console_output())
+
+            # Return appropriate exit code
+            return 0 if report.approved else 1
+
+        except Exception as e:
+            print(f" ❌ Error executing release prep workflow: {e}")
+            print()
+            logger.exception("Release prep workflow failed")
+            return 1
+
+    elif workflow_type == "test-coverage":
+        # Test Coverage Boost workflow - DISABLED in v4.0.0
+        print(" ⚠️ FEATURE DISABLED")
+        print(" " + "-" * 56)
+        print()
+        print(" The test-coverage workflow has been disabled in v4.0.0")
+        print(" due to poor quality (0% test pass rate).")
+        print()
+        print(" This feature is being redesigned and will return in a")
+        print(" future release with improved test generation quality.")
+        print()
+        print(" Available v4.0 workflows:")
+        print(" • health-check - Real-time codebase health analysis")
+        print(" • release-prep - Quality gate validation")
+        print()
+        return 1
+
+    elif workflow_type == "health-check":
+        # Health Check workflow
+        mode = args.mode or "daily"
+        project_root = args.project_root or "."
+        focus_area = getattr(args, "focus", None)
+
+        # Only print details in non-JSON mode
+        if not (hasattr(args, "json") and args.json):
+            print(f" Mode: {mode.upper()}")
+            print(f" Project Root: {project_root}")
+            if focus_area:
+                print(f" Focus Area: {focus_area}")
+            print()
+
+            # Show agents for mode
+            mode_agents = {
+                "daily": ["Security", "Coverage", "Quality"],
+                "weekly": ["Security", "Coverage", "Quality", "Performance", "Documentation"],
+                "release": [
+                    "Security",
+                    "Coverage",
+                    "Quality",
+                    "Performance",
+                    "Documentation",
+                    "Architecture",
+                ],
+            }
+
+            print(f" 🔍 {mode.capitalize()} Check Agents:")
+            for agent in mode_agents.get(mode, []):
+                print(f" • {agent}")
+            print()
+
+        # Create workflow
+        workflow = OrchestratedHealthCheckWorkflow(mode=mode, project_root=project_root)
+
+        try:
+            # Execute workflow
+            report = asyncio.run(workflow.execute())
+
+            # Display results
+            if hasattr(args, "json") and args.json:
+                print(json.dumps(report.to_dict(), indent=2))
+            else:
+                print(report.format_console_output())
+
+            # Return appropriate exit code (70+ is passing)
+            return 0 if report.overall_health_score >= 70 else 1
+
+        except Exception as e:
+            print(f" ❌ Error executing health check workflow: {e}")
+            print()
+            logger.exception("Health check workflow failed")
+            return 1
+
+    else:
+        print(f" ❌ Unknown workflow type: {workflow_type}")
+        print()
+        print(" Available workflows:")
+        print(" - release-prep: Release readiness validation (parallel agents)")
+        print(" - health-check: Project health assessment (daily/weekly/release modes)")
+        print()
+        print(" Note: test-coverage workflow disabled in v4.0.0 (being redesigned)")
+        print()
+        return 1
+
+    print()
+    print("=" * 60)
+    print()
+
+    return 0
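For orientation: the new `cmd_orchestrate` entry point takes a plain argparse `Namespace`, so it can be exercised without the flag wiring in `empathy_os/cli/parsers/orchestrate.py` (also added in this release). A minimal sketch, assuming the 4.8.0 wheel is installed; the attribute values below are hypothetical and mirror the docstring above.

from argparse import Namespace

from empathy_os.cli.commands.orchestrate import cmd_orchestrate

# Hypothetical invocation. "health-check" reads mode/project_root/focus;
# "release-prep" reads path plus the optional quality-gate attributes.
args = Namespace(
    workflow="health-check",
    path=".",
    mode="daily",        # daily | weekly | release
    project_root=".",
    focus=None,
    json=False,
    dry_run=False,
    verbose=False,
)
exit_code = cmd_orchestrate(args)  # 0 if overall_health_score >= 70, else 1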
empathy_os/cli/commands/patterns.py
@@ -0,0 +1,207 @@
+"""Pattern management commands for the CLI.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+import sys
+
+from empathy_os.config import _validate_file_path
+from empathy_os.logging_config import get_logger
+from empathy_os.persistence import PatternPersistence
+
+logger = get_logger(__name__)
+
+
+def cmd_patterns_list(args):
+    """List patterns in a pattern library.
+
+    Args:
+        args: Namespace object from argparse with attributes:
+            - library (str): Path to pattern library file.
+            - format (str): Library format ('json' or 'sqlite').
+
+    Returns:
+        None: Prints pattern list to stdout. Exits with code 1 on failure.
+    """
+    filepath = args.library
+    format_type = args.format
+    logger.info(f"Listing patterns from library: {filepath} (format: {format_type})")
+
+    try:
+        if format_type == "json":
+            library = PatternPersistence.load_from_json(filepath)
+        elif format_type == "sqlite":
+            library = PatternPersistence.load_from_sqlite(filepath)
+        else:
+            logger.error(f"Unknown pattern library format: {format_type}")
+            logger.error(f"✗ Unknown format: {format_type}")
+            sys.exit(1)
+
+        logger.info(f"Loaded {len(library.patterns)} patterns from {filepath}")
+        logger.info(f"=== Pattern Library: {filepath} ===\n")
+        logger.info(f"Total patterns: {len(library.patterns)}")
+        logger.info(f"Total agents: {len(library.agent_contributions)}")
+
+        if library.patterns:
+            logger.info("\nPatterns:")
+            for pattern_id, pattern in library.patterns.items():
+                logger.info(f"\n [{pattern_id}] {pattern.name}")
+                logger.info(f" Agent: {pattern.agent_id}")
+                logger.info(f" Type: {pattern.pattern_type}")
+                logger.info(f" Confidence: {pattern.confidence:.2f}")
+                logger.info(f" Usage: {pattern.usage_count}")
+                logger.info(f" Success Rate: {pattern.success_rate:.2f}")
+    except FileNotFoundError:
+        logger.error(f"Pattern library not found: {filepath}")
+        logger.error(f"✗ Pattern library not found: {filepath}")
+        sys.exit(1)
+
+
+def cmd_patterns_export(args):
+    """Export patterns from one format to another.
+
+    Args:
+        args: Namespace object from argparse with attributes:
+            - input (str): Input file path.
+            - output (str): Output file path.
+            - input_format (str): Input format ('json' or 'sqlite').
+            - output_format (str): Output format ('json' or 'sqlite').
+
+    Returns:
+        None: Exports patterns to output file. Exits with code 1 on failure.
+
+    Raises:
+        ValueError: If output path is invalid or unsafe.
+    """
+    input_file = args.input
+    input_format = args.input_format
+    output_file = args.output
+    output_format = args.output_format
+
+    logger.info(f"Exporting patterns from {input_format} to {output_format}")
+
+    # Load from input format
+    try:
+        if input_format == "json":
+            library = PatternPersistence.load_from_json(input_file)
+        elif input_format == "sqlite":
+            library = PatternPersistence.load_from_sqlite(input_file)
+        else:
+            logger.error(f"Unknown input format: {input_format}")
+            logger.error(f"✗ Unknown input format: {input_format}")
+            sys.exit(1)
+
+        logger.info(f"Loaded {len(library.patterns)} patterns from {input_file}")
+        logger.info(f"✓ Loaded {len(library.patterns)} patterns from {input_file}")
+    except (OSError, FileNotFoundError) as e:
+        # Input file not found or cannot be read
+        logger.error(f"Pattern file error: {e}")
+        logger.error(f"✗ Cannot read pattern file: {e}")
+        sys.exit(1)
+    except (ValueError, KeyError) as e:
+        # Invalid pattern data format
+        logger.error(f"Pattern data error: {e}")
+        logger.error(f"✗ Invalid pattern data: {e}")
+        sys.exit(1)
+    except Exception as e:
+        # Unexpected errors loading patterns
+        logger.exception(f"Unexpected error loading patterns: {e}")
+        logger.error(f"✗ Failed to load patterns: {e}")
+        sys.exit(1)
+
+    # Validate output path
+    validated_output = _validate_file_path(output_file)
+
+    # Save to output format
+    try:
+        if output_format == "json":
+            PatternPersistence.save_to_json(library, str(validated_output))
+        elif output_format == "sqlite":
+            PatternPersistence.save_to_sqlite(library, str(validated_output))
+
+        logger.info(f"Saved {len(library.patterns)} patterns to {output_file}")
+        logger.info(f"✓ Saved {len(library.patterns)} patterns to {output_file}")
+    except (OSError, FileNotFoundError, PermissionError) as e:
+        # Cannot write output file
+        logger.error(f"Pattern file write error: {e}")
+        logger.error(f"✗ Cannot write pattern file: {e}")
+        sys.exit(1)
+    except Exception as e:
+        # Unexpected errors saving patterns
+        logger.exception(f"Unexpected error saving patterns: {e}")
+        logger.error(f"✗ Failed to save patterns: {e}")
+        sys.exit(1)
+
+
+def cmd_patterns_resolve(args):
+    """Resolve investigating bug patterns with root cause and fix.
+
+    Updates pattern status and adds resolution information.
+
+    Args:
+        args: Namespace object from argparse with attributes:
+            - pattern_id (str | None): Pattern ID to resolve.
+            - root_cause (str | None): Root cause description.
+            - fix (str | None): Fix description.
+            - fix_code (str | None): Code snippet of the fix.
+            - time (int | None): Resolution time in minutes.
+            - status (str): New status ('resolved', 'wont_fix', etc.).
+            - patterns_dir (str): Patterns directory path.
+            - commit (str | None): Related commit hash.
+
+    Returns:
+        None: Updates pattern and prints result. Exits with code 1 on failure.
+    """
+    from empathy_llm_toolkit.pattern_resolver import PatternResolver
+
+    resolver = PatternResolver(args.patterns_dir)
+
+    # If no bug_id, list investigating bugs
+    if not args.bug_id:
+        investigating = resolver.list_investigating()
+        if not investigating:
+            print("No bugs with 'investigating' status found.")
+            return
+
+        print(f"\nBugs needing resolution ({len(investigating)}):\n")
+        for bug in investigating:
+            print(f" {bug.get('bug_id', 'unknown')}")
+            print(f" Type: {bug.get('error_type', 'unknown')}")
+            print(f" File: {bug.get('file_path', 'unknown')}")
+            msg = bug.get("error_message", "N/A")
+            print(f" Message: {msg[:60]}..." if len(msg) > 60 else f" Message: {msg}")
+            print()
+        return
+
+    # Validate required args
+    if not args.root_cause or not args.fix:
+        print("✗ --root-cause and --fix are required when resolving a bug")
+        print(
+            " Example: empathy patterns resolve bug_123 --root-cause 'Null check' --fix 'Added ?.'",
+        )
+        sys.exit(1)
+
+    # Resolve the specified bug
+    success = resolver.resolve_bug(
+        bug_id=args.bug_id,
+        root_cause=args.root_cause,
+        fix_applied=args.fix,
+        fix_code=args.fix_code,
+        resolution_time_minutes=args.time or 0,
+        resolved_by=args.resolved_by or "@developer",
+    )
+
+    if success:
+        print(f"✓ Resolved: {args.bug_id}")
+
+        # Regenerate summary if requested
+        if not args.no_regenerate:
+            if resolver.regenerate_summary():
+                print("✓ Regenerated patterns_summary.md")
+            else:
+                print("⚠ Failed to regenerate summary")
+    else:
+        print(f"✗ Failed to resolve: {args.bug_id}")
+        print(" Use 'empathy patterns resolve' (no args) to list investigating bugs")
+        sys.exit(1)
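The export path is a straight load-then-save round trip through `PatternPersistence`. A minimal sketch of driving it directly, assuming the wheel is installed; the file paths are hypothetical and the attribute names mirror the `cmd_patterns_export` docstring.

from argparse import Namespace

from empathy_os.cli.commands.patterns import cmd_patterns_export

# Hypothetical paths; the command exits with status 1 if the input
# cannot be read or the output cannot be written.
cmd_patterns_export(
    Namespace(
        input="patterns/debugging.json",
        output="patterns/debugging.sqlite",
        input_format="json",
        output_format="sqlite",
    )
)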
empathy_os/cli/commands/provider.py
@@ -1,86 +1,98 @@
-"""
-
-Commands for managing provider settings, registry, costs, and telemetry.
+"""Provider configuration commands.
 
 Copyright 2025 Smart-AI-Memory
 Licensed under Fair Source License 0.9
 """
 
-import
-import
-"""
+from pathlib import Path
+
+from empathy_os.config import _validate_file_path
+from empathy_os.logging_config import get_logger
+
+logger = get_logger(__name__)
+
+
+def cmd_provider_show(args):
+    """Show current provider configuration (Anthropic-only as of v5.0.0).
+
+    Args:
+        args: Namespace object from argparse (no additional attributes used).
+
+    Returns:
+        None: Prints Anthropic provider configuration and model mappings.
+    """
+    from empathy_os.models import MODEL_REGISTRY
+    from empathy_os.models.provider_config import ProviderConfig
+
+    print("\n" + "=" * 60)
+    print("Provider Configuration (Claude-Native v5.0.0)")
+    print("=" * 60)
+
+    # Check for Anthropic API key
+    config = ProviderConfig.auto_detect()
+    if config.available_providers:
+        print("\n✓ ANTHROPIC_API_KEY detected")
+    else:
+        print("\n⚠️ ANTHROPIC_API_KEY not detected")
+        print(" Set your API key: export ANTHROPIC_API_KEY='your-key-here'")
+        print(" Get key at: https://console.anthropic.com/settings/keys")
+
+    print("\nProvider: anthropic")
+
+    # Show Anthropic models
+    print("\nModel mapping:")
+    anthropic_models = MODEL_REGISTRY.get("anthropic", {})
+    for tier in ["cheap", "capable", "premium"]:
+        model_info = anthropic_models.get(tier)
+        if model_info:
+            cost = f"${model_info.input_cost_per_million:.2f}/${model_info.output_cost_per_million:.2f} per M tokens"
+            print(f" {tier:8} → {model_info.id:40} {cost}")
+
+    print()
+
+
+def cmd_provider_set(args):
+    """Set default provider (Anthropic-only as of v5.0.0).
+
+    Args:
+        args: Namespace object from argparse with attributes:
+            - name (str): Provider name to set as default (must be 'anthropic').
+
+    Returns:
+        None: Saves provider to .empathy/workflows.yaml.
+
+    Raises:
+        SystemExit: If provider is not 'anthropic'.
+    """
+    import sys
+
+    import yaml
+
+    provider = args.name
+
+    # Validate provider is Anthropic
+    if provider.lower() != "anthropic":
+        print(f"❌ Error: Provider '{provider}' is not supported.")
+        print(" Empathy Framework is now Claude-native (v5.0.0).")
+        print(" Only 'anthropic' provider is available.")
+        print(" See docs/CLAUDE_NATIVE.md for migration guide.")
+        sys.exit(1)
+
+    workflows_path = Path(".empathy/workflows.yaml")
+
+    # Load existing config or create new
+    if workflows_path.exists():
+        with open(workflows_path) as f:
+            config = yaml.safe_load(f) or {}
+    else:
+        config = {}
+        workflows_path.parent.mkdir(parents=True, exist_ok=True)
+
+    config["default_provider"] = provider
+
+    validated_workflows_path = _validate_file_path(str(workflows_path))
+    with open(validated_workflows_path, "w") as f:
+        yaml.dump(config, f, default_flow_style=False, sort_keys=False)
+
+    print(f"✓ Default provider set to: {provider}")
+    print(f" Saved to: {validated_workflows_path}")
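The net effect of the rewritten `cmd_provider_set` is a single `default_provider` key written to `.empathy/workflows.yaml`, with anything other than `anthropic` rejected. A minimal sketch, assuming the wheel is installed.

from argparse import Namespace

from empathy_os.cli.commands.provider import cmd_provider_set

# Only "anthropic" is accepted; any other name exits with status 1.
cmd_provider_set(Namespace(name="anthropic"))
# .empathy/workflows.yaml now contains:
#   default_provider: anthropic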
empathy_os/cli/commands/setup.py
@@ -0,0 +1,96 @@
+"""Setup commands for initialization and validation.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+import sys
+
+from empathy_os.config import EmpathyConfig, _validate_file_path, load_config
+from empathy_os.logging_config import get_logger
+
+logger = get_logger(__name__)
+
+
+def cmd_init(args):
+    """Initialize a new Empathy Framework project.
+
+    Creates a configuration file with sensible defaults.
+
+    Args:
+        args: Namespace object from argparse with attributes:
+            - format (str): Output format ('yaml' or 'json').
+            - output (str | None): Output file path.
+
+    Returns:
+        None: Creates configuration file at specified path.
+
+    Raises:
+        ValueError: If output path is invalid or unsafe.
+    """
+    config_format = args.format
+    output_path = args.output or f"empathy.config.{config_format}"
+
+    # Validate output path to prevent path traversal attacks
+    validated_path = _validate_file_path(output_path)
+
+    logger.info(f"Initializing new Empathy Framework project with format: {config_format}")
+
+    # Create default config
+    config = EmpathyConfig()
+
+    # Save to file
+    if config_format == "yaml":
+        config.to_yaml(str(validated_path))
+        logger.info(f"Created YAML configuration file: {output_path}")
+        logger.info(f"✓ Created YAML configuration: {output_path}")
+    elif config_format == "json":
+        config.to_json(str(validated_path))
+        logger.info(f"Created JSON configuration file: {validated_path}")
+        logger.info(f"✓ Created JSON configuration: {validated_path}")
+
+    logger.info("\nNext steps:")
+    logger.info(f" 1. Edit {output_path} to customize settings")
+    logger.info(" 2. Use 'empathy run' to start using the framework")
+
+
+def cmd_validate(args):
+    """Validate a configuration file.
+
+    Loads and validates the specified configuration file.
+
+    Args:
+        args: Namespace object from argparse with attributes:
+            - config (str): Path to configuration file to validate.
+
+    Returns:
+        None: Prints validation result. Exits with code 1 on failure.
+    """
+    filepath = args.config
+    logger.info(f"Validating configuration file: {filepath}")
+
+    try:
+        config = load_config(filepath=filepath, use_env=False)
+        config.validate()
+        logger.info(f"Configuration validation successful: {filepath}")
+        logger.info(f"✓ Configuration valid: {filepath}")
+        logger.info(f"\n User ID: {config.user_id}")
+        logger.info(f" Target Level: {config.target_level}")
+        logger.info(f" Confidence Threshold: {config.confidence_threshold}")
+        logger.info(f" Persistence Backend: {config.persistence_backend}")
+        logger.info(f" Metrics Enabled: {config.metrics_enabled}")
+    except (OSError, FileNotFoundError) as e:
+        # Config file not found or cannot be read
+        logger.error(f"Configuration file error: {e}")
+        logger.error(f"✗ Cannot read configuration file: {e}")
+        sys.exit(1)
+    except ValueError as e:
+        # Invalid configuration values
+        logger.error(f"Configuration validation failed: {e}")
+        logger.error(f"✗ Configuration invalid: {e}")
+        sys.exit(1)
+    except Exception as e:
+        # Unexpected errors during config validation
+        logger.exception(f"Unexpected error validating configuration: {e}")
+        logger.error(f"✗ Configuration invalid: {e}")
+        sys.exit(1)
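`cmd_init` and `cmd_validate` compose into an init-then-check round trip. A minimal sketch, assuming the wheel is installed; the output path is hypothetical.

from argparse import Namespace

from empathy_os.cli.commands.setup import cmd_init, cmd_validate

# Write a default config, then validate it (cmd_validate exits with
# status 1 if the file is missing or fails EmpathyConfig validation).
cmd_init(Namespace(format="yaml", output="empathy.config.yaml"))
cmd_validate(Namespace(config="empathy.config.yaml"))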