empathy-framework 4.7.1-py3-none-any.whl → 4.8.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/METADATA +65 -2
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/RECORD +73 -52
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/WHEEL +1 -1
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/entry_points.txt +2 -1
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/top_level.txt +0 -1
- empathy_os/__init__.py +2 -0
- empathy_os/cache/hash_only.py +6 -3
- empathy_os/cache/hybrid.py +6 -3
- empathy_os/cli/__init__.py +128 -238
- empathy_os/cli/__main__.py +5 -33
- empathy_os/cli/commands/__init__.py +1 -8
- empathy_os/cli/commands/help.py +331 -0
- empathy_os/cli/commands/info.py +140 -0
- empathy_os/cli/commands/inspect.py +437 -0
- empathy_os/cli/commands/metrics.py +92 -0
- empathy_os/cli/commands/orchestrate.py +184 -0
- empathy_os/cli/commands/patterns.py +207 -0
- empathy_os/cli/commands/provider.py +93 -81
- empathy_os/cli/commands/setup.py +96 -0
- empathy_os/cli/commands/status.py +235 -0
- empathy_os/cli/commands/sync.py +166 -0
- empathy_os/cli/commands/tier.py +121 -0
- empathy_os/cli/commands/workflow.py +574 -0
- empathy_os/cli/parsers/__init__.py +62 -0
- empathy_os/cli/parsers/help.py +41 -0
- empathy_os/cli/parsers/info.py +26 -0
- empathy_os/cli/parsers/inspect.py +66 -0
- empathy_os/cli/parsers/metrics.py +42 -0
- empathy_os/cli/parsers/orchestrate.py +61 -0
- empathy_os/cli/parsers/patterns.py +54 -0
- empathy_os/cli/parsers/provider.py +40 -0
- empathy_os/cli/parsers/setup.py +42 -0
- empathy_os/cli/parsers/status.py +47 -0
- empathy_os/cli/parsers/sync.py +31 -0
- empathy_os/cli/parsers/tier.py +33 -0
- empathy_os/cli/parsers/workflow.py +77 -0
- empathy_os/cli/utils/__init__.py +1 -0
- empathy_os/cli/utils/data.py +242 -0
- empathy_os/cli/utils/helpers.py +68 -0
- empathy_os/{cli.py → cli_legacy.py} +27 -27
- empathy_os/cli_minimal.py +662 -0
- empathy_os/cli_router.py +384 -0
- empathy_os/cli_unified.py +38 -2
- empathy_os/memory/__init__.py +19 -5
- empathy_os/memory/short_term.py +14 -404
- empathy_os/memory/types.py +437 -0
- empathy_os/memory/unified.py +61 -48
- empathy_os/models/fallback.py +1 -1
- empathy_os/models/provider_config.py +59 -344
- empathy_os/models/registry.py +31 -180
- empathy_os/monitoring/alerts.py +14 -20
- empathy_os/monitoring/alerts_cli.py +24 -7
- empathy_os/project_index/__init__.py +2 -0
- empathy_os/project_index/index.py +210 -5
- empathy_os/project_index/scanner.py +45 -14
- empathy_os/project_index/scanner_parallel.py +291 -0
- empathy_os/socratic/ab_testing.py +1 -1
- empathy_os/workflows/__init__.py +31 -2
- empathy_os/workflows/base.py +349 -325
- empathy_os/workflows/bug_predict.py +8 -0
- empathy_os/workflows/builder.py +273 -0
- empathy_os/workflows/caching.py +253 -0
- empathy_os/workflows/code_review_pipeline.py +1 -0
- empathy_os/workflows/history.py +510 -0
- empathy_os/workflows/output.py +410 -0
- empathy_os/workflows/perf_audit.py +125 -19
- empathy_os/workflows/progress.py +324 -22
- empathy_os/workflows/routing.py +168 -0
- empathy_os/workflows/secure_release.py +1 -0
- empathy_os/workflows/security_audit.py +190 -0
- empathy_os/workflows/security_audit_phase3.py +328 -0
- empathy_os/workflows/telemetry_mixin.py +269 -0
- empathy_os/dashboard/__init__.py +0 -15
- empathy_os/dashboard/server.py +0 -941
- patterns/README.md +0 -119
- patterns/__init__.py +0 -95
- patterns/behavior.py +0 -298
- patterns/code_review_memory.json +0 -441
- patterns/core.py +0 -97
- patterns/debugging.json +0 -3763
- patterns/empathy.py +0 -268
- patterns/health_check_memory.json +0 -505
- patterns/input.py +0 -161
- patterns/memory_graph.json +0 -8
- patterns/refactoring_memory.json +0 -1113
- patterns/registry.py +0 -663
- patterns/security_memory.json +0 -8
- patterns/structural.py +0 -415
- patterns/validation.py +0 -194
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/licenses/LICENSE +0 -0
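Worth noting from the listing above: the 4.8.0 CLI is reorganized into per-command modules, with argument definitions under empathy_os/cli/parsers/ and handlers under empathy_os/cli/commands/, while the old monolithic cli.py is kept as cli_legacy.py. The sketch below illustrates what such a parser/handler split typically looks like; the package does ship status modules in both directories, but the function names and behavior here are illustrative assumptions, not the package's actual implementation.

# Illustrative sketch only: one module owns the argparse wiring, another owns
# the behavior, mirroring the cli/parsers/ vs cli/commands/ layout above.
# All names below (add_status_parser, cmd_status) are hypothetical.
import argparse


def cmd_status(args: argparse.Namespace) -> int:
    # commands/status.py would implement the behavior
    print("status: ok (json)" if args.json else "status: ok")
    return 0


def add_status_parser(subparsers) -> None:
    # parsers/status.py would define arguments and bind the handler
    parser = subparsers.add_parser("status", help="Show project status")
    parser.add_argument("--json", action="store_true", help="Output as JSON")
    parser.set_defaults(handler=cmd_status)


def main(argv: list[str] | None = None) -> int:
    parser = argparse.ArgumentParser(prog="empathy")
    subparsers = parser.add_subparsers(dest="command")
    add_status_parser(subparsers)
    args = parser.parse_args(argv)
    if hasattr(args, "handler"):
        return args.handler(args)
    parser.print_help()
    return 0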
empathy_os/cli_minimal.py (new file)

@@ -0,0 +1,662 @@
+#!/usr/bin/env python3
+"""Empathy Framework Minimal CLI.
+
+A streamlined CLI for automation and CI/CD workflows.
+Interactive features are available via Claude Code skills.
+
+Commands:
+    empathy workflow list           List available workflows
+    empathy workflow run <name>     Execute a workflow
+    empathy workflow info <name>    Show workflow details
+
+    empathy telemetry show          Display usage summary
+    empathy telemetry savings       Show cost savings
+    empathy telemetry export        Export to CSV/JSON
+
+    empathy provider show           Show current provider config
+    empathy provider set <name>     Set provider (anthropic, openai, hybrid)
+
+    empathy validate                Validate configuration
+    empathy version                 Show version
+
+For interactive features, use Claude Code skills:
+    /dev        Developer tools (debug, commit, PR, review)
+    /testing    Run tests, coverage, benchmarks
+    /docs       Documentation generation
+    /release    Release preparation
+"""
+
+from __future__ import annotations
+
+import argparse
+import json
+import logging
+import sys
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from argparse import Namespace
+
+logger = logging.getLogger(__name__)
+
+
+def get_version() -> str:
+    """Get package version."""
+    try:
+        from importlib.metadata import version
+
+        return version("empathy-framework")
+    except Exception:  # noqa: BLE001
+        # INTENTIONAL: Fallback for dev installs without metadata
+        return "dev"
+
+
+# =============================================================================
+# Workflow Commands
+# =============================================================================
+
+
+def cmd_workflow_list(args: Namespace) -> int:
+    """List available workflows."""
+    from empathy_os.workflows import discover_workflows
+
+    workflows = discover_workflows()
+
+    print("\n📋 Available Workflows\n")
+    print("-" * 60)
+
+    if not workflows:
+        print("No workflows registered.")
+        return 0
+
+    for name, workflow_cls in sorted(workflows.items()):
+        doc = workflow_cls.__doc__ or "No description"
+        # Get first line of docstring
+        description = doc.split("\n")[0].strip()
+        print(f" {name:25} {description}")
+
+    print("-" * 60)
+    print(f"\nTotal: {len(workflows)} workflows")
+    print("\nRun a workflow: empathy workflow run <name>")
+    return 0
+
+
+def cmd_workflow_info(args: Namespace) -> int:
+    """Show workflow details."""
+    from empathy_os.workflows import discover_workflows
+
+    workflows = discover_workflows()
+    name = args.name
+    if name not in workflows:
+        print(f"❌ Workflow not found: {name}")
+        print("\nAvailable workflows:")
+        for wf_name in sorted(workflows.keys()):
+            print(f" - {wf_name}")
+        return 1
+
+    workflow_cls = workflows[name]
+    print(f"\n📋 Workflow: {name}\n")
+    print("-" * 60)
+
+    # Show docstring
+    if workflow_cls.__doc__:
+        print(workflow_cls.__doc__)
+
+    # Show input schema if available
+    if hasattr(workflow_cls, "input_schema"):
+        print("\nInput Schema:")
+        print(json.dumps(workflow_cls.input_schema, indent=2))
+
+    print("-" * 60)
+    return 0
+
+
+def cmd_workflow_run(args: Namespace) -> int:
+    """Execute a workflow."""
+    import asyncio
+
+    from empathy_os.config import _validate_file_path
+    from empathy_os.workflows import discover_workflows
+
+    workflows = discover_workflows()
+    name = args.name
+    if name not in workflows:
+        print(f"❌ Workflow not found: {name}")
+        return 1
+
+    # Parse input if provided
+    input_data = {}
+    if args.input:
+        try:
+            input_data = json.loads(args.input)
+        except json.JSONDecodeError as e:
+            print(f"❌ Invalid JSON input: {e}")
+            return 1
+
+    # Add common options with validation
+    if args.path:
+        try:
+            # Validate path to prevent path traversal attacks
+            validated_path = _validate_file_path(args.path)
+            input_data["path"] = str(validated_path)
+        except ValueError as e:
+            print(f"❌ Invalid path: {e}")
+            return 1
+    if args.target:
+        input_data["target"] = args.target
+
+    print(f"\n🚀 Running workflow: {name}\n")
+
+    try:
+        workflow_cls = workflows[name]
+        workflow = workflow_cls()
+
+        # Run the workflow
+        if asyncio.iscoroutinefunction(workflow.execute):
+            result = asyncio.run(workflow.execute(**input_data))
+        else:
+            result = workflow.execute(**input_data)
+
+        # Output result
+        if args.json:
+            print(json.dumps(result, indent=2, default=str))
+        else:
+            if isinstance(result, dict):
+                print("\n✅ Workflow completed\n")
+                for key, value in result.items():
+                    print(f" {key}: {value}")
+            else:
+                print(f"\n✅ Result: {result}")
+
+        return 0
+
+    except Exception as e:  # noqa: BLE001
+        # INTENTIONAL: CLI commands should catch all errors and report gracefully
+        logger.exception(f"Workflow failed: {e}")
+        print(f"\n❌ Workflow failed: {e}")
+        return 1
+
+
+# =============================================================================
+# Telemetry Commands
+# =============================================================================
+
+
+def cmd_telemetry_show(args: Namespace) -> int:
+    """Display usage summary."""
+    try:
+        from empathy_os.models.telemetry import TelemetryStore
+
+        store = TelemetryStore()
+
+        print("\n📊 Telemetry Summary\n")
+        print("-" * 60)
+        print(f" Period: Last {args.days} days")
+
+        # Get workflow records from store
+        # TODO: Consider adding aggregate methods to TelemetryStore for better performance
+        # with large datasets (e.g., store.get_total_cost(), store.get_token_counts())
+        workflows = store.get_workflows(limit=1000)
+        calls = store.get_calls(limit=1000)
+
+        if workflows:
+            total_cost = sum(r.total_cost for r in workflows)
+            total_tokens = sum(r.total_input_tokens + r.total_output_tokens for r in workflows)
+            print(f" Workflow runs: {len(workflows):,}")
+            print(f" Total tokens: {total_tokens:,}")
+            print(f" Total cost: ${total_cost:.2f}")
+        elif calls:
+            total_cost = sum(c.estimated_cost for c in calls)
+            total_tokens = sum(c.input_tokens + c.output_tokens for c in calls)
+            print(f" API calls: {len(calls):,}")
+            print(f" Total tokens: {total_tokens:,}")
+            print(f" Total cost: ${total_cost:.2f}")
+        else:
+            print(" No telemetry data found.")
+
+        print("-" * 60)
+        return 0
+
+    except ImportError:
+        print("❌ Telemetry module not available")
+        return 1
+    except Exception as e:  # noqa: BLE001
+        # INTENTIONAL: CLI commands should catch all errors and report gracefully
+        logger.exception(f"Telemetry error: {e}")
+        print(f"❌ Error: {e}")
+        return 1
+
+
+def cmd_telemetry_savings(args: Namespace) -> int:
+    """Show cost savings from tier routing."""
+    try:
+        from empathy_os.models.telemetry import TelemetryStore
+
+        store = TelemetryStore()
+
+        print("\n💰 Cost Savings Report\n")
+        print("-" * 60)
+        print(f" Period: Last {args.days} days")
+
+        # Calculate savings from workflow runs
+        records = store.get_workflows(limit=1000)
+        if records:
+            actual_cost = sum(r.total_cost for r in records)
+            total_tokens = sum(r.total_input_tokens + r.total_output_tokens for r in records)
+
+            # Calculate what premium-only pricing would cost
+            # Using Claude Opus pricing as premium baseline: ~$15/1M input, ~$75/1M output
+            # Simplified: ~$45/1M tokens average (blended input/output)
+            premium_rate_per_token = 45.0 / 1_000_000
+            baseline_cost = total_tokens * premium_rate_per_token
+
+            # Only show savings if we actually routed to cheaper models
+            if baseline_cost > actual_cost:
+                savings = baseline_cost - actual_cost
+                savings_pct = (savings / baseline_cost * 100) if baseline_cost > 0 else 0
+
+                print(f" Actual cost: ${actual_cost:.2f}")
+                print(f" Premium-only cost: ${baseline_cost:.2f} (estimated)")
+                print(f" Savings: ${savings:.2f}")
+                print(f" Savings percentage: {savings_pct:.1f}%")
+            else:
+                print(f" Total cost: ${actual_cost:.2f}")
+                print(f" Total tokens: {total_tokens:,}")
+                print("\n Note: No savings detected (may already be optimized)")
+
+            print("\n * Premium baseline assumes Claude Opus pricing (~$45/1M tokens)")
+        else:
+            print(" No telemetry data found.")
+
+        print("-" * 60)
+        return 0
+
+    except ImportError:
+        print("❌ Telemetry module not available")
+        return 1
+    except Exception as e:  # noqa: BLE001
+        # INTENTIONAL: CLI commands should catch all errors and report gracefully
+        logger.exception(f"Telemetry error: {e}")
+        print(f"❌ Error: {e}")
+        return 1
+
+
+def cmd_telemetry_export(args: Namespace) -> int:
+    """Export telemetry data to file."""
+    from empathy_os.config import _validate_file_path
+
+    try:
+        from empathy_os.models.telemetry import TelemetryStore
+
+        store = TelemetryStore()
+        records = store.get_workflows(limit=10000)
+
+        # Convert to exportable format
+        data = [
+            {
+                "run_id": r.run_id,
+                "workflow_name": r.workflow_name,
+                "timestamp": r.started_at,
+                "total_cost": r.total_cost,
+                "input_tokens": r.total_input_tokens,
+                "output_tokens": r.total_output_tokens,
+                "success": r.success,
+            }
+            for r in records
+        ]
+
+        # Validate output path
+        output_path = _validate_file_path(args.output)
+
+        if args.format == "csv":
+            import csv
+
+            with output_path.open("w", newline="") as f:
+                if data:
+                    writer = csv.DictWriter(f, fieldnames=data[0].keys())
+                    writer.writeheader()
+                    writer.writerows(data)
+            print(f"✅ Exported {len(data)} entries to {output_path}")
+
+        elif args.format == "json":
+            with output_path.open("w") as f:
+                json.dump(data, f, indent=2, default=str)
+            print(f"✅ Exported {len(data)} entries to {output_path}")
+
+        return 0
+
+    except ImportError:
+        print("❌ Telemetry module not available")
+        return 1
+    except ValueError as e:
+        print(f"❌ Invalid path: {e}")
+        return 1
+    except Exception as e:  # noqa: BLE001
+        # INTENTIONAL: CLI commands should catch all errors and report gracefully
+        logger.exception(f"Export error: {e}")
+        print(f"❌ Error: {e}")
+        return 1
+
+
+# =============================================================================
+# Provider Commands
+# =============================================================================
+
+
+def cmd_provider_show(args: Namespace) -> int:
+    """Show current provider configuration."""
+    try:
+        from empathy_os.models.provider_config import get_provider_config
+
+        config = get_provider_config()
+
+        print("\n🔧 Provider Configuration\n")
+        print("-" * 60)
+        print(f" Mode: {config.mode.value}")
+        print(f" Primary provider: {config.primary_provider}")
+        print(f" Cost optimization: {'✅ Enabled' if config.cost_optimization else '❌ Disabled'}")
+
+        if config.available_providers:
+            print("\n Available providers:")
+            for provider in config.available_providers:
+                status = "✓" if provider == config.primary_provider else " "
+                print(f" [{status}] {provider}")
+        else:
+            print("\n ⚠️ No API keys detected")
+            print(" Set ANTHROPIC_API_KEY, OPENAI_API_KEY, or GOOGLE_API_KEY")
+
+        print("-" * 60)
+        return 0
+
+    except ImportError:
+        print("❌ Provider module not available")
+        return 1
+    except Exception as e:  # noqa: BLE001
+        # INTENTIONAL: CLI commands should catch all errors and report gracefully
+        logger.exception(f"Provider error: {e}")
+        print(f"❌ Error: {e}")
+        return 1
+
+
+def cmd_provider_set(args: Namespace) -> int:
+    """Set the LLM provider."""
+    try:
+        from empathy_os.models.provider_config import (
+            ProviderMode,
+            get_provider_config,
+            set_provider_config,
+        )
+
+        # Get current config and update
+        config = get_provider_config()
+
+        if args.name == "hybrid":
+            config.mode = ProviderMode.HYBRID
+            print("✅ Provider mode set to: hybrid (multi-provider)")
+        else:
+            config.mode = ProviderMode.SINGLE
+            config.primary_provider = args.name
+            print(f"✅ Provider set to: {args.name}")
+
+        set_provider_config(config)
+        return 0
+
+    except ImportError:
+        print("❌ Provider module not available")
+        return 1
+    except Exception as e:  # noqa: BLE001
+        # INTENTIONAL: CLI commands should catch all errors and report gracefully
+        logger.exception(f"Provider error: {e}")
+        print(f"❌ Error: {e}")
+        return 1
+
+
+# =============================================================================
+# Utility Commands
+# =============================================================================
+
+
+def cmd_validate(args: Namespace) -> int:
+    """Validate configuration."""
+    print("\n🔍 Validating configuration...\n")
+
+    errors = []
+    warnings = []
+
+    # Check config file
+    config_paths = [
+        Path("empathy.config.json"),
+        Path("empathy.config.yml"),
+        Path("empathy.config.yaml"),
+    ]
+
+    config_found = False
+    for config_path in config_paths:
+        if config_path.exists():
+            config_found = True
+            print(f" ✅ Config file: {config_path}")
+            break
+
+    if not config_found:
+        warnings.append("No empathy.config file found (using defaults)")
+
+    # Check for API keys
+    import os
+
+    api_keys = {
+        "ANTHROPIC_API_KEY": "Anthropic (Claude)",
+        "OPENAI_API_KEY": "OpenAI (GPT)",
+        "GOOGLE_API_KEY": "Google (Gemini)",
+    }
+
+    keys_found = 0
+    for key, name in api_keys.items():
+        if os.environ.get(key):
+            print(f" ✅ {name} API key set")
+            keys_found += 1
+
+    if keys_found == 0:
+        errors.append(
+            "No API keys found. Set at least one: ANTHROPIC_API_KEY, OPENAI_API_KEY, or GOOGLE_API_KEY"
+        )
+
+    # Check workflows directory
+    try:
+        from empathy_os.workflows import WORKFLOW_REGISTRY
+
+        print(f" ✅ {len(WORKFLOW_REGISTRY)} workflows registered")
+    except ImportError as e:
+        warnings.append(f"Could not load workflows: {e}")
+
+    # Summary
+    print("\n" + "-" * 60)
+
+    if errors:
+        print("\n❌ Validation failed:")
+        for error in errors:
+            print(f" - {error}")
+        return 1
+
+    if warnings:
+        print("\n⚠️ Warnings:")
+        for warning in warnings:
+            print(f" - {warning}")
+
+    print("\n✅ Configuration is valid")
+    return 0
+
+
+def cmd_version(args: Namespace) -> int:
+    """Show version information."""
+    version = get_version()
+    print(f"empathy-framework {version}")
+
+    if args.verbose:
+        print(f"\nPython: {sys.version}")
+        print(f"Platform: {sys.platform}")
+
+        # Show installed extras
+        try:
+            from importlib.metadata import requires
+
+            reqs = requires("empathy-framework") or []
+            print(f"\nDependencies: {len(reqs)}")
+        except Exception:  # noqa: BLE001
+            pass
+
+    return 0
+
+
+# =============================================================================
+# Main Entry Point
+# =============================================================================
+
+
+def create_parser() -> argparse.ArgumentParser:
+    """Create the argument parser."""
+    parser = argparse.ArgumentParser(
+        prog="empathy",
+        description="Empathy Framework CLI - AI-powered developer workflows",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+For interactive features, use Claude Code skills:
+    /dev        Developer tools (debug, commit, PR, review)
+    /testing    Run tests, coverage, benchmarks
+    /docs       Documentation generation
+    /release    Release preparation
+
+Documentation: https://smartaimemory.com/framework-docs/
+""",
+    )
+
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        action="store_true",
+        help="Enable verbose output",
+    )
+
+    subparsers = parser.add_subparsers(dest="command", help="Available commands")
+
+    # --- Workflow commands ---
+    workflow_parser = subparsers.add_parser("workflow", help="Workflow management")
+    workflow_sub = workflow_parser.add_subparsers(dest="workflow_command")
+
+    # workflow list
+    workflow_sub.add_parser("list", help="List available workflows")
+
+    # workflow info
+    info_parser = workflow_sub.add_parser("info", help="Show workflow details")
+    info_parser.add_argument("name", help="Workflow name")
+
+    # workflow run
+    run_parser = workflow_sub.add_parser("run", help="Run a workflow")
+    run_parser.add_argument("name", help="Workflow name")
+    run_parser.add_argument("--input", "-i", help="JSON input data")
+    run_parser.add_argument("--path", "-p", help="Target path")
+    run_parser.add_argument("--target", "-t", help="Target value (e.g., coverage target)")
+    run_parser.add_argument("--json", "-j", action="store_true", help="Output as JSON")
+
+    # --- Telemetry commands ---
+    telemetry_parser = subparsers.add_parser("telemetry", help="Usage telemetry")
+    telemetry_sub = telemetry_parser.add_subparsers(dest="telemetry_command")
+
+    # telemetry show
+    show_parser = telemetry_sub.add_parser("show", help="Display usage summary")
+    show_parser.add_argument(
+        "--days", "-d", type=int, default=30, help="Number of days (default: 30)"
+    )
+
+    # telemetry savings
+    savings_parser = telemetry_sub.add_parser("savings", help="Show cost savings")
+    savings_parser.add_argument(
+        "--days", "-d", type=int, default=30, help="Number of days (default: 30)"
+    )
+
+    # telemetry export
+    export_parser = telemetry_sub.add_parser("export", help="Export telemetry data")
+    export_parser.add_argument("--output", "-o", required=True, help="Output file path")
+    export_parser.add_argument(
+        "--format", "-f", choices=["csv", "json"], default="json", help="Output format"
+    )
+    export_parser.add_argument(
+        "--days", "-d", type=int, default=30, help="Number of days (default: 30)"
+    )
+
+    # --- Provider commands ---
+    provider_parser = subparsers.add_parser("provider", help="LLM provider configuration")
+    provider_sub = provider_parser.add_subparsers(dest="provider_command")
+
+    # provider show
+    provider_sub.add_parser("show", help="Show current provider")
+
+    # provider set
+    set_parser = provider_sub.add_parser("set", help="Set provider")
+    set_parser.add_argument("name", choices=["anthropic", "openai", "hybrid"], help="Provider name")
+
+    # --- Utility commands ---
+    subparsers.add_parser("validate", help="Validate configuration")
+
+    version_parser = subparsers.add_parser("version", help="Show version")
+    version_parser.add_argument("-v", "--verbose", action="store_true", help="Show detailed info")
+
+    return parser
+
+
+def main(argv: list[str] | None = None) -> int:
+    """Main entry point."""
+    parser = create_parser()
+    args = parser.parse_args(argv)
+
+    # Configure logging
+    if args.verbose:
+        logging.basicConfig(level=logging.DEBUG)
+    else:
+        logging.basicConfig(level=logging.WARNING)
+
+    # Route to command handlers
+    if args.command == "workflow":
+        if args.workflow_command == "list":
+            return cmd_workflow_list(args)
+        elif args.workflow_command == "info":
+            return cmd_workflow_info(args)
+        elif args.workflow_command == "run":
+            return cmd_workflow_run(args)
+        else:
+            print("Usage: empathy workflow {list|info|run}")
+            return 1
+
+    elif args.command == "telemetry":
+        if args.telemetry_command == "show":
+            return cmd_telemetry_show(args)
+        elif args.telemetry_command == "savings":
+            return cmd_telemetry_savings(args)
+        elif args.telemetry_command == "export":
+            return cmd_telemetry_export(args)
+        else:
+            print("Usage: empathy telemetry {show|savings|export}")
+            return 1
+
+    elif args.command == "provider":
+        if args.provider_command == "show":
+            return cmd_provider_show(args)
+        elif args.provider_command == "set":
+            return cmd_provider_set(args)
+        else:
+            print("Usage: empathy provider {show|set}")
+            return 1
+
+    elif args.command == "validate":
+        return cmd_validate(args)
+
+    elif args.command == "version":
+        return cmd_version(args)
+
+    else:
+        parser.print_help()
+        return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
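Since cli_minimal.py exposes main(argv), the minimal CLI can be driven from Python as well as via the console script. A short usage sketch, assuming empathy-framework 4.8.0 is installed so that empathy_os.cli_minimal is importable (output depends on which workflows and API keys are configured locally):

# Call the minimal CLI in-process; main() returns the exit code it would
# otherwise pass to sys.exit().
from empathy_os.cli_minimal import main

print(main(["version", "--verbose"]))  # prints version/platform info, returns 0
print(main(["workflow", "list"]))      # lists registered workflows, returns 0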