attune-ai 2.1.4__py3-none-any.whl → 2.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/cli/__init__.py +3 -55
- attune/cli/commands/batch.py +4 -12
- attune/cli/commands/cache.py +7 -15
- attune/cli/commands/provider.py +17 -0
- attune/cli/commands/routing.py +3 -1
- attune/cli/commands/setup.py +122 -0
- attune/cli/commands/tier.py +1 -3
- attune/cli/commands/workflow.py +31 -0
- attune/cli/parsers/cache.py +1 -0
- attune/cli/parsers/help.py +1 -3
- attune/cli/parsers/provider.py +7 -0
- attune/cli/parsers/routing.py +1 -3
- attune/cli/parsers/setup.py +7 -0
- attune/cli/parsers/status.py +1 -3
- attune/cli/parsers/tier.py +1 -3
- attune/cli_minimal.py +34 -28
- attune/cli_router.py +9 -7
- attune/cli_unified.py +3 -0
- attune/core.py +190 -0
- attune/dashboard/app.py +4 -2
- attune/dashboard/simple_server.py +3 -1
- attune/dashboard/standalone_server.py +7 -3
- attune/mcp/server.py +54 -102
- attune/memory/long_term.py +0 -2
- attune/memory/short_term/__init__.py +84 -0
- attune/memory/short_term/base.py +467 -0
- attune/memory/short_term/batch.py +219 -0
- attune/memory/short_term/caching.py +227 -0
- attune/memory/short_term/conflicts.py +265 -0
- attune/memory/short_term/cross_session.py +122 -0
- attune/memory/short_term/facade.py +655 -0
- attune/memory/short_term/pagination.py +215 -0
- attune/memory/short_term/patterns.py +271 -0
- attune/memory/short_term/pubsub.py +286 -0
- attune/memory/short_term/queues.py +244 -0
- attune/memory/short_term/security.py +300 -0
- attune/memory/short_term/sessions.py +250 -0
- attune/memory/short_term/streams.py +249 -0
- attune/memory/short_term/timelines.py +234 -0
- attune/memory/short_term/transactions.py +186 -0
- attune/memory/short_term/working.py +252 -0
- attune/meta_workflows/cli_commands/__init__.py +3 -0
- attune/meta_workflows/cli_commands/agent_commands.py +0 -4
- attune/meta_workflows/cli_commands/analytics_commands.py +0 -6
- attune/meta_workflows/cli_commands/config_commands.py +0 -5
- attune/meta_workflows/cli_commands/memory_commands.py +0 -5
- attune/meta_workflows/cli_commands/template_commands.py +0 -5
- attune/meta_workflows/cli_commands/workflow_commands.py +0 -6
- attune/meta_workflows/workflow.py +1 -1
- attune/models/adaptive_routing.py +4 -8
- attune/models/auth_cli.py +3 -9
- attune/models/auth_strategy.py +2 -4
- attune/models/provider_config.py +20 -1
- attune/models/telemetry/analytics.py +0 -2
- attune/models/telemetry/backend.py +0 -3
- attune/models/telemetry/storage.py +0 -2
- attune/orchestration/_strategies/__init__.py +156 -0
- attune/orchestration/_strategies/base.py +231 -0
- attune/orchestration/_strategies/conditional_strategies.py +373 -0
- attune/orchestration/_strategies/conditions.py +369 -0
- attune/orchestration/_strategies/core_strategies.py +491 -0
- attune/orchestration/_strategies/data_classes.py +64 -0
- attune/orchestration/_strategies/nesting.py +233 -0
- attune/orchestration/execution_strategies.py +58 -1567
- attune/orchestration/meta_orchestrator.py +1 -3
- attune/project_index/scanner.py +1 -3
- attune/project_index/scanner_parallel.py +7 -5
- attune/socratic_router.py +1 -3
- attune/telemetry/agent_coordination.py +9 -3
- attune/telemetry/agent_tracking.py +16 -3
- attune/telemetry/approval_gates.py +22 -5
- attune/telemetry/cli.py +3 -3
- attune/telemetry/commands/dashboard_commands.py +24 -8
- attune/telemetry/event_streaming.py +8 -2
- attune/telemetry/feedback_loop.py +10 -2
- attune/tools.py +1 -0
- attune/workflow_commands.py +1 -3
- attune/workflows/__init__.py +53 -10
- attune/workflows/autonomous_test_gen.py +160 -104
- attune/workflows/base.py +48 -664
- attune/workflows/batch_processing.py +2 -4
- attune/workflows/compat.py +156 -0
- attune/workflows/cost_mixin.py +141 -0
- attune/workflows/data_classes.py +92 -0
- attune/workflows/document_gen/workflow.py +11 -14
- attune/workflows/history.py +62 -37
- attune/workflows/llm_base.py +2 -4
- attune/workflows/migration.py +422 -0
- attune/workflows/output.py +3 -9
- attune/workflows/parsing_mixin.py +427 -0
- attune/workflows/perf_audit.py +3 -1
- attune/workflows/progress.py +10 -13
- attune/workflows/release_prep.py +5 -1
- attune/workflows/routing.py +0 -2
- attune/workflows/secure_release.py +2 -1
- attune/workflows/security_audit.py +19 -14
- attune/workflows/security_audit_phase3.py +28 -22
- attune/workflows/seo_optimization.py +29 -29
- attune/workflows/test_gen/test_templates.py +1 -4
- attune/workflows/test_gen/workflow.py +0 -2
- attune/workflows/test_gen_behavioral.py +7 -20
- attune/workflows/test_gen_parallel.py +6 -4
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/METADATA +4 -3
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/RECORD +119 -94
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/entry_points.txt +0 -2
- attune_healthcare/monitors/monitoring/__init__.py +9 -9
- attune_llm/agent_factory/__init__.py +6 -6
- attune_llm/commands/__init__.py +10 -10
- attune_llm/commands/models.py +3 -3
- attune_llm/config/__init__.py +8 -8
- attune_llm/learning/__init__.py +3 -3
- attune_llm/learning/extractor.py +5 -3
- attune_llm/learning/storage.py +5 -3
- attune_llm/security/__init__.py +17 -17
- attune_llm/utils/tokens.py +3 -1
- attune/cli_legacy.py +0 -3957
- attune/memory/short_term.py +0 -2192
- attune/workflows/manage_docs.py +0 -87
- attune/workflows/test5.py +0 -125
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/WHEEL +0 -0
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE +0 -0
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/top_level.txt +0 -0
attune/cli_legacy.py
DELETED
|
@@ -1,3957 +0,0 @@
|
|
|
1
|
-
"""Command-Line Interface for Empathy Framework (LEGACY)
|
|
2
|
-
|
|
3
|
-
DEPRECATED: This module is deprecated as of v5.0.0.
|
|
4
|
-
Use the minimal CLI instead: `empathy` (attune.cli_minimal)
|
|
5
|
-
|
|
6
|
-
The minimal CLI provides:
|
|
7
|
-
- `empathy workflow list|info|run` - Workflow management
|
|
8
|
-
- `empathy telemetry show|savings|export` - Usage tracking
|
|
9
|
-
- `empathy provider show|set` - Provider configuration
|
|
10
|
-
- `empathy validate` - Configuration validation
|
|
11
|
-
|
|
12
|
-
For interactive features, use Claude Code slash commands:
|
|
13
|
-
- /dev, /testing, /docs, /release, /help
|
|
14
|
-
|
|
15
|
-
Migration guide: https://smartaimemory.com/framework-docs/migration/cli/
|
|
16
|
-
|
|
17
|
-
---
|
|
18
|
-
|
|
19
|
-
Original description:
|
|
20
|
-
Provides CLI commands for:
|
|
21
|
-
- Running interactive REPL (empathy run)
|
|
22
|
-
- Inspecting patterns, metrics, state (empathy inspect)
|
|
23
|
-
- Exporting/importing patterns (empathy export/import)
|
|
24
|
-
- Interactive setup workflow (empathy workflow)
|
|
25
|
-
- Configuration management
|
|
26
|
-
- Power user workflows: morning, ship, fix-all, learn (v2.4+)
|
|
27
|
-
|
|
28
|
-
Copyright 2025 Smart-AI-Memory
|
|
29
|
-
Licensed under Fair Source License 0.9
|
|
30
|
-
"""
|
|
31
|
-
|
|
32
|
-
import warnings
|
|
33
|
-
|
|
34
|
-
warnings.warn(
|
|
35
|
-
"attune.cli_legacy is deprecated. Use 'empathy' (cli_minimal) instead. "
|
|
36
|
-
"See: https://smartaimemory.com/framework-docs/reference/cli-reference/",
|
|
37
|
-
DeprecationWarning,
|
|
38
|
-
stacklevel=2,
|
|
39
|
-
)
|
|
40
|
-
|
|
41
|
-
import argparse
|
|
42
|
-
import heapq
|
|
43
|
-
import sys
|
|
44
|
-
import time
|
|
45
|
-
from importlib.metadata import version as get_version
|
|
46
|
-
from pathlib import Path
|
|
47
|
-
|
|
48
|
-
from attune import EmpathyConfig, EmpathyOS, load_config
|
|
49
|
-
from attune.config import _validate_file_path
|
|
50
|
-
from attune.cost_tracker import cmd_costs
|
|
51
|
-
from attune.discovery import show_tip_if_available
|
|
52
|
-
from attune.logging_config import get_logger
|
|
53
|
-
from attune.pattern_library import PatternLibrary
|
|
54
|
-
from attune.persistence import MetricsCollector, PatternPersistence, StateManager
|
|
55
|
-
from attune.platform_utils import setup_asyncio_policy
|
|
56
|
-
from attune.templates import cmd_new
|
|
57
|
-
from attune.workflows import (
|
|
58
|
-
cmd_fix_all,
|
|
59
|
-
cmd_learn,
|
|
60
|
-
cmd_morning,
|
|
61
|
-
cmd_ship,
|
|
62
|
-
create_example_config,
|
|
63
|
-
get_workflow,
|
|
64
|
-
)
|
|
65
|
-
from attune.workflows import list_workflows as get_workflow_list
|
|
66
|
-
|
|
67
|
-
# Import telemetry CLI commands
|
|
68
|
-
try:
|
|
69
|
-
from attune.telemetry.cli import (
|
|
70
|
-
cmd_agent_performance,
|
|
71
|
-
cmd_file_test_dashboard,
|
|
72
|
-
cmd_file_test_status,
|
|
73
|
-
cmd_task_routing_report,
|
|
74
|
-
cmd_telemetry_compare,
|
|
75
|
-
cmd_telemetry_export,
|
|
76
|
-
cmd_telemetry_reset,
|
|
77
|
-
cmd_telemetry_savings,
|
|
78
|
-
cmd_telemetry_show,
|
|
79
|
-
cmd_test_status,
|
|
80
|
-
cmd_tier1_status,
|
|
81
|
-
)
|
|
82
|
-
|
|
83
|
-
TELEMETRY_CLI_AVAILABLE = True
|
|
84
|
-
except ImportError:
|
|
85
|
-
TELEMETRY_CLI_AVAILABLE = False
|
|
86
|
-
|
|
87
|
-
# Import progressive workflow CLI commands
|
|
88
|
-
try:
|
|
89
|
-
from attune.workflows.progressive.cli import (
|
|
90
|
-
cmd_analytics,
|
|
91
|
-
cmd_cleanup,
|
|
92
|
-
cmd_list_results,
|
|
93
|
-
cmd_show_report,
|
|
94
|
-
)
|
|
95
|
-
|
|
96
|
-
PROGRESSIVE_CLI_AVAILABLE = True
|
|
97
|
-
except ImportError:
|
|
98
|
-
PROGRESSIVE_CLI_AVAILABLE = False
|
|
99
|
-
|
|
100
|
-
logger = get_logger(__name__)
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
# =============================================================================
|
|
104
|
-
# =============================================================================
|
|
105
|
-
# CHEATSHEET DATA - Quick reference for all commands
|
|
106
|
-
# =============================================================================
|
|
107
|
-
|
|
108
|
-
CHEATSHEET = {
|
|
109
|
-
"Getting Started": [
|
|
110
|
-
("empathy init", "Create a new config file"),
|
|
111
|
-
("empathy workflow", "Interactive setup workflow"),
|
|
112
|
-
("empathy run", "Interactive REPL mode"),
|
|
113
|
-
],
|
|
114
|
-
"Daily Workflow": [
|
|
115
|
-
("empathy morning", "Start-of-day briefing"),
|
|
116
|
-
("empathy status", "What needs attention now"),
|
|
117
|
-
("empathy ship", "Pre-commit validation"),
|
|
118
|
-
],
|
|
119
|
-
"Code Quality": [
|
|
120
|
-
("empathy health", "Quick health check"),
|
|
121
|
-
("empathy health --deep", "Comprehensive check"),
|
|
122
|
-
("empathy health --fix", "Auto-fix issues"),
|
|
123
|
-
("empathy fix-all", "Fix all lint/format issues"),
|
|
124
|
-
],
|
|
125
|
-
"Pattern Learning": [
|
|
126
|
-
("empathy learn --analyze 20", "Learn from last 20 commits"),
|
|
127
|
-
("empathy sync-claude", "Sync patterns to Claude Code"),
|
|
128
|
-
("empathy inspect patterns", "View learned patterns"),
|
|
129
|
-
],
|
|
130
|
-
"Code Review": [
|
|
131
|
-
("empathy review", "Review recent changes"),
|
|
132
|
-
("empathy review --staged", "Review staged changes only"),
|
|
133
|
-
],
|
|
134
|
-
"Memory & State": [
|
|
135
|
-
("empathy inspect state", "View saved states"),
|
|
136
|
-
("empathy inspect metrics --user-id X", "View user metrics"),
|
|
137
|
-
("empathy export patterns.json", "Export patterns"),
|
|
138
|
-
],
|
|
139
|
-
"Advanced": [
|
|
140
|
-
("empathy costs", "View API cost tracking"),
|
|
141
|
-
("empathy dashboard", "Launch visual dashboard"),
|
|
142
|
-
("empathy frameworks", "List agent frameworks"),
|
|
143
|
-
("empathy workflow list", "List multi-model workflows"),
|
|
144
|
-
("empathy new <template>", "Create project from template"),
|
|
145
|
-
],
|
|
146
|
-
}
|
|
147
|
-
|
|
148
|
-
EXPLAIN_CONTENT = {
|
|
149
|
-
"morning": """
|
|
150
|
-
HOW 'empathy morning' WORKS:
|
|
151
|
-
━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
|
152
|
-
This command aggregates multiple data sources to give you a prioritized
|
|
153
|
-
start-of-day briefing:
|
|
154
|
-
|
|
155
|
-
1. PATTERNS ANALYSIS
|
|
156
|
-
Reads ./patterns/*.json to find:
|
|
157
|
-
- Unresolved bugs (status: investigating)
|
|
158
|
-
- Recent security decisions
|
|
159
|
-
- Tech debt trends
|
|
160
|
-
|
|
161
|
-
2. GIT CONTEXT
|
|
162
|
-
Checks your recent git activity:
|
|
163
|
-
- Commits from yesterday
|
|
164
|
-
- Uncommitted changes
|
|
165
|
-
- Branch status
|
|
166
|
-
|
|
167
|
-
3. HEALTH SNAPSHOT
|
|
168
|
-
Runs quick health checks:
|
|
169
|
-
- Lint issues count
|
|
170
|
-
- Type errors
|
|
171
|
-
- Test status
|
|
172
|
-
|
|
173
|
-
4. PRIORITY SCORING
|
|
174
|
-
Items are scored and sorted by:
|
|
175
|
-
- Age (older = higher priority)
|
|
176
|
-
- Severity (critical > high > medium)
|
|
177
|
-
- Your recent activity patterns
|
|
178
|
-
|
|
179
|
-
TIPS:
|
|
180
|
-
• Run this first thing each day
|
|
181
|
-
• Use 'empathy morning --verbose' for details
|
|
182
|
-
• Pair with 'empathy status --select N' to dive deeper
|
|
183
|
-
""",
|
|
184
|
-
"ship": """
|
|
185
|
-
HOW 'empathy ship' WORKS:
|
|
186
|
-
━━━━━━━━━━━━━━━━━━━━━━━━━
|
|
187
|
-
Pre-commit validation pipeline that ensures code quality before shipping:
|
|
188
|
-
|
|
189
|
-
1. HEALTH CHECKS
|
|
190
|
-
- Runs lint checks (ruff/flake8)
|
|
191
|
-
- Validates types (mypy/pyright)
|
|
192
|
-
- Checks formatting (black/prettier)
|
|
193
|
-
|
|
194
|
-
2. PATTERN REVIEW
|
|
195
|
-
- Compares changes against known bug patterns
|
|
196
|
-
- Flags code that matches historical issues
|
|
197
|
-
- Suggests fixes based on past resolutions
|
|
198
|
-
|
|
199
|
-
3. SECURITY SCAN
|
|
200
|
-
- Checks for hardcoded secrets
|
|
201
|
-
- Validates against security patterns
|
|
202
|
-
- Reports potential vulnerabilities
|
|
203
|
-
|
|
204
|
-
4. PATTERN SYNC (optional)
|
|
205
|
-
- Updates Claude Code rules
|
|
206
|
-
- Syncs new patterns discovered
|
|
207
|
-
- Skip with --skip-sync
|
|
208
|
-
|
|
209
|
-
EXIT CODES:
|
|
210
|
-
• 0 = All checks passed, safe to commit
|
|
211
|
-
• 1 = Issues found, review before committing
|
|
212
|
-
|
|
213
|
-
TIPS:
|
|
214
|
-
• Add to pre-commit hook: empathy ship --skip-sync
|
|
215
|
-
• Use 'empathy ship --verbose' to see all checks
|
|
216
|
-
""",
|
|
217
|
-
"learn": """
|
|
218
|
-
HOW 'empathy learn' WORKS:
|
|
219
|
-
━━━━━━━━━━━━━━━━━━━━━━━━━
|
|
220
|
-
Extracts patterns from your git history to teach Claude about your codebase:
|
|
221
|
-
|
|
222
|
-
1. COMMIT ANALYSIS
|
|
223
|
-
Parses commit messages looking for:
|
|
224
|
-
- fix: Bug fixes → debugging.json
|
|
225
|
-
- security: decisions → security.json
|
|
226
|
-
- TODO/FIXME in code → tech_debt.json
|
|
227
|
-
|
|
228
|
-
2. DIFF INSPECTION
|
|
229
|
-
Analyzes code changes to:
|
|
230
|
-
- Identify affected files
|
|
231
|
-
- Extract error types
|
|
232
|
-
- Record fix patterns
|
|
233
|
-
|
|
234
|
-
3. PATTERN STORAGE
|
|
235
|
-
Saves to ./patterns/:
|
|
236
|
-
- debugging.json: Bug patterns
|
|
237
|
-
- security.json: Security decisions
|
|
238
|
-
- tech_debt.json: Technical debt
|
|
239
|
-
- inspection.json: Code review findings
|
|
240
|
-
|
|
241
|
-
4. SUMMARY GENERATION
|
|
242
|
-
Creates .claude/patterns_summary.md:
|
|
243
|
-
- Human-readable pattern overview
|
|
244
|
-
- Loaded by Claude Code automatically
|
|
245
|
-
|
|
246
|
-
USAGE EXAMPLES:
|
|
247
|
-
• empathy learn --analyze 10 # Last 10 commits
|
|
248
|
-
• empathy learn --analyze 100 # Deeper history
|
|
249
|
-
• empathy sync-claude # Apply patterns to Claude
|
|
250
|
-
|
|
251
|
-
TIPS:
|
|
252
|
-
• Run weekly to keep patterns current
|
|
253
|
-
• Use good commit messages (fix:, feat:, etc.)
|
|
254
|
-
• Check ./patterns/ to see what was learned
|
|
255
|
-
""",
|
|
256
|
-
"health": """
|
|
257
|
-
HOW 'empathy health' WORKS:
|
|
258
|
-
━━━━━━━━━━━━━━━━━━━━━━━━━
|
|
259
|
-
Code health dashboard that runs multiple quality checks:
|
|
260
|
-
|
|
261
|
-
1. QUICK MODE (default)
|
|
262
|
-
Fast checks that run in seconds:
|
|
263
|
-
- Lint: ruff check or flake8
|
|
264
|
-
- Format: black --check or prettier
|
|
265
|
-
- Basic type checking
|
|
266
|
-
|
|
267
|
-
2. DEEP MODE (--deep)
|
|
268
|
-
Comprehensive checks (slower):
|
|
269
|
-
- Full type analysis (mypy --strict)
|
|
270
|
-
- Test suite execution
|
|
271
|
-
- Security scanning
|
|
272
|
-
- Dependency audit
|
|
273
|
-
|
|
274
|
-
3. SCORING
|
|
275
|
-
Health score 0-100 based on:
|
|
276
|
-
- Lint issues (×2 penalty each)
|
|
277
|
-
- Type errors (×5 penalty each)
|
|
278
|
-
- Test failures (×10 penalty each)
|
|
279
|
-
- Security issues (×20 penalty each)
|
|
280
|
-
|
|
281
|
-
4. AUTO-FIX (--fix)
|
|
282
|
-
Can automatically fix:
|
|
283
|
-
- Formatting issues
|
|
284
|
-
- Import sorting
|
|
285
|
-
- Simple lint errors
|
|
286
|
-
|
|
287
|
-
USAGE:
|
|
288
|
-
• empathy health # Quick check
|
|
289
|
-
• empathy health --deep # Full check
|
|
290
|
-
• empathy health --fix # Auto-fix issues
|
|
291
|
-
• empathy health --trends 30 # 30-day trend
|
|
292
|
-
|
|
293
|
-
TIPS:
|
|
294
|
-
• Run quick checks before commits
|
|
295
|
-
• Run deep checks in CI/CD
|
|
296
|
-
• Track trends to catch regressions
|
|
297
|
-
""",
|
|
298
|
-
"sync-claude": """
|
|
299
|
-
HOW 'empathy sync-claude' WORKS:
|
|
300
|
-
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
|
301
|
-
Converts learned patterns into Claude Code rules:
|
|
302
|
-
|
|
303
|
-
1. READS PATTERNS
|
|
304
|
-
Loads from ./patterns/:
|
|
305
|
-
- debugging.json → Bug fix patterns
|
|
306
|
-
- security.json → Security decisions
|
|
307
|
-
- tech_debt.json → Known debt items
|
|
308
|
-
|
|
309
|
-
2. GENERATES RULES
|
|
310
|
-
Creates .claude/rules/empathy/:
|
|
311
|
-
- debugging.md
|
|
312
|
-
- security.md
|
|
313
|
-
- tech_debt.md
|
|
314
|
-
|
|
315
|
-
3. CLAUDE CODE INTEGRATION
|
|
316
|
-
Rules are automatically loaded when:
|
|
317
|
-
- Claude Code starts in this directory
|
|
318
|
-
- Combined with CLAUDE.md instructions
|
|
319
|
-
|
|
320
|
-
HOW CLAUDE USES THESE:
|
|
321
|
-
• Sees historical bugs before suggesting code
|
|
322
|
-
• Knows about accepted security patterns
|
|
323
|
-
• Understands existing tech debt
|
|
324
|
-
|
|
325
|
-
FILE STRUCTURE:
|
|
326
|
-
./patterns/ # Your pattern storage
|
|
327
|
-
debugging.json
|
|
328
|
-
security.json
|
|
329
|
-
.claude/
|
|
330
|
-
CLAUDE.md # Project instructions
|
|
331
|
-
rules/
|
|
332
|
-
empathy/ # Generated rules
|
|
333
|
-
debugging.md
|
|
334
|
-
security.md
|
|
335
|
-
|
|
336
|
-
TIPS:
|
|
337
|
-
• Run after 'empathy learn'
|
|
338
|
-
• Commit .claude/rules/ to share with team
|
|
339
|
-
• Weekly sync keeps Claude current
|
|
340
|
-
""",
|
|
341
|
-
}
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
def cmd_version(args):
|
|
345
|
-
"""Display version information for Empathy Framework.
|
|
346
|
-
|
|
347
|
-
Args:
|
|
348
|
-
args: Namespace object from argparse (no additional attributes used).
|
|
349
|
-
|
|
350
|
-
Returns:
|
|
351
|
-
None: Prints version, copyright, and license information to stdout.
|
|
352
|
-
"""
|
|
353
|
-
logger.info("Displaying version information")
|
|
354
|
-
try:
|
|
355
|
-
version = get_version("empathy")
|
|
356
|
-
except Exception as e:
|
|
357
|
-
# Package metadata not available or invalid (development install)
|
|
358
|
-
logger.debug(f"Version not available: {e}")
|
|
359
|
-
version = "unknown"
|
|
360
|
-
logger.info(f"Empathy v{version}")
|
|
361
|
-
logger.info("Copyright 2025 Smart-AI-Memory")
|
|
362
|
-
logger.info("Licensed under Fair Source License 0.9")
|
|
363
|
-
logger.info("\n✨ Built with Claude Code + MemDocs + VS Code transformative stack")
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
def cmd_cheatsheet(args):
|
|
367
|
-
"""Display quick reference cheatsheet for all commands.
|
|
368
|
-
|
|
369
|
-
Args:
|
|
370
|
-
args: Namespace object from argparse with attributes:
|
|
371
|
-
- category (str | None): Specific category to show (e.g., 'daily-workflow').
|
|
372
|
-
- compact (bool): If True, show commands only without descriptions.
|
|
373
|
-
|
|
374
|
-
Returns:
|
|
375
|
-
None: Prints formatted cheatsheet to stdout.
|
|
376
|
-
"""
|
|
377
|
-
category = getattr(args, "category", None)
|
|
378
|
-
compact = getattr(args, "compact", False)
|
|
379
|
-
|
|
380
|
-
print()
|
|
381
|
-
print("=" * 60)
|
|
382
|
-
print(" EMPATHY FRAMEWORK - QUICK REFERENCE")
|
|
383
|
-
print("=" * 60)
|
|
384
|
-
|
|
385
|
-
if category:
|
|
386
|
-
# Show specific category
|
|
387
|
-
category_title = category.replace("-", " ").title()
|
|
388
|
-
if category_title in CHEATSHEET:
|
|
389
|
-
print(f"\n {category_title}")
|
|
390
|
-
print(" " + "-" * 40)
|
|
391
|
-
for cmd, desc in CHEATSHEET[category_title]:
|
|
392
|
-
if compact:
|
|
393
|
-
print(f" {cmd}")
|
|
394
|
-
else:
|
|
395
|
-
print(f" {cmd:35} {desc}")
|
|
396
|
-
else:
|
|
397
|
-
print(f"\n Unknown category: {category}")
|
|
398
|
-
print(" Available: " + ", ".join(k.lower().replace(" ", "-") for k in CHEATSHEET))
|
|
399
|
-
else:
|
|
400
|
-
# Show all categories
|
|
401
|
-
for cat_name, commands in CHEATSHEET.items():
|
|
402
|
-
print(f"\n {cat_name}")
|
|
403
|
-
print(" " + "-" * 40)
|
|
404
|
-
for cmd, desc in commands:
|
|
405
|
-
if compact:
|
|
406
|
-
print(f" {cmd}")
|
|
407
|
-
else:
|
|
408
|
-
print(f" {cmd:35} {desc}")
|
|
409
|
-
|
|
410
|
-
print()
|
|
411
|
-
print("-" * 60)
|
|
412
|
-
print(" Use: empathy <command> --explain for detailed explanation")
|
|
413
|
-
print(" Use: empathy onboard for interactive tutorial")
|
|
414
|
-
print("=" * 60)
|
|
415
|
-
print()
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
def cmd_onboard(args):
|
|
419
|
-
"""Interactive onboarding tutorial for new users.
|
|
420
|
-
|
|
421
|
-
Guides users through setup steps: init, learn, sync-claude, health check.
|
|
422
|
-
|
|
423
|
-
Args:
|
|
424
|
-
args: Namespace object from argparse with attributes:
|
|
425
|
-
- step (int | None): Jump to specific tutorial step (1-5).
|
|
426
|
-
- reset (bool): If True, reset onboarding progress.
|
|
427
|
-
|
|
428
|
-
Returns:
|
|
429
|
-
None: Prints tutorial content and tracks progress.
|
|
430
|
-
"""
|
|
431
|
-
from attune.discovery import get_engine
|
|
432
|
-
|
|
433
|
-
step = getattr(args, "step", None)
|
|
434
|
-
reset = getattr(args, "reset", False)
|
|
435
|
-
|
|
436
|
-
engine = get_engine()
|
|
437
|
-
stats = engine.get_stats()
|
|
438
|
-
|
|
439
|
-
if reset:
|
|
440
|
-
# Reset onboarding progress
|
|
441
|
-
engine.state["onboarding_step"] = 0
|
|
442
|
-
engine.state["onboarding_completed"] = []
|
|
443
|
-
engine._save()
|
|
444
|
-
print("Onboarding progress reset.")
|
|
445
|
-
return
|
|
446
|
-
|
|
447
|
-
# Define onboarding steps
|
|
448
|
-
steps = [
|
|
449
|
-
{
|
|
450
|
-
"title": "Welcome to Empathy Framework",
|
|
451
|
-
"content": """
|
|
452
|
-
Welcome! Empathy Framework helps you build AI systems with 5 levels
|
|
453
|
-
of sophistication, from reactive responses to anticipatory assistance.
|
|
454
|
-
|
|
455
|
-
This tutorial will walk you through the key features.
|
|
456
|
-
|
|
457
|
-
Let's check your current setup first...
|
|
458
|
-
""",
|
|
459
|
-
"check": lambda: True,
|
|
460
|
-
"action": None,
|
|
461
|
-
},
|
|
462
|
-
{
|
|
463
|
-
"title": "Step 1: Initialize Your Project",
|
|
464
|
-
"content": """
|
|
465
|
-
First, let's create a configuration file for your project.
|
|
466
|
-
|
|
467
|
-
Run: empathy init
|
|
468
|
-
|
|
469
|
-
This creates attune.config.yaml with sensible defaults.
|
|
470
|
-
Alternatively, use 'empathy workflow' for an interactive setup.
|
|
471
|
-
""",
|
|
472
|
-
"check": lambda: _file_exists("attune.config.yaml")
|
|
473
|
-
or _file_exists("attune.config.yml"),
|
|
474
|
-
"action": "empathy init",
|
|
475
|
-
},
|
|
476
|
-
{
|
|
477
|
-
"title": "Step 2: Learn From Your History",
|
|
478
|
-
"content": """
|
|
479
|
-
Empathy can learn patterns from your git commit history.
|
|
480
|
-
This teaches Claude about your codebase's patterns and past bugs.
|
|
481
|
-
|
|
482
|
-
Run: empathy learn --analyze 10
|
|
483
|
-
|
|
484
|
-
This analyzes the last 10 commits and extracts:
|
|
485
|
-
- Bug fix patterns
|
|
486
|
-
- Security decisions
|
|
487
|
-
- Technical debt markers
|
|
488
|
-
""",
|
|
489
|
-
"check": lambda: _file_exists("patterns/debugging.json"),
|
|
490
|
-
"action": "empathy learn --analyze 10",
|
|
491
|
-
},
|
|
492
|
-
{
|
|
493
|
-
"title": "Step 3: Sync Patterns to Claude",
|
|
494
|
-
"content": """
|
|
495
|
-
Now let's share what we learned with Claude Code.
|
|
496
|
-
|
|
497
|
-
Run: empathy sync-claude
|
|
498
|
-
|
|
499
|
-
This creates .claude/rules/empathy/ with markdown rules
|
|
500
|
-
that Claude Code automatically loads when you work in this directory.
|
|
501
|
-
""",
|
|
502
|
-
"check": lambda: _file_exists(".claude/rules/empathy/debugging.md"),
|
|
503
|
-
"action": "empathy sync-claude",
|
|
504
|
-
},
|
|
505
|
-
{
|
|
506
|
-
"title": "Step 4: Check Code Health",
|
|
507
|
-
"content": """
|
|
508
|
-
Let's run a quick health check on your codebase.
|
|
509
|
-
|
|
510
|
-
Run: empathy health
|
|
511
|
-
|
|
512
|
-
This checks:
|
|
513
|
-
- Linting issues
|
|
514
|
-
- Type errors
|
|
515
|
-
- Formatting problems
|
|
516
|
-
|
|
517
|
-
Try 'empathy health --fix' to auto-fix what's possible.
|
|
518
|
-
""",
|
|
519
|
-
"check": lambda: stats.get("command_counts", {}).get("health", 0) > 0,
|
|
520
|
-
"action": "empathy health",
|
|
521
|
-
},
|
|
522
|
-
{
|
|
523
|
-
"title": "Step 5: Daily Workflow",
|
|
524
|
-
"content": """
|
|
525
|
-
You're almost there! Here's your recommended daily workflow:
|
|
526
|
-
|
|
527
|
-
MORNING:
|
|
528
|
-
empathy morning - Get your priority briefing
|
|
529
|
-
|
|
530
|
-
BEFORE COMMITS:
|
|
531
|
-
empathy ship - Validate before committing
|
|
532
|
-
|
|
533
|
-
WEEKLY:
|
|
534
|
-
empathy learn - Update patterns from new commits
|
|
535
|
-
empathy sync-claude - Keep Claude current
|
|
536
|
-
|
|
537
|
-
You've completed the basics! Run 'empathy cheatsheet' anytime
|
|
538
|
-
for a quick reference of all commands.
|
|
539
|
-
""",
|
|
540
|
-
"check": lambda: True,
|
|
541
|
-
"action": None,
|
|
542
|
-
},
|
|
543
|
-
]
|
|
544
|
-
|
|
545
|
-
# Determine current step
|
|
546
|
-
current_step = engine.state.get("onboarding_step", 0)
|
|
547
|
-
if step is not None:
|
|
548
|
-
current_step = max(0, min(step - 1, len(steps) - 1))
|
|
549
|
-
|
|
550
|
-
step_data = steps[current_step]
|
|
551
|
-
|
|
552
|
-
# Display header
|
|
553
|
-
print()
|
|
554
|
-
print("=" * 60)
|
|
555
|
-
print(f" ONBOARDING ({current_step + 1}/{len(steps)})")
|
|
556
|
-
print("=" * 60)
|
|
557
|
-
print()
|
|
558
|
-
print(f" {step_data['title']}")
|
|
559
|
-
print(" " + "-" * 50)
|
|
560
|
-
print(step_data["content"])
|
|
561
|
-
|
|
562
|
-
# Check if step is completed
|
|
563
|
-
if step_data["check"]():
|
|
564
|
-
if current_step < len(steps) - 1:
|
|
565
|
-
print(" [DONE] This step is complete!")
|
|
566
|
-
print()
|
|
567
|
-
print(f" Continue with: empathy onboard --step {current_step + 2}")
|
|
568
|
-
# Auto-advance
|
|
569
|
-
engine.state["onboarding_step"] = current_step + 1
|
|
570
|
-
engine._save()
|
|
571
|
-
else:
|
|
572
|
-
print(" Congratulations! You've completed the onboarding!")
|
|
573
|
-
print()
|
|
574
|
-
_show_achievements(engine)
|
|
575
|
-
elif step_data["action"]:
|
|
576
|
-
print(f" NEXT: Run '{step_data['action']}'")
|
|
577
|
-
print(" Then run 'empathy onboard' to continue")
|
|
578
|
-
|
|
579
|
-
print()
|
|
580
|
-
print("-" * 60)
|
|
581
|
-
print(f" Progress: {'*' * (current_step + 1)}{'.' * (len(steps) - current_step - 1)}")
|
|
582
|
-
print("=" * 60)
|
|
583
|
-
print()
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
def _file_exists(path: str) -> bool:
|
|
587
|
-
"""Check if a file exists."""
|
|
588
|
-
|
|
589
|
-
return Path(path).exists()
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
def _show_achievements(engine) -> None:
|
|
593
|
-
"""Show user achievements based on usage."""
|
|
594
|
-
stats = engine.get_stats()
|
|
595
|
-
|
|
596
|
-
achievements = []
|
|
597
|
-
total_cmds = stats.get("total_commands", 0)
|
|
598
|
-
cmd_counts = stats.get("command_counts", {})
|
|
599
|
-
|
|
600
|
-
# Check achievements
|
|
601
|
-
if total_cmds >= 1:
|
|
602
|
-
achievements.append(("First Steps", "Ran your first command"))
|
|
603
|
-
if total_cmds >= 10:
|
|
604
|
-
achievements.append(("Getting Started", "Ran 10+ commands"))
|
|
605
|
-
if total_cmds >= 50:
|
|
606
|
-
achievements.append(("Power User", "Ran 50+ commands"))
|
|
607
|
-
if total_cmds >= 100:
|
|
608
|
-
achievements.append(("Expert", "Ran 100+ commands"))
|
|
609
|
-
|
|
610
|
-
if cmd_counts.get("learn", 0) >= 1:
|
|
611
|
-
achievements.append(("Pattern Learner", "Learned from git history"))
|
|
612
|
-
if cmd_counts.get("sync-claude", 0) >= 1:
|
|
613
|
-
achievements.append(("Claude Whisperer", "Synced patterns to Claude"))
|
|
614
|
-
if cmd_counts.get("morning", 0) >= 5:
|
|
615
|
-
achievements.append(("Early Bird", "Used morning briefing 5+ times"))
|
|
616
|
-
if cmd_counts.get("ship", 0) >= 10:
|
|
617
|
-
achievements.append(("Quality Shipper", "Used pre-commit checks 10+ times"))
|
|
618
|
-
if cmd_counts.get("health", 0) >= 1 and cmd_counts.get("fix-all", 0) >= 1:
|
|
619
|
-
achievements.append(("Code Doctor", "Used health checks and fixes"))
|
|
620
|
-
|
|
621
|
-
if stats.get("patterns_learned", 0) >= 10:
|
|
622
|
-
achievements.append(("Pattern Master", "Learned 10+ patterns"))
|
|
623
|
-
|
|
624
|
-
if stats.get("days_active", 0) >= 7:
|
|
625
|
-
achievements.append(("Week Warrior", "Active for 7+ days"))
|
|
626
|
-
if stats.get("days_active", 0) >= 30:
|
|
627
|
-
achievements.append(("Monthly Maven", "Active for 30+ days"))
|
|
628
|
-
|
|
629
|
-
if achievements:
|
|
630
|
-
print(" ACHIEVEMENTS UNLOCKED")
|
|
631
|
-
print(" " + "-" * 30)
|
|
632
|
-
for name, desc in achievements:
|
|
633
|
-
print(f" * {name}: {desc}")
|
|
634
|
-
print()
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
def cmd_explain(args):
|
|
638
|
-
"""Show detailed explanation for a command.
|
|
639
|
-
|
|
640
|
-
Provides in-depth documentation about how specific commands work.
|
|
641
|
-
|
|
642
|
-
Args:
|
|
643
|
-
args: Namespace object from argparse with attributes:
|
|
644
|
-
- command (str): Command name to explain (e.g., 'morning', 'ship').
|
|
645
|
-
|
|
646
|
-
Returns:
|
|
647
|
-
None: Prints detailed explanation to stdout.
|
|
648
|
-
"""
|
|
649
|
-
command = args.command
|
|
650
|
-
|
|
651
|
-
if command in EXPLAIN_CONTENT:
|
|
652
|
-
print(EXPLAIN_CONTENT[command])
|
|
653
|
-
else:
|
|
654
|
-
available = ", ".join(EXPLAIN_CONTENT.keys())
|
|
655
|
-
print(f"\nNo detailed explanation available for '{command}'")
|
|
656
|
-
print(f"Available: {available}")
|
|
657
|
-
print("\nTry: empathy cheatsheet for a quick reference")
|
|
658
|
-
print()
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
def cmd_achievements(args):
|
|
662
|
-
"""Show user achievements and progress.
|
|
663
|
-
|
|
664
|
-
Displays gamification stats including unlocked achievements and usage streaks.
|
|
665
|
-
|
|
666
|
-
Args:
|
|
667
|
-
args: Namespace object from argparse (no additional attributes used).
|
|
668
|
-
|
|
669
|
-
Returns:
|
|
670
|
-
None: Prints achievements and progress to stdout.
|
|
671
|
-
"""
|
|
672
|
-
from attune.discovery import get_engine
|
|
673
|
-
|
|
674
|
-
engine = get_engine()
|
|
675
|
-
stats = engine.get_stats()
|
|
676
|
-
|
|
677
|
-
print()
|
|
678
|
-
print("=" * 60)
|
|
679
|
-
print(" YOUR EMPATHY FRAMEWORK JOURNEY")
|
|
680
|
-
print("=" * 60)
|
|
681
|
-
print()
|
|
682
|
-
|
|
683
|
-
# Stats summary
|
|
684
|
-
print(" STATISTICS")
|
|
685
|
-
print(" " + "-" * 40)
|
|
686
|
-
print(f" Total commands run: {stats.get('total_commands', 0)}")
|
|
687
|
-
print(f" Days active: {stats.get('days_active', 0)}")
|
|
688
|
-
print(f" Patterns learned: {stats.get('patterns_learned', 0)}")
|
|
689
|
-
shown = stats.get("tips_shown", 0)
|
|
690
|
-
total = shown + stats.get("tips_remaining", 0)
|
|
691
|
-
print(f" Tips discovered: {shown}/{total}")
|
|
692
|
-
print()
|
|
693
|
-
|
|
694
|
-
# Command breakdown
|
|
695
|
-
cmd_counts = stats.get("command_counts", {})
|
|
696
|
-
if cmd_counts:
|
|
697
|
-
print(" COMMAND USAGE")
|
|
698
|
-
print(" " + "-" * 40)
|
|
699
|
-
sorted_cmds = sorted(cmd_counts.items(), key=lambda x: x[1], reverse=True)
|
|
700
|
-
for cmd, count in sorted_cmds[:10]:
|
|
701
|
-
bar = "*" * min(count, 20)
|
|
702
|
-
print(f" {cmd:15} {count:4} {bar}")
|
|
703
|
-
print()
|
|
704
|
-
|
|
705
|
-
# Achievements
|
|
706
|
-
_show_achievements(engine)
|
|
707
|
-
|
|
708
|
-
print("=" * 60)
|
|
709
|
-
print()
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
def cmd_tier_recommend(args):
|
|
713
|
-
"""Get intelligent tier recommendation for a bug/task.
|
|
714
|
-
|
|
715
|
-
Analyzes bug description and historical patterns to recommend
|
|
716
|
-
the most cost-effective tier (HAIKU/SONNET/OPUS).
|
|
717
|
-
|
|
718
|
-
Args:
|
|
719
|
-
args: Namespace object from argparse with attributes:
|
|
720
|
-
- description (str): Bug or task description to analyze.
|
|
721
|
-
- files (str | None): Comma-separated list of affected files.
|
|
722
|
-
- complexity (str | None): Complexity hint (low/medium/high).
|
|
723
|
-
|
|
724
|
-
Returns:
|
|
725
|
-
None: Prints tier recommendation with confidence and expected cost.
|
|
726
|
-
"""
|
|
727
|
-
from attune.tier_recommender import TierRecommender
|
|
728
|
-
|
|
729
|
-
recommender = TierRecommender()
|
|
730
|
-
|
|
731
|
-
# Get recommendation
|
|
732
|
-
result = recommender.recommend(
|
|
733
|
-
bug_description=args.description,
|
|
734
|
-
files_affected=args.files.split(",") if args.files else None,
|
|
735
|
-
complexity_hint=args.complexity,
|
|
736
|
-
)
|
|
737
|
-
|
|
738
|
-
# Display results
|
|
739
|
-
print()
|
|
740
|
-
print("=" * 60)
|
|
741
|
-
print(" TIER RECOMMENDATION")
|
|
742
|
-
print("=" * 60)
|
|
743
|
-
print()
|
|
744
|
-
print(f" Bug/Task: {args.description}")
|
|
745
|
-
print()
|
|
746
|
-
print(f" 📍 Recommended Tier: {result.tier}")
|
|
747
|
-
print(f" 🎯 Confidence: {result.confidence * 100:.1f}%")
|
|
748
|
-
print(f" 💰 Expected Cost: ${result.expected_cost:.3f}")
|
|
749
|
-
print(f" 🔄 Expected Attempts: {result.expected_attempts:.1f}")
|
|
750
|
-
print()
|
|
751
|
-
print(" 📊 Reasoning:")
|
|
752
|
-
print(f" {result.reasoning}")
|
|
753
|
-
print()
|
|
754
|
-
|
|
755
|
-
if result.similar_patterns_count > 0:
|
|
756
|
-
print(f" ✅ Based on {result.similar_patterns_count} similar patterns")
|
|
757
|
-
else:
|
|
758
|
-
print(" ⚠️ No historical data - using conservative default")
|
|
759
|
-
|
|
760
|
-
if result.fallback_used:
|
|
761
|
-
print()
|
|
762
|
-
print(" 💡 Tip: As more patterns are collected, recommendations")
|
|
763
|
-
print(" will become more accurate and personalized.")
|
|
764
|
-
|
|
765
|
-
print()
|
|
766
|
-
print("=" * 60)
|
|
767
|
-
print()
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
def cmd_tier_stats(args):
|
|
771
|
-
"""Show tier pattern learning statistics.
|
|
772
|
-
|
|
773
|
-
Displays statistics about collected patterns and tier distribution.
|
|
774
|
-
|
|
775
|
-
Args:
|
|
776
|
-
args: Namespace object from argparse (no additional attributes used).
|
|
777
|
-
|
|
778
|
-
Returns:
|
|
779
|
-
None: Prints tier statistics, savings percentages, and bug type distribution.
|
|
780
|
-
"""
|
|
781
|
-
from attune.tier_recommender import TierRecommender
|
|
782
|
-
|
|
783
|
-
recommender = TierRecommender()
|
|
784
|
-
stats = recommender.get_stats()
|
|
785
|
-
|
|
786
|
-
print()
|
|
787
|
-
print("=" * 60)
|
|
788
|
-
print(" TIER PATTERN LEARNING STATS")
|
|
789
|
-
print("=" * 60)
|
|
790
|
-
print()
|
|
791
|
-
|
|
792
|
-
if stats.get("total_patterns", 0) == 0:
|
|
793
|
-
print(" No patterns collected yet.")
|
|
794
|
-
print()
|
|
795
|
-
print(" 💡 Patterns are automatically collected as you use")
|
|
796
|
-
print(" cascading workflows with enhanced tracking enabled.")
|
|
797
|
-
print()
|
|
798
|
-
print("=" * 60)
|
|
799
|
-
print()
|
|
800
|
-
return
|
|
801
|
-
|
|
802
|
-
print(f" Total Patterns: {stats['total_patterns']}")
|
|
803
|
-
print(f" Avg Savings: {stats['avg_savings_percent']}%")
|
|
804
|
-
print()
|
|
805
|
-
|
|
806
|
-
print(" TIER DISTRIBUTION")
|
|
807
|
-
print(" " + "-" * 40)
|
|
808
|
-
for tier, count in stats["patterns_by_tier"].items():
|
|
809
|
-
percent = (count / stats["total_patterns"]) * 100
|
|
810
|
-
bar = "█" * int(percent / 5)
|
|
811
|
-
print(f" {tier:10} {count:3} ({percent:5.1f}%) {bar}")
|
|
812
|
-
print()
|
|
813
|
-
|
|
814
|
-
print(" BUG TYPE DISTRIBUTION")
|
|
815
|
-
print(" " + "-" * 40)
|
|
816
|
-
sorted_types = sorted(stats["bug_type_distribution"].items(), key=lambda x: x[1], reverse=True)
|
|
817
|
-
for bug_type, count in sorted_types[:10]:
|
|
818
|
-
percent = (count / stats["total_patterns"]) * 100
|
|
819
|
-
print(f" {bug_type:20} {count:3} ({percent:5.1f}%)")
|
|
820
|
-
|
|
821
|
-
print()
|
|
822
|
-
print("=" * 60)
|
|
823
|
-
print()
|
|
824
|
-
|
|
825
|
-
|
|
826
|
-
def cmd_orchestrate(args):
|
|
827
|
-
"""Run meta-orchestration workflows.
|
|
828
|
-
|
|
829
|
-
Orchestrates teams of agents to accomplish complex tasks through
|
|
830
|
-
intelligent composition patterns.
|
|
831
|
-
|
|
832
|
-
Args:
|
|
833
|
-
args: Namespace object from argparse with attributes:
|
|
834
|
-
- workflow (str): Orchestration workflow name.
|
|
835
|
-
- path (str): Target path for orchestration.
|
|
836
|
-
- mode (str | None): Execution mode (e.g., 'daily', 'weekly', 'release').
|
|
837
|
-
- json (bool): If True, output as JSON format.
|
|
838
|
-
- dry_run (bool): If True, show plan without executing.
|
|
839
|
-
- verbose (bool): If True, show detailed output.
|
|
840
|
-
|
|
841
|
-
Returns:
|
|
842
|
-
int: 0 on success, 1 on failure.
|
|
843
|
-
"""
|
|
844
|
-
import asyncio
|
|
845
|
-
import json
|
|
846
|
-
|
|
847
|
-
from attune.workflows.orchestrated_health_check import OrchestratedHealthCheckWorkflow
|
|
848
|
-
from attune.workflows.orchestrated_release_prep import OrchestratedReleasePrepWorkflow
|
|
849
|
-
|
|
850
|
-
# test_coverage_boost removed - feature disabled in v4.0.0 (being redesigned)
|
|
851
|
-
# Get workflow type
|
|
852
|
-
workflow_type = args.workflow
|
|
853
|
-
|
|
854
|
-
# Only print header in non-JSON mode
|
|
855
|
-
if not (hasattr(args, "json") and args.json):
|
|
856
|
-
print()
|
|
857
|
-
print("=" * 60)
|
|
858
|
-
print(f" META-ORCHESTRATION: {workflow_type.upper()}")
|
|
859
|
-
print("=" * 60)
|
|
860
|
-
print()
|
|
861
|
-
|
|
862
|
-
if workflow_type == "release-prep":
|
|
863
|
-
# Release Preparation workflow
|
|
864
|
-
path = args.path or "."
|
|
865
|
-
quality_gates = {}
|
|
866
|
-
|
|
867
|
-
# Collect custom quality gates
|
|
868
|
-
if hasattr(args, "min_coverage") and args.min_coverage is not None:
|
|
869
|
-
quality_gates["min_coverage"] = args.min_coverage
|
|
870
|
-
if hasattr(args, "min_quality") and args.min_quality is not None:
|
|
871
|
-
quality_gates["min_quality_score"] = args.min_quality
|
|
872
|
-
if hasattr(args, "max_critical") and args.max_critical is not None:
|
|
873
|
-
quality_gates["max_critical_issues"] = args.max_critical
|
|
874
|
-
|
|
875
|
-
# Only print details in non-JSON mode
|
|
876
|
-
if not (hasattr(args, "json") and args.json):
|
|
877
|
-
print(f" Project Path: {path}")
|
|
878
|
-
if quality_gates:
|
|
879
|
-
print(f" Quality Gates: {quality_gates}")
|
|
880
|
-
print()
|
|
881
|
-
print(" 🔍 Parallel Validation Agents:")
|
|
882
|
-
print(" • Security Auditor (vulnerability scan)")
|
|
883
|
-
print(" • Test Coverage Analyzer (gap analysis)")
|
|
884
|
-
print(" • Code Quality Reviewer (best practices)")
|
|
885
|
-
print(" • Documentation Writer (completeness)")
|
|
886
|
-
print()
|
|
887
|
-
|
|
888
|
-
# Create workflow
|
|
889
|
-
workflow = OrchestratedReleasePrepWorkflow(
|
|
890
|
-
quality_gates=quality_gates if quality_gates else None
|
|
891
|
-
)
|
|
892
|
-
|
|
893
|
-
try:
|
|
894
|
-
# Execute workflow
|
|
895
|
-
report = asyncio.run(workflow.execute(path=path))
|
|
896
|
-
|
|
897
|
-
# Display results
|
|
898
|
-
if hasattr(args, "json") and args.json:
|
|
899
|
-
print(json.dumps(report.to_dict(), indent=2))
|
|
900
|
-
else:
|
|
901
|
-
print(report.format_console_output())
|
|
902
|
-
|
|
903
|
-
# Return appropriate exit code
|
|
904
|
-
return 0 if report.approved else 1
|
|
905
|
-
|
|
906
|
-
except Exception as e:
|
|
907
|
-
print(f" ❌ Error executing release prep workflow: {e}")
|
|
908
|
-
print()
|
|
909
|
-
logger.exception("Release prep workflow failed")
|
|
910
|
-
return 1
|
|
911
|
-
|
|
912
|
-
elif workflow_type == "test-coverage":
|
|
913
|
-
# Test Coverage Boost workflow - DISABLED in v4.0.0
|
|
914
|
-
print(" ⚠️ FEATURE DISABLED")
|
|
915
|
-
print(" " + "-" * 56)
|
|
916
|
-
print()
|
|
917
|
-
print(" The test-coverage workflow has been disabled in v4.0.0")
|
|
918
|
-
print(" due to poor quality (0% test pass rate).")
|
|
919
|
-
print()
|
|
920
|
-
print(" This feature is being redesigned and will return in a")
|
|
921
|
-
print(" future release with improved test generation quality.")
|
|
922
|
-
print()
|
|
923
|
-
print(" Available v4.0 workflows:")
|
|
924
|
-
print(" • health-check - Real-time codebase health analysis")
|
|
925
|
-
print(" • release-prep - Quality gate validation")
|
|
926
|
-
print()
|
|
927
|
-
return 1
|
|
928
|
-
|
|
929
|
-
elif workflow_type == "health-check":
|
|
930
|
-
# Health Check workflow
|
|
931
|
-
mode = args.mode or "daily"
|
|
932
|
-
project_root = args.project_root or "."
|
|
933
|
-
focus_area = getattr(args, "focus", None)
|
|
934
|
-
|
|
935
|
-
# Only print details in non-JSON mode
|
|
936
|
-
if not (hasattr(args, "json") and args.json):
|
|
937
|
-
print(f" Mode: {mode.upper()}")
|
|
938
|
-
print(f" Project Root: {project_root}")
|
|
939
|
-
if focus_area:
|
|
940
|
-
print(f" Focus Area: {focus_area}")
|
|
941
|
-
print()
|
|
942
|
-
|
|
943
|
-
# Show agents for mode
|
|
944
|
-
mode_agents = {
|
|
945
|
-
"daily": ["Security", "Coverage", "Quality"],
|
|
946
|
-
"weekly": ["Security", "Coverage", "Quality", "Performance", "Documentation"],
|
|
947
|
-
"release": [
|
|
948
|
-
"Security",
|
|
949
|
-
"Coverage",
|
|
950
|
-
"Quality",
|
|
951
|
-
"Performance",
|
|
952
|
-
"Documentation",
|
|
953
|
-
"Architecture",
|
|
954
|
-
],
|
|
955
|
-
}
|
|
956
|
-
|
|
957
|
-
print(f" 🔍 {mode.capitalize()} Check Agents:")
|
|
958
|
-
for agent in mode_agents.get(mode, []):
|
|
959
|
-
print(f" • {agent}")
|
|
960
|
-
print()
|
|
961
|
-
|
|
962
|
-
# Create workflow
|
|
963
|
-
workflow = OrchestratedHealthCheckWorkflow(mode=mode, project_root=project_root)
|
|
964
|
-
|
|
965
|
-
try:
|
|
966
|
-
# Execute workflow
|
|
967
|
-
report = asyncio.run(workflow.execute())
|
|
968
|
-
|
|
969
|
-
# Display results
|
|
970
|
-
if hasattr(args, "json") and args.json:
|
|
971
|
-
print(json.dumps(report.to_dict(), indent=2))
|
|
972
|
-
else:
|
|
973
|
-
print(report.format_console_output())
|
|
974
|
-
|
|
975
|
-
# Return appropriate exit code (70+ is passing)
|
|
976
|
-
return 0 if report.overall_health_score >= 70 else 1
|
|
977
|
-
|
|
978
|
-
except Exception as e:
|
|
979
|
-
print(f" ❌ Error executing health check workflow: {e}")
|
|
980
|
-
print()
|
|
981
|
-
logger.exception("Health check workflow failed")
|
|
982
|
-
return 1
|
|
983
|
-
|
|
984
|
-
else:
|
|
985
|
-
print(f" ❌ Unknown workflow type: {workflow_type}")
|
|
986
|
-
print()
|
|
987
|
-
print(" Available workflows:")
|
|
988
|
-
print(" - release-prep: Release readiness validation (parallel agents)")
|
|
989
|
-
print(" - health-check: Project health assessment (daily/weekly/release modes)")
|
|
990
|
-
print()
|
|
991
|
-
print(" Note: test-coverage workflow disabled in v4.0.0 (being redesigned)")
|
|
992
|
-
print()
|
|
993
|
-
return 1
|
|
994
|
-
|
|
995
|
-
print()
|
|
996
|
-
print("=" * 60)
|
|
997
|
-
print()
|
|
998
|
-
|
|
999
|
-
return 0
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
def cmd_init(args):
|
|
1003
|
-
"""Initialize a new Empathy Framework project.
|
|
1004
|
-
|
|
1005
|
-
Creates a configuration file with sensible defaults.
|
|
1006
|
-
|
|
1007
|
-
Args:
|
|
1008
|
-
args: Namespace object from argparse with attributes:
|
|
1009
|
-
- format (str): Output format ('yaml' or 'json').
|
|
1010
|
-
- output (str | None): Output file path.
|
|
1011
|
-
|
|
1012
|
-
Returns:
|
|
1013
|
-
None: Creates configuration file at specified path.
|
|
1014
|
-
|
|
1015
|
-
Raises:
|
|
1016
|
-
ValueError: If output path is invalid or unsafe.
|
|
1017
|
-
"""
|
|
1018
|
-
config_format = args.format
|
|
1019
|
-
output_path = args.output or f"attune.config.{config_format}"
|
|
1020
|
-
|
|
1021
|
-
# Validate output path to prevent path traversal attacks
|
|
1022
|
-
validated_path = _validate_file_path(output_path)
|
|
1023
|
-
|
|
1024
|
-
logger.info(f"Initializing new Empathy Framework project with format: {config_format}")
|
|
1025
|
-
|
|
1026
|
-
# Create default config
|
|
1027
|
-
config = EmpathyConfig()
|
|
1028
|
-
|
|
1029
|
-
# Save to file
|
|
1030
|
-
if config_format == "yaml":
|
|
1031
|
-
config.to_yaml(str(validated_path))
|
|
1032
|
-
logger.info(f"Created YAML configuration file: {output_path}")
|
|
1033
|
-
logger.info(f"✓ Created YAML configuration: {output_path}")
|
|
1034
|
-
elif config_format == "json":
|
|
1035
|
-
config.to_json(str(validated_path))
|
|
1036
|
-
logger.info(f"Created JSON configuration file: {validated_path}")
|
|
1037
|
-
logger.info(f"✓ Created JSON configuration: {validated_path}")
|
|
1038
|
-
|
|
1039
|
-
logger.info("\nNext steps:")
|
|
1040
|
-
logger.info(f" 1. Edit {output_path} to customize settings")
|
|
1041
|
-
logger.info(" 2. Use 'empathy run' to start using the framework")
|
|
1042
|
-
|
|
1043
|
-
|
|
1044
|
-
def cmd_validate(args):
|
|
1045
|
-
"""Validate a configuration file.
|
|
1046
|
-
|
|
1047
|
-
Loads and validates the specified configuration file.
|
|
1048
|
-
|
|
1049
|
-
Args:
|
|
1050
|
-
args: Namespace object from argparse with attributes:
|
|
1051
|
-
- config (str): Path to configuration file to validate.
|
|
1052
|
-
|
|
1053
|
-
Returns:
|
|
1054
|
-
None: Prints validation result. Exits with code 1 on failure.
|
|
1055
|
-
"""
|
|
1056
|
-
filepath = args.config
|
|
1057
|
-
logger.info(f"Validating configuration file: {filepath}")
|
|
1058
|
-
|
|
1059
|
-
try:
|
|
1060
|
-
config = load_config(filepath=filepath, use_env=False)
|
|
1061
|
-
config.validate()
|
|
1062
|
-
logger.info(f"Configuration validation successful: {filepath}")
|
|
1063
|
-
logger.info(f"✓ Configuration valid: {filepath}")
|
|
1064
|
-
logger.info(f"\n User ID: {config.user_id}")
|
|
1065
|
-
logger.info(f" Target Level: {config.target_level}")
|
|
1066
|
-
logger.info(f" Confidence Threshold: {config.confidence_threshold}")
|
|
1067
|
-
logger.info(f" Persistence Backend: {config.persistence_backend}")
|
|
1068
|
-
logger.info(f" Metrics Enabled: {config.metrics_enabled}")
|
|
1069
|
-
except (OSError, FileNotFoundError) as e:
|
|
1070
|
-
# Config file not found or cannot be read
|
|
1071
|
-
logger.error(f"Configuration file error: {e}")
|
|
1072
|
-
logger.error(f"✗ Cannot read configuration file: {e}")
|
|
1073
|
-
sys.exit(1)
|
|
1074
|
-
except ValueError as e:
|
|
1075
|
-
# Invalid configuration values
|
|
1076
|
-
logger.error(f"Configuration validation failed: {e}")
|
|
1077
|
-
logger.error(f"✗ Configuration invalid: {e}")
|
|
1078
|
-
sys.exit(1)
|
|
1079
|
-
except Exception as e:
|
|
1080
|
-
# Unexpected errors during config validation
|
|
1081
|
-
logger.exception(f"Unexpected error validating configuration: {e}")
|
|
1082
|
-
logger.error(f"✗ Configuration invalid: {e}")
|
|
1083
|
-
sys.exit(1)
|
|
1084
|
-
|
|
1085
|
-
|
|
1086
|
-
def cmd_info(args):
|
|
1087
|
-
"""Display information about the framework.
|
|
1088
|
-
|
|
1089
|
-
Shows configuration, persistence, and feature status.
|
|
1090
|
-
|
|
1091
|
-
Args:
|
|
1092
|
-
args: Namespace object from argparse with attributes:
|
|
1093
|
-
- config (str | None): Optional path to configuration file.
|
|
1094
|
-
|
|
1095
|
-
Returns:
|
|
1096
|
-
None: Prints framework information to stdout.
|
|
1097
|
-
"""
|
|
1098
|
-
config_file = args.config
|
|
1099
|
-
logger.info("Displaying framework information")
|
|
1100
|
-
|
|
1101
|
-
if config_file:
|
|
1102
|
-
logger.debug(f"Loading config from file: {config_file}")
|
|
1103
|
-
config = load_config(filepath=config_file)
|
|
1104
|
-
else:
|
|
1105
|
-
logger.debug("Loading default configuration")
|
|
1106
|
-
config = load_config()
|
|
1107
|
-
|
|
1108
|
-
logger.info("=== Empathy Framework Info ===\n")
|
|
1109
|
-
logger.info("Configuration:")
|
|
1110
|
-
logger.info(f" User ID: {config.user_id}")
|
|
1111
|
-
logger.info(f" Target Level: {config.target_level}")
|
|
1112
|
-
logger.info(f" Confidence Threshold: {config.confidence_threshold}")
|
|
1113
|
-
logger.info("\nPersistence:")
|
|
1114
|
-
logger.info(f" Backend: {config.persistence_backend}")
|
|
1115
|
-
logger.info(f" Path: {config.persistence_path}")
|
|
1116
|
-
logger.info(f" Enabled: {config.persistence_enabled}")
|
|
1117
|
-
logger.info("\nMetrics:")
|
|
1118
|
-
logger.info(f" Enabled: {config.metrics_enabled}")
|
|
1119
|
-
logger.info(f" Path: {config.metrics_path}")
|
|
1120
|
-
logger.info("\nPattern Library:")
|
|
1121
|
-
logger.info(f" Enabled: {config.pattern_library_enabled}")
|
|
1122
|
-
logger.info(f" Pattern Sharing: {config.pattern_sharing}")
|
|
1123
|
-
logger.info(f" Confidence Threshold: {config.pattern_confidence_threshold}")
|
|
1124
|
-
|
|
1125
|
-
|
|
1126
|
-
def cmd_patterns_list(args):
|
|
1127
|
-
"""List patterns in a pattern library.
|
|
1128
|
-
|
|
1129
|
-
Args:
|
|
1130
|
-
args: Namespace object from argparse with attributes:
|
|
1131
|
-
- library (str): Path to pattern library file.
|
|
1132
|
-
- format (str): Library format ('json' or 'sqlite').
|
|
1133
|
-
|
|
1134
|
-
Returns:
|
|
1135
|
-
None: Prints pattern list to stdout. Exits with code 1 on failure.
|
|
1136
|
-
"""
|
|
1137
|
-
filepath = args.library
|
|
1138
|
-
format_type = args.format
|
|
1139
|
-
logger.info(f"Listing patterns from library: {filepath} (format: {format_type})")
|
|
1140
|
-
|
|
1141
|
-
try:
|
|
1142
|
-
if format_type == "json":
|
|
1143
|
-
library = PatternPersistence.load_from_json(filepath)
|
|
1144
|
-
elif format_type == "sqlite":
|
|
1145
|
-
library = PatternPersistence.load_from_sqlite(filepath)
|
|
1146
|
-
else:
|
|
1147
|
-
logger.error(f"Unknown pattern library format: {format_type}")
|
|
1148
|
-
logger.error(f"✗ Unknown format: {format_type}")
|
|
1149
|
-
sys.exit(1)
|
|
1150
|
-
|
|
1151
|
-
logger.info(f"Loaded {len(library.patterns)} patterns from {filepath}")
|
|
1152
|
-
logger.info(f"=== Pattern Library: {filepath} ===\n")
|
|
1153
|
-
logger.info(f"Total patterns: {len(library.patterns)}")
|
|
1154
|
-
logger.info(f"Total agents: {len(library.agent_contributions)}")
|
|
1155
|
-
|
|
1156
|
-
if library.patterns:
|
|
1157
|
-
logger.info("\nPatterns:")
|
|
1158
|
-
for pattern_id, pattern in library.patterns.items():
|
|
1159
|
-
logger.info(f"\n [{pattern_id}] {pattern.name}")
|
|
1160
|
-
logger.info(f" Agent: {pattern.agent_id}")
|
|
1161
|
-
logger.info(f" Type: {pattern.pattern_type}")
|
|
1162
|
-
logger.info(f" Confidence: {pattern.confidence:.2f}")
|
|
1163
|
-
logger.info(f" Usage: {pattern.usage_count}")
|
|
1164
|
-
logger.info(f" Success Rate: {pattern.success_rate:.2f}")
|
|
1165
|
-
except FileNotFoundError:
|
|
1166
|
-
logger.error(f"Pattern library not found: {filepath}")
|
|
1167
|
-
logger.error(f"✗ Pattern library not found: {filepath}")
|
|
1168
|
-
sys.exit(1)
|
|
1169
|
-
|
|
1170
|
-
|
|
1171
|
-
def cmd_patterns_export(args):
|
|
1172
|
-
"""Export patterns from one format to another.
|
|
1173
|
-
|
|
1174
|
-
Args:
|
|
1175
|
-
args: Namespace object from argparse with attributes:
|
|
1176
|
-
- input (str): Input file path.
|
|
1177
|
-
- output (str): Output file path.
|
|
1178
|
-
- input_format (str): Input format ('json' or 'sqlite').
|
|
1179
|
-
- output_format (str): Output format ('json' or 'sqlite').
|
|
1180
|
-
|
|
1181
|
-
Returns:
|
|
1182
|
-
None: Exports patterns to output file. Exits with code 1 on failure.
|
|
1183
|
-
|
|
1184
|
-
Raises:
|
|
1185
|
-
ValueError: If output path is invalid or unsafe.
|
|
1186
|
-
"""
|
|
1187
|
-
input_file = args.input
|
|
1188
|
-
input_format = args.input_format
|
|
1189
|
-
output_file = args.output
|
|
1190
|
-
output_format = args.output_format
|
|
1191
|
-
|
|
1192
|
-
logger.info(f"Exporting patterns from {input_format} to {output_format}")
|
|
1193
|
-
|
|
1194
|
-
# Load from input format
|
|
1195
|
-
try:
|
|
1196
|
-
if input_format == "json":
|
|
1197
|
-
library = PatternPersistence.load_from_json(input_file)
|
|
1198
|
-
elif input_format == "sqlite":
|
|
1199
|
-
library = PatternPersistence.load_from_sqlite(input_file)
|
|
1200
|
-
else:
|
|
1201
|
-
logger.error(f"Unknown input format: {input_format}")
|
|
1202
|
-
logger.error(f"✗ Unknown input format: {input_format}")
|
|
1203
|
-
sys.exit(1)
|
|
1204
|
-
|
|
1205
|
-
logger.info(f"Loaded {len(library.patterns)} patterns from {input_file}")
|
|
1206
|
-
logger.info(f"✓ Loaded {len(library.patterns)} patterns from {input_file}")
|
|
1207
|
-
except (OSError, FileNotFoundError) as e:
|
|
1208
|
-
# Input file not found or cannot be read
|
|
1209
|
-
logger.error(f"Pattern file error: {e}")
|
|
1210
|
-
logger.error(f"✗ Cannot read pattern file: {e}")
|
|
1211
|
-
sys.exit(1)
|
|
1212
|
-
except (ValueError, KeyError) as e:
|
|
1213
|
-
# Invalid pattern data format
|
|
1214
|
-
logger.error(f"Pattern data error: {e}")
|
|
1215
|
-
logger.error(f"✗ Invalid pattern data: {e}")
|
|
1216
|
-
sys.exit(1)
|
|
1217
|
-
except Exception as e:
|
|
1218
|
-
# Unexpected errors loading patterns
|
|
1219
|
-
logger.exception(f"Unexpected error loading patterns: {e}")
|
|
1220
|
-
logger.error(f"✗ Failed to load patterns: {e}")
|
|
1221
|
-
sys.exit(1)
|
|
1222
|
-
|
|
1223
|
-
# Validate output path
|
|
1224
|
-
validated_output = _validate_file_path(output_file)
|
|
1225
|
-
|
|
1226
|
-
# Save to output format
|
|
1227
|
-
try:
|
|
1228
|
-
if output_format == "json":
|
|
1229
|
-
PatternPersistence.save_to_json(library, str(validated_output))
|
|
1230
|
-
elif output_format == "sqlite":
|
|
1231
|
-
PatternPersistence.save_to_sqlite(library, str(validated_output))
|
|
1232
|
-
|
|
1233
|
-
logger.info(f"Saved {len(library.patterns)} patterns to {output_file}")
|
|
1234
|
-
logger.info(f"✓ Saved {len(library.patterns)} patterns to {output_file}")
|
|
1235
|
-
except (OSError, FileNotFoundError, PermissionError) as e:
|
|
1236
|
-
# Cannot write output file
|
|
1237
|
-
logger.error(f"Pattern file write error: {e}")
|
|
1238
|
-
logger.error(f"✗ Cannot write pattern file: {e}")
|
|
1239
|
-
sys.exit(1)
|
|
1240
|
-
except Exception as e:
|
|
1241
|
-
# Unexpected errors saving patterns
|
|
1242
|
-
logger.exception(f"Unexpected error saving patterns: {e}")
|
|
1243
|
-
logger.error(f"✗ Failed to save patterns: {e}")
|
|
1244
|
-
sys.exit(1)
|
|
1245
|
-
|
|
1246
|
-
|
|
1247
|
-
- def cmd_patterns_resolve(args):
-     """Resolve investigating bug patterns with root cause and fix.
-
-     Updates pattern status and adds resolution information.
-
-     Args:
-         args: Namespace object from argparse with attributes:
-             - bug_id (str | None): Bug pattern ID to resolve; if omitted, lists investigating bugs.
-             - root_cause (str | None): Root cause description.
-             - fix (str | None): Fix description.
-             - fix_code (str | None): Code snippet of the fix.
-             - time (int | None): Resolution time in minutes.
-             - resolved_by (str | None): Resolver handle (default: @developer).
-             - status (str): New status ('resolved', 'wont_fix', etc.).
-             - patterns_dir (str): Patterns directory path.
-             - commit (str | None): Related commit hash.
-             - no_regenerate (bool): If True, skip regenerating patterns_summary.md.
-
-     Returns:
-         None: Updates pattern and prints result. Exits with code 1 on failure.
-     """
-     from attune_llm.pattern_resolver import PatternResolver
-
-     resolver = PatternResolver(args.patterns_dir)
-
-     # If no bug_id, list investigating bugs
-     if not args.bug_id:
-         investigating = resolver.list_investigating()
-         if not investigating:
-             print("No bugs with 'investigating' status found.")
-             return
-
-         print(f"\nBugs needing resolution ({len(investigating)}):\n")
-         for bug in investigating:
-             print(f" {bug.get('bug_id', 'unknown')}")
-             print(f" Type: {bug.get('error_type', 'unknown')}")
-             print(f" File: {bug.get('file_path', 'unknown')}")
-             msg = bug.get("error_message", "N/A")
-             print(f" Message: {msg[:60]}..." if len(msg) > 60 else f" Message: {msg}")
-             print()
-         return
-
-     # Validate required args
-     if not args.root_cause or not args.fix:
-         print("✗ --root-cause and --fix are required when resolving a bug")
-         print(
-             " Example: empathy patterns resolve bug_123 --root-cause 'Null check' --fix 'Added ?.'",
-         )
-         sys.exit(1)
-
-     # Resolve the specified bug
-     success = resolver.resolve_bug(
-         bug_id=args.bug_id,
-         root_cause=args.root_cause,
-         fix_applied=args.fix,
-         fix_code=args.fix_code,
-         resolution_time_minutes=args.time or 0,
-         resolved_by=args.resolved_by or "@developer",
-     )
-
-     if success:
-         print(f"✓ Resolved: {args.bug_id}")
-
-         # Regenerate summary if requested
-         if not args.no_regenerate:
-             if resolver.regenerate_summary():
-                 print("✓ Regenerated patterns_summary.md")
-             else:
-                 print("⚠ Failed to regenerate summary")
-     else:
-         print(f"✗ Failed to resolve: {args.bug_id}")
-         print(" Use 'empathy patterns resolve' (no args) to list investigating bugs")
-         sys.exit(1)
-
-
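
For reference, resolving a bug outside the CLI boils down to two `PatternResolver` calls, both shown in the deleted code. A minimal sketch, with a hypothetical bug ID:

```python
# Sketch only: resolve one investigating bug pattern, mirroring the
# resolve_bug() keywords used by the deleted cmd_patterns_resolve.
from attune_llm.pattern_resolver import PatternResolver

resolver = PatternResolver("./patterns")
for bug in resolver.list_investigating():
    print(bug.get("bug_id"), bug.get("error_type"))

ok = resolver.resolve_bug(
    bug_id="bug_123",  # hypothetical ID
    root_cause="Missing null check",
    fix_applied="Added optional chaining",
    fix_code=None,
    resolution_time_minutes=15,
    resolved_by="@developer",
)
if ok:
    resolver.regenerate_summary()
```
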
- def cmd_status(args):
-     """Session status assistant - prioritized project status report.
-
-     Collects and displays project status including patterns, git context,
-     and health metrics with priority scoring.
-
-     Args:
-         args: Namespace object from argparse with attributes:
-             - patterns_dir (str): Path to patterns directory (default: ./patterns).
-             - project_root (str): Project root directory (default: .).
-             - inactivity (int): Minutes of inactivity before showing status.
-             - full (bool): If True, show all items without limit.
-             - json (bool): If True, output as JSON format.
-             - select (int | None): Select specific item for action prompt.
-             - force (bool): If True, show status even with recent activity.
-
-     Returns:
-         None: Prints prioritized status report or JSON output.
-     """
-     from attune_llm.session_status import SessionStatusCollector
-
-     config = {"inactivity_minutes": args.inactivity}
-     collector = SessionStatusCollector(
-         patterns_dir=args.patterns_dir,
-         project_root=args.project_root,
-         config=config,
-     )
-
-     # Check if should show (unless forced)
-     if not args.force and not collector.should_show():
-         print("No status update needed (recent activity detected).")
-         print("Use --force to show status anyway.")
-         return
-
-     # Collect status
-     status = collector.collect()
-
-     # Handle selection
-     if args.select:
-         prompt = collector.get_action_prompt(status, args.select)
-         if prompt:
-             print(f"\nAction prompt for selection {args.select}:\n")
-             print(prompt)
-         else:
-             print(f"Invalid selection: {args.select}")
-         return
-
-     # Output
-     if args.json:
-         print(collector.format_json(status))
-     else:
-         max_items = None if args.full else 5
-         print()
-         print(collector.format_output(status, max_items=max_items))
-         print()
-
-     # Record interaction
-     collector.record_interaction()
-
-
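
The collect/format flow above is straightforward to drive directly. A minimal sketch using only the `SessionStatusCollector` methods the deleted command calls; the config key and values are taken from that code:

```python
# Sketch only: the status flow behind the deleted cmd_status.
from attune_llm.session_status import SessionStatusCollector

collector = SessionStatusCollector(
    patterns_dir="./patterns",
    project_root=".",
    config={"inactivity_minutes": 30},  # hypothetical threshold
)
if collector.should_show():
    status = collector.collect()
    print(collector.format_output(status, max_items=5))
    collector.record_interaction()
```
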
- def cmd_review(args):
-     """Pattern-based code review against historical bugs.
-
-     Note: This command has been deprecated. The underlying workflow module
-     has been removed. Use 'empathy workflow run bug-predict' instead.
-
-     Args:
-         args: Namespace object from argparse.
-
-     Returns:
-         None: Prints deprecation message.
-     """
-     print("⚠️ The 'review' command has been deprecated.")
-     print()
-     print("The CodeReviewWorkflow module has been removed.")
-     print("Please use one of these alternatives:")
-     print()
-     print(" empathy workflow run bug-predict # Scan for risky patterns")
-     print(" ruff check <files> # Fast linting")
-     print(" bandit -r <path> # Security scanning")
-     print()
-
-
- def cmd_health(args):
-     """Code health assistant - run health checks and auto-fix issues.
-
-     Runs comprehensive health checks including linting, type checking,
-     and formatting with optional auto-fix capability.
-
-     Args:
-         args: Namespace object from argparse with attributes:
-             - check (str | None): Specific check to run (lint/type/format/test).
-             - deep (bool): If True, run comprehensive checks.
-             - fix (bool): If True, auto-fix issues where possible.
-             - dry_run (bool): With --fix, preview fixes without applying them.
-             - interactive (bool): With --fix, review non-safe fixes interactively.
-             - threshold (str): Severity threshold for issues.
-             - project_root (str): Project root directory.
-             - patterns_dir (str): Path to patterns directory.
-             - details (bool): If True, show detailed issue list.
-             - full (bool): If True, show the most detailed output level.
-             - trends (int | None): Show health trends over this many days.
-             - compare (str | None): Compare against historical baseline.
-             - export (str | None): Export results to file.
-             - json (bool): If True, output as JSON format.
-
-     Returns:
-         None: Prints health check results and optionally fixes issues.
-     """
-     import asyncio
-
-     from attune_llm.code_health import (
-         AutoFixer,
-         CheckCategory,
-         HealthCheckRunner,
-         HealthTrendTracker,
-         format_health_output,
-     )
-
-     runner = HealthCheckRunner(
-         project_root=args.project_root,
-     )
-
-     # Determine what checks to run
-     if args.check:
-         # Run specific check
-         try:
-             category = CheckCategory(args.check)
-             report_future = runner.run_check(category)
-             result = asyncio.run(report_future)
-             # Create a minimal report with just this result
-             from attune_llm.code_health import HealthReport
-
-             report = HealthReport(project_root=args.project_root)
-             report.add_result(result)
-         except ValueError:
-             print(f"Unknown check category: {args.check}")
-             print(f"Available: {', '.join(c.value for c in CheckCategory)}")
-             return
-     elif args.deep:
-         # Run all checks
-         print("Running comprehensive health check...\n")
-         report = asyncio.run(runner.run_all())
-     else:
-         # Run quick checks (default)
-         report = asyncio.run(runner.run_quick())
-
-     # Handle fix mode
-     if args.fix:
-         fixer = AutoFixer()
-
-         if args.dry_run:
-             # Preview only
-             fixes = fixer.preview_fixes(report)
-             if fixes:
-                 print("Would fix the following issues:\n")
-                 for fix in fixes:
-                     safe_indicator = " (safe)" if fix["safe"] else " (needs confirmation)"
-                     print(f" [{fix['category']}] {fix['file']}")
-                     print(f" {fix['issue']}")
-                     print(f" Command: {fix['fix_command']}{safe_indicator}")
-                     print()
-             else:
-                 print("No auto-fixable issues found.")
-             return
-
-         # Apply fixes
-         if args.check:
-             try:
-                 category = CheckCategory(args.check)
-                 result = asyncio.run(fixer.fix_category(report, category))
-             except ValueError:
-                 result = {"fixed": [], "skipped": [], "failed": []}
-         else:
-             result = asyncio.run(fixer.fix_all(report, interactive=args.interactive))
-
-         # Report fix results
-         if result["fixed"]:
-             print(f"✓ Fixed {len(result['fixed'])} issue(s)")
-             for fix in result["fixed"][:5]:
-                 print(f" - {fix['file_path']}: {fix['message']}")
-             if len(result["fixed"]) > 5:
-                 print(f" ... and {len(result['fixed']) - 5} more")
-
-         if result["skipped"]:
-             if args.interactive:
-                 print(f"\n⚠ Skipped {len(result['skipped'])} issue(s) (could not auto-fix)")
-             else:
-                 print(
-                     f"\n⚠ Skipped {len(result['skipped'])} issue(s) (use --interactive to review)",
-                 )
-
-         if result["failed"]:
-             print(f"\n✗ Failed to fix {len(result['failed'])} issue(s)")
-
-         return
-
-     # Handle trends
-     if args.trends:
-         tracker = HealthTrendTracker(project_root=args.project_root)
-         trends = tracker.get_trends(days=args.trends)
-
-         print(f"📈 Health Trends ({trends['period_days']} days)\n")
-         print(f"Average Score: {trends['average_score']}/100")
-         print(f"Trend: {trends['trend_direction']} ({trends['score_change']:+d})")
-
-         if trends["data_points"]:
-             print("\nRecent scores:")
-             for point in trends["data_points"][:7]:
-                 print(f" {point['date']}: {point['score']}/100")
-
-         hotspots = tracker.identify_hotspots()
-         if hotspots:
-             print("\n🔥 Hotspots (files with recurring issues):")
-             for spot in hotspots[:5]:
-                 print(f" {spot['file']}: {spot['issue_count']} issues")
-
-         return
-
-     # Output report
-     if args.json:
-         import json
-
-         print(json.dumps(report.to_dict(), indent=2, default=str))
-     else:
-         level = 3 if args.full else (2 if args.details else 1)
-         print(format_health_output(report, level=level))
-
-     # Record to trend history
-     if not args.check:  # Only record full runs
-         tracker = HealthTrendTracker(project_root=args.project_root)
-         tracker.record_check(report)
-
-
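
The default path through the command above is a quick async run plus one formatting call. A minimal sketch under the same imports the deleted code uses:

```python
# Sketch only: a default (quick) health run, as the deleted cmd_health
# performs when neither --check nor --deep is given.
import asyncio

from attune_llm.code_health import HealthCheckRunner, format_health_output

runner = HealthCheckRunner(project_root=".")
report = asyncio.run(runner.run_quick())
print(format_health_output(report, level=1))  # level 2/3 adds detail
```
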
- def cmd_metrics_show(args):
-     """Display metrics for a user.
-
-     Args:
-         args: Namespace object from argparse with attributes:
-             - user (str): User ID to retrieve metrics for.
-             - db (str): Path to metrics database (default: ./metrics.db).
-
-     Returns:
-         None: Prints user metrics to stdout. Exits with code 1 on failure.
-     """
-     db_path = args.db
-     user_id = args.user
-
-     logger.info(f"Retrieving metrics for user: {user_id} from {db_path}")
-
-     collector = MetricsCollector(db_path)
-
-     try:
-         stats = collector.get_user_stats(user_id)
-
-         logger.info(f"Successfully retrieved metrics for user: {user_id}")
-         logger.info(f"=== Metrics for User: {user_id} ===\n")
-         logger.info(f"Total Operations: {stats['total_operations']}")
-         logger.info(f"Success Rate: {stats['success_rate']:.1%}")
-         logger.info(f"Average Response Time: {stats.get('avg_response_time_ms', 0):.0f} ms")
-         logger.info(f"\nFirst Use: {stats['first_use']}")
-         logger.info(f"Last Use: {stats['last_use']}")
-
-         logger.info("\nEmpathy Level Usage:")
-         logger.info(f" Level 1: {stats.get('level_1_count', 0)} uses")
-         logger.info(f" Level 2: {stats.get('level_2_count', 0)} uses")
-         logger.info(f" Level 3: {stats.get('level_3_count', 0)} uses")
-         logger.info(f" Level 4: {stats.get('level_4_count', 0)} uses")
-         logger.info(f" Level 5: {stats.get('level_5_count', 0)} uses")
-     except (OSError, FileNotFoundError) as e:
-         # Database file not found
-         logger.error(f"Metrics database error: {e}")
-         logger.error(f"✗ Cannot read metrics database: {e}")
-         sys.exit(1)
-     except KeyError as e:
-         # User not found in database
-         logger.error(f"User not found in metrics: {e}")
-         logger.error(f"✗ User {user_id} not found: {e}")
-         sys.exit(1)
-     except Exception as e:
-         # Unexpected errors retrieving metrics
-         logger.exception(f"Unexpected error retrieving metrics for user {user_id}: {e}")
-         logger.error(f"✗ Failed to retrieve metrics: {e}")
-         sys.exit(1)
-
-
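
Reading the same stats programmatically is a single call; the stat keys below are the ones the deleted command prints, while the import path for `MetricsCollector` is an assumption (the deleted file imports it at module level, outside this hunk):

```python
# Sketch only: reading one user's stats. KeyError means the user
# is not present in the metrics database.
from attune_llm.metrics import MetricsCollector  # hypothetical import path

collector = MetricsCollector("./metrics.db")
stats = collector.get_user_stats("cli_user")
print(stats["total_operations"], f"{stats['success_rate']:.1%}")
for level in range(1, 6):
    print(f"Level {level}: {stats.get(f'level_{level}_count', 0)} uses")
```
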
- def cmd_state_list(args):
-     """List saved user states.
-
-     Args:
-         args: Namespace object from argparse with attributes:
-             - state_dir (str): Directory containing state files.
-
-     Returns:
-         None: Prints list of users with saved states.
-     """
-     state_dir = args.state_dir
-
-     logger.info(f"Listing saved user states from: {state_dir}")
-
-     manager = StateManager(state_dir)
-     users = manager.list_users()
-
-     logger.info(f"Found {len(users)} saved user states")
-     logger.info(f"=== Saved User States: {state_dir} ===\n")
-     logger.info(f"Total users: {len(users)}")
-
-     if users:
-         logger.info("\nUsers:")
-         for user_id in users:
-             logger.info(f" - {user_id}")
-
-
- def cmd_run(args):
-     """Interactive REPL for testing empathy interactions.
-
-     Starts an interactive session for testing empathy levels and features.
-
-     Args:
-         args: Namespace object from argparse with attributes:
-             - config (str | None): Path to configuration file.
-             - user_id (str | None): User ID (default: cli_user).
-             - level (int): Target empathy level (1-5).
-
-     Returns:
-         None: Runs interactive REPL until user exits.
-     """
-     config_file = args.config
-     user_id = args.user_id or "cli_user"
-     level = args.level
-
-     print("🧠 Empathy Framework - Interactive Mode")
-     print("=" * 50)
-
-     # Load configuration
-     if config_file:
-         config = load_config(filepath=config_file)
-         print(f"✓ Loaded config from: {config_file}")
-     else:
-         config = EmpathyConfig(user_id=user_id, target_level=level)
-         print("✓ Using default configuration")
-
-     print(f"\nUser ID: {config.user_id}")
-     print(f"Target Level: {config.target_level}")
-     print(f"Confidence Threshold: {config.confidence_threshold:.0%}")
-
-     # Create EmpathyOS instance
-     try:
-         empathy = EmpathyOS(
-             user_id=config.user_id,
-             target_level=config.target_level,
-             confidence_threshold=config.confidence_threshold,
-             persistence_enabled=config.persistence_enabled,
-         )
-         print("✓ Empathy OS initialized")
-     except ValueError as e:
-         # Invalid configuration parameters
-         print(f"✗ Configuration error: {e}")
-         sys.exit(1)
-     except (OSError, FileNotFoundError, PermissionError) as e:
-         # Cannot access required files/directories
-         print(f"✗ File system error: {e}")
-         sys.exit(1)
-     except Exception as e:
-         # Unexpected initialization failure
-         logger.exception(f"Unexpected error initializing Empathy OS: {e}")
-         print(f"✗ Failed to initialize Empathy OS: {e}")
-         sys.exit(1)
-
-     print("\n" + "=" * 50)
-     print("Type your input (or 'exit'/'quit' to stop)")
-     print("Type 'help' for available commands")
-     print("=" * 50 + "\n")
-
-     # Interactive loop
-     while True:
-         try:
-             user_input = input("You: ").strip()
-
-             if not user_input:
-                 continue
-
-             if user_input.lower() in ["exit", "quit", "q"]:
-                 print("\n👋 Goodbye!")
-                 break
-
-             if user_input.lower() == "help":
-                 print("\nAvailable commands:")
-                 print(" exit, quit, q - Exit the program")
-                 print(" help - Show this help message")
-                 print(" trust - Show current trust level")
-                 print(" stats - Show session statistics")
-                 print(" level - Show current empathy level")
-                 print()
-                 continue
-
-             if user_input.lower() == "trust":
-                 trust = empathy.collaboration_state.trust_level
-                 print(f"\n Current trust level: {trust:.0%}\n")
-                 continue
-
-             if user_input.lower() == "level":
-                 current_level = empathy.collaboration_state.current_level
-                 print(f"\n Current empathy level: {current_level}\n")
-                 continue
-
-             if user_input.lower() == "stats":
-                 print("\n Session Statistics:")
-                 print(f" Trust: {empathy.collaboration_state.trust_level:.0%}")
-                 print(f" Current Level: {empathy.collaboration_state.current_level}")
-                 print(f" Target Level: {config.target_level}")
-                 print()
-                 continue
-
-             # Process interaction
-             start_time = time.time()
-             response = empathy.interact(user_id=config.user_id, user_input=user_input, context={})
-             duration = (time.time() - start_time) * 1000
-
-             # Display response with level indicator
-             level_indicators = ["❌", "🔵", "🟢", "🟡", "🔮"]
-             level_indicator = level_indicators[response.level]
-
-             print(f"\nBot {level_indicator} [L{response.level}]: {response.response}")
-
-             # Show predictions if Level 4
-             if response.predictions:
-                 print("\n🔮 Predictions:")
-                 for pred in response.predictions:
-                     print(f" • {pred}")
-
-             conf = f"{response.confidence:.0%}"
-             print(f"\n Level: {response.level} | Confidence: {conf} | Time: {duration:.0f}ms")
-             print()
-
-             # Ask for feedback
-             feedback = input("Was this helpful? (y/n/skip): ").strip().lower()
-             if feedback == "y":
-                 empathy.record_success(success=True)
-                 trust = empathy.collaboration_state.trust_level
-                 print(f" ✓ Trust increased to {trust:.0%}\n")
-             elif feedback == "n":
-                 empathy.record_success(success=False)
-                 trust = empathy.collaboration_state.trust_level
-                 print(f" ✗ Trust decreased to {trust:.0%}\n")
-
-         except KeyboardInterrupt:
-             print("\n\n👋 Goodbye!")
-             break
-         except (ValueError, KeyError) as e:
-             # Invalid input or response structure
-             print(f"\n✗ Input error: {e}\n")
-         except Exception as e:
-             # Unexpected errors in interactive loop - log and continue
-             logger.exception(f"Unexpected error in interactive loop: {e}")
-             print(f"\n✗ Error: {e}\n")
-
-
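
Each REPL turn above reduces to one `interact()` call plus an optional feedback call. A minimal non-interactive sketch; the `EmpathyOS` import path is an assumption (it is imported elsewhere in the deleted file), and the constructor keywords are the ones the REPL passes:

```python
# Sketch only: one turn through EmpathyOS, the same calls the
# deleted REPL loop made per line of input.
from attune_llm import EmpathyOS  # hypothetical import path

empathy = EmpathyOS(
    user_id="cli_user",
    target_level=4,
    confidence_threshold=0.75,
    persistence_enabled=True,
)
response = empathy.interact(user_id="cli_user", user_input="hello", context={})
print(response.level, response.confidence, response.response)
empathy.record_success(success=True)  # positive feedback nudges trust_level up
```
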
- def cmd_inspect(args):
-     """Unified inspection command for patterns, metrics, and state.
-
-     Inspect various framework data including patterns, user metrics, and states.
-
-     Args:
-         args: Namespace object from argparse with attributes:
-             - type (str): What to inspect ('patterns', 'metrics', or 'state').
-             - user_id (str | None): Filter by user ID.
-             - db (str | None): Database path (default: .attune/patterns.db).
-             - state_dir (str | None): State directory for state inspection.
-
-     Returns:
-         None: Prints inspection results. Exits with code 1 on failure.
-     """
-     inspect_type = args.type
-     user_id = args.user_id
-     db_path = args.db or ".attune/patterns.db"
-
-     print(f"🔍 Inspecting: {inspect_type}")
-     print("=" * 50)
-
-     if inspect_type == "patterns":
-         try:
-             # Determine file format from extension
-             if db_path.endswith(".json"):
-                 library = PatternPersistence.load_from_json(db_path)
-             else:
-                 library = PatternPersistence.load_from_sqlite(db_path)
-
-             patterns = list(library.patterns.values())
-
-             # Filter by user_id if specified
-             if user_id:
-                 patterns = [p for p in patterns if p.agent_id == user_id]
-
-             print(f"\nPatterns for {'user ' + user_id if user_id else 'all users'}:")
-             print(f" Total patterns: {len(patterns)}")
-
-             if patterns:
-                 print("\n Top patterns:")
-                 # Sort by confidence
-                 top_patterns = heapq.nlargest(10, patterns, key=lambda p: p.confidence)
-                 for i, pattern in enumerate(top_patterns, 1):
-                     print(f"\n {i}. {pattern.name}")
-                     print(f" Confidence: {pattern.confidence:.0%}")
-                     print(f" Used: {pattern.usage_count} times")
-                     print(f" Success rate: {pattern.success_rate:.0%}")
-         except FileNotFoundError:
-             print(f"✗ Pattern library not found: {db_path}")
-             print(" Tip: Use 'empathy-framework workflow' to set up your first project")
-             sys.exit(1)
-         except (ValueError, KeyError) as e:
-             # Invalid pattern data format
-             print(f"✗ Invalid pattern data: {e}")
-             sys.exit(1)
-         except Exception as e:
-             # Unexpected errors loading patterns
-             logger.exception(f"Unexpected error loading patterns: {e}")
-             print(f"✗ Failed to load patterns: {e}")
-             sys.exit(1)
-
-     elif inspect_type == "metrics":
-         if not user_id:
-             print("✗ User ID required for metrics inspection")
-             print(" Usage: empathy-framework inspect metrics --user-id USER_ID")
-             sys.exit(1)
-
-         try:
-             collector = MetricsCollector(db_path=db_path)
-             stats = collector.get_user_stats(user_id)
-
-             print(f"\nMetrics for user: {user_id}")
-             print(f" Total operations: {stats.get('total_operations', 0)}")
-             print(f" Success rate: {stats.get('success_rate', 0):.0%}")
-             print(f" Average response time: {stats.get('avg_response_time_ms', 0):.0f}ms")
-             print("\n Empathy level usage:")
-             for level in range(1, 6):
-                 count = stats.get(f"level_{level}_count", 0)
-                 print(f" Level {level}: {count} times")
-         except (OSError, FileNotFoundError) as e:
-             # Database file not found
-             print(f"✗ Metrics database not found: {e}")
-             sys.exit(1)
-         except KeyError as e:
-             # User not found
-             print(f"✗ User {user_id} not found: {e}")
-             sys.exit(1)
-         except Exception as e:
-             # Unexpected errors loading metrics
-             logger.exception(f"Unexpected error loading metrics: {e}")
-             print(f"✗ Failed to load metrics: {e}")
-             sys.exit(1)
-
-     elif inspect_type == "state":
-         state_dir = args.state_dir or ".attune/state"
-         try:
-             manager = StateManager(state_dir)
-             users = manager.list_users()
-
-             print("\nSaved states:")
-             print(f" Total users: {len(users)}")
-
-             if users:
-                 print("\n Users:")
-                 for uid in users:
-                     print(f" • {uid}")
-         except (OSError, FileNotFoundError) as e:
-             # State directory not found
-             print(f"✗ State directory not found: {e}")
-             sys.exit(1)
-         except Exception as e:
-             # Unexpected errors loading state
-             logger.exception(f"Unexpected error loading state: {e}")
-             print(f"✗ Failed to load state: {e}")
-             sys.exit(1)
-
-     print()
-
-
- def cmd_export(args):
-     """Export patterns to file for sharing/backup.
-
-     Args:
-         args: Namespace object from argparse with attributes:
-             - output (str): Output file path.
-             - user_id (str | None): Filter patterns by user ID.
-             - db (str | None): Source database path.
-             - format (str): Output format ('json').
-
-     Returns:
-         None: Exports patterns to file. Exits with code 1 on failure.
-
-     Raises:
-         ValueError: If output path is invalid or unsafe.
-     """
-     output_file = args.output
-     user_id = args.user_id
-     db_path = args.db or ".attune/patterns.db"
-     format_type = args.format
-
-     print(f"📦 Exporting patterns to: {output_file}")
-     print("=" * 50)
-
-     try:
-         # Load pattern library from source file
-         if db_path.endswith(".json"):
-             library = PatternPersistence.load_from_json(db_path)
-         else:
-             library = PatternPersistence.load_from_sqlite(db_path)
-
-         patterns = list(library.patterns.values())
-
-         # Filter by user_id if specified
-         if user_id:
-             patterns = [p for p in patterns if p.agent_id == user_id]
-
-         print(f" Found {len(patterns)} patterns")
-
-         # Validate output path
-         validated_output = _validate_file_path(output_file)
-
-         if format_type == "json":
-             # Create filtered library if user_id specified
-             if user_id:
-                 filtered_library = PatternLibrary()
-                 for pattern in patterns:
-                     filtered_library.contribute_pattern(pattern.agent_id, pattern)
-             else:
-                 filtered_library = library
-
-             # Export as JSON
-             PatternPersistence.save_to_json(filtered_library, str(validated_output))
-             print(f" ✓ Exported {len(patterns)} patterns to {output_file}")
-         else:
-             print(f"✗ Unsupported format: {format_type}")
-             sys.exit(1)
-
-     except FileNotFoundError:
-         print(f"✗ Source file not found: {db_path}")
-         print(" Tip: Patterns are saved automatically when using the framework")
-         sys.exit(1)
-     except (OSError, PermissionError) as e:
-         # Cannot write output file
-         print(f"✗ Cannot write to file: {e}")
-         sys.exit(1)
-     except (ValueError, KeyError) as e:
-         # Invalid pattern data
-         print(f"✗ Invalid pattern data: {e}")
-         sys.exit(1)
-     except Exception as e:
-         # Unexpected errors during export
-         logger.exception(f"Unexpected error exporting patterns: {e}")
-         print(f"✗ Export failed: {e}")
-         sys.exit(1)
-
-     print()
-
-
- def cmd_import(args):
-     """Import patterns from file (local dev only - SQLite/JSON).
-
-     Merges imported patterns into existing pattern library.
-
-     Args:
-         args: Namespace object from argparse with attributes:
-             - input (str): Input file path.
-             - db (str | None): Target database path (default: .attune/patterns.db).
-
-     Returns:
-         None: Imports and merges patterns. Exits with code 1 on failure.
-     """
-     input_file = args.input
-     db_path = args.db or ".attune/patterns.db"
-
-     print(f"📥 Importing patterns from: {input_file}")
-     print("=" * 50)
-
-     try:
-         # Load patterns from input file
-         if input_file.endswith(".json"):
-             imported_library = PatternPersistence.load_from_json(input_file)
-         else:
-             imported_library = PatternPersistence.load_from_sqlite(input_file)
-
-         pattern_count = len(imported_library.patterns)
-         print(f" Found {pattern_count} patterns in file")
-
-         # Load existing library if it exists, otherwise create new one
-         try:
-             if db_path.endswith(".json"):
-                 existing_library = PatternPersistence.load_from_json(db_path)
-             else:
-                 existing_library = PatternPersistence.load_from_sqlite(db_path)
-
-             print(f" Existing library has {len(existing_library.patterns)} patterns")
-         except FileNotFoundError:
-             existing_library = PatternLibrary()
-             print(" Creating new pattern library")
-
-         # Merge imported patterns into existing library
-         for pattern in imported_library.patterns.values():
-             existing_library.contribute_pattern(pattern.agent_id, pattern)
-
-         # Save merged library (SQLite for local dev)
-         if db_path.endswith(".json"):
-             PatternPersistence.save_to_json(existing_library, db_path)
-         else:
-             PatternPersistence.save_to_sqlite(existing_library, db_path)
-
-         print(f" ✓ Imported {pattern_count} patterns")
-         print(f" ✓ Total patterns in library: {len(existing_library.patterns)}")
-
-     except FileNotFoundError:
-         print(f"✗ Input file not found: {input_file}")
-         sys.exit(1)
-     except (ValueError, KeyError) as e:
-         # Invalid pattern data format
-         print(f"✗ Invalid pattern data: {e}")
-         sys.exit(1)
-     except (OSError, PermissionError) as e:
-         # Cannot read input or write to database
-         print(f"✗ File access error: {e}")
-         sys.exit(1)
-     except Exception as e:
-         # Unexpected errors during import
-         logger.exception(f"Unexpected error importing patterns: {e}")
-         print(f"✗ Import failed: {e}")
-         sys.exit(1)
-
-     print()
-
-
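
The merge step above is the interesting part of the import: every incoming pattern is re-contributed under its own agent ID. A minimal sketch of just that step; the import path is an assumption:

```python
# Sketch only: the merge loop inside the deleted cmd_import, re-keying
# every imported pattern into an existing PatternLibrary.
from attune_llm.patterns import PatternLibrary, PatternPersistence  # hypothetical path

incoming = PatternPersistence.load_from_json("shared_patterns.json")
target = PatternLibrary()
for pattern in incoming.patterns.values():
    target.contribute_pattern(pattern.agent_id, pattern)
PatternPersistence.save_to_sqlite(target, ".attune/patterns.db")
```
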
- def cmd_workflow(args):
-     """Interactive setup workflow.
-
-     Guides user through initial framework configuration step by step.
-
-     Args:
-         args: Namespace object from argparse (no additional attributes used).
-
-     Returns:
-         None: Creates attune.config.yml with user's choices.
-     """
-     print("🧙 Empathy Framework Setup Workflow")
-     print("=" * 50)
-     print("\nI'll help you set up your Empathy Framework configuration.\n")
-
-     # Step 1: Use case
-     print("1. What's your primary use case?")
-     print(" [1] Software development")
-     print(" [2] Healthcare applications")
-     print(" [3] Customer support")
-     print(" [4] Other")
-
-     use_case_choice = input("\nYour choice (1-4): ").strip()
-     use_case_map = {
-         "1": "software_development",
-         "2": "healthcare",
-         "3": "customer_support",
-         "4": "general",
-     }
-     use_case = use_case_map.get(use_case_choice, "general")
-
-     # Step 2: Empathy level
-     print("\n2. What empathy level do you want to target?")
-     print(" [1] Level 1 - Reactive (basic Q&A)")
-     print(" [2] Level 2 - Guided (asks clarifying questions)")
-     print(" [3] Level 3 - Proactive (offers improvements)")
-     print(" [4] Level 4 - Anticipatory (predicts problems) ⭐ Recommended")
-     print(" [5] Level 5 - Transformative (reshapes workflows)")
-
-     level_choice = input("\nYour choice (1-5) [4]: ").strip() or "4"
-     target_level = int(level_choice) if level_choice in ["1", "2", "3", "4", "5"] else 4
-
-     # Step 3: LLM provider
-     print("\n3. Which LLM provider will you use?")
-     print(" [1] Anthropic Claude ⭐ Recommended")
-     print(" [2] OpenAI GPT-4")
-     print(" [3] Google Gemini (2M context)")
-     print(" [4] Local (Ollama)")
-     print(" [5] Hybrid (mix best models from each provider)")
-     print(" [6] Skip (configure later)")
-
-     llm_choice = input("\nYour choice (1-6) [1]: ").strip() or "1"
-     llm_map = {
-         "1": "anthropic",
-         "2": "openai",
-         "3": "google",
-         "4": "ollama",
-         "5": "hybrid",
-         "6": None,
-     }
-     llm_provider = llm_map.get(llm_choice, "anthropic")
-
-     # If hybrid selected, launch interactive tier selection
-     if llm_provider == "hybrid":
-         from attune.models.provider_config import configure_hybrid_interactive
-
-         configure_hybrid_interactive()
-         llm_provider = None  # Already saved by hybrid config
-
-     # Step 4: User ID
-     print("\n4. What user ID should we use?")
-     user_id = input("User ID [default_user]: ").strip() or "default_user"
-
-     # Generate configuration
-     config = {
-         "user_id": user_id,
-         "target_level": target_level,
-         "confidence_threshold": 0.75,
-         "persistence_enabled": True,
-         "persistence_backend": "sqlite",
-         "persistence_path": ".empathy",
-         "metrics_enabled": True,
-         "use_case": use_case,
-     }
-
-     if llm_provider:
-         config["llm_provider"] = llm_provider
-
-     # Save configuration
-     output_file = "attune.config.yml"
-     print(f"\n5. Creating configuration file: {output_file}")
-
-     # Write YAML config
-     yaml_content = f"""# Empathy Framework Configuration
- # Generated by setup workflow
-
- # Core settings
- user_id: "{config["user_id"]}"
- target_level: {config["target_level"]}
- confidence_threshold: {config["confidence_threshold"]}
-
- # Use case
- use_case: "{config["use_case"]}"
-
- # Persistence
- persistence_enabled: {str(config["persistence_enabled"]).lower()}
- persistence_backend: "{config["persistence_backend"]}"
- persistence_path: "{config["persistence_path"]}"
-
- # Metrics
- metrics_enabled: {str(config["metrics_enabled"]).lower()}
- """
-
-     if llm_provider:
-         yaml_content += f"""
- # LLM Provider
- llm_provider: "{llm_provider}"
- """
-
-     validated_output = _validate_file_path(output_file)
-     with open(validated_output, "w") as f:
-         f.write(yaml_content)
-
-     print(f" ✓ Created {validated_output}")
-
-     print("\n" + "=" * 50)
-     print("✅ Setup complete!")
-     print("\nNext steps:")
-     print(f" 1. Edit {output_file} to customize settings")
-
-     if llm_provider in ["anthropic", "openai", "google"]:
-         env_var_map = {
-             "anthropic": "ANTHROPIC_API_KEY",
-             "openai": "OPENAI_API_KEY",
-             "google": "GOOGLE_API_KEY",
-         }
-         env_var = env_var_map.get(llm_provider, "API_KEY")
-         print(f" 2. Set {env_var} environment variable")
-
-     print(" 3. Run: empathy-framework run --config attune.config.yml")
-     print("\nHappy empathizing! 🧠✨\n")
-
-
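
Since the wizard writes plain YAML, the generated `attune.config.yml` round-trips cleanly through `yaml.safe_load`. A minimal sketch of verifying the file it produced:

```python
# Sketch only: the generated attune.config.yml is plain YAML, so the
# values chosen in the wizard can be read back directly.
import yaml

with open("attune.config.yml") as f:
    config = yaml.safe_load(f)

assert config["target_level"] in range(1, 6)
print(config["user_id"], config.get("llm_provider", "unset"))
```
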
- def cmd_provider_hybrid(args):
-     """Configure hybrid mode - pick best models for each tier.
-
-     Args:
-         args: Namespace object from argparse (no additional attributes used).
-
-     Returns:
-         None: Launches interactive tier configuration.
-     """
-     from attune.models.provider_config import configure_hybrid_interactive
-
-     configure_hybrid_interactive()
-
-
- def cmd_provider_show(args):
-     """Show current provider configuration.
-
-     Args:
-         args: Namespace object from argparse (no additional attributes used).
-
-     Returns:
-         None: Prints provider configuration and model mappings.
-     """
-     from attune.models.provider_config import ProviderConfig
-     from attune.workflows.config import WorkflowConfig
-
-     print("\n" + "=" * 60)
-     print("Provider Configuration")
-     print("=" * 60)
-
-     # Detect available providers
-     config = ProviderConfig.auto_detect()
-     print(
-         f"\nDetected API keys for: {', '.join(config.available_providers) if config.available_providers else 'None'}",
-     )
-
-     # Load workflow config
-     wf_config = WorkflowConfig.load()
-     print(f"\nDefault provider: {wf_config.default_provider}")
-
-     # Show effective models
-     print("\nEffective model mapping:")
-     if wf_config.custom_models and "hybrid" in wf_config.custom_models:
-         hybrid = wf_config.custom_models["hybrid"]
-         for tier in ["cheap", "capable", "premium"]:
-             model = hybrid.get(tier, "not configured")
-             print(f" {tier:8} → {model}")
-     else:
-         from attune.models import MODEL_REGISTRY
-
-         provider = wf_config.default_provider
-         if provider in MODEL_REGISTRY:
-             for tier in ["cheap", "capable", "premium"]:
-                 model_info = MODEL_REGISTRY[provider].get(tier)
-                 if model_info:
-                     print(f" {tier:8} → {model_info.id} ({provider})")
-
-     print()
-
-
- def cmd_provider_set(args):
-     """Set default provider.
-
-     Args:
-         args: Namespace object from argparse with attributes:
-             - name (str): Provider name to set as default.
-
-     Returns:
-         None: Saves provider to .attune/workflows.yaml.
-     """
-     import yaml
-
-     provider = args.name
-     workflows_path = Path(".attune/workflows.yaml")
-
-     # Load existing config or create new
-     if workflows_path.exists():
-         with open(workflows_path) as f:
-             config = yaml.safe_load(f) or {}
-     else:
-         config = {}
-         workflows_path.parent.mkdir(parents=True, exist_ok=True)
-
-     config["default_provider"] = provider
-
-     validated_workflows_path = _validate_file_path(str(workflows_path))
-     with open(validated_workflows_path, "w") as f:
-         yaml.dump(config, f, default_flow_style=False, sort_keys=False)
-
-     print(f"✓ Default provider set to: {provider}")
-     print(f" Saved to: {validated_workflows_path}")
-
-     if provider == "hybrid":
-         print("\n Tip: Run 'empathy provider hybrid' to customize tier models")
-
-
- def cmd_sync_claude(args):
-     """Sync patterns to Claude Code rules directory.
-
-     Converts learned patterns into Claude Code markdown rules.
-
-     Args:
-         args: Namespace object from argparse with attributes:
-             - patterns_dir (str): Source patterns directory.
-             - output_dir (str): Target Claude Code rules directory.
-
-     Returns:
-         int: 0 on success, 1 on failure.
-
-     Raises:
-         ValueError: If output path is invalid or unsafe.
-     """
-     import json as json_mod
-
-     patterns_dir = Path(args.patterns_dir)
-     # Validate output directory path
-     validated_output_dir = _validate_file_path(args.output_dir)
-     output_dir = validated_output_dir
-
-     print("=" * 60)
-     print(" SYNC PATTERNS TO CLAUDE CODE")
-     print("=" * 60 + "\n")
-
-     if not patterns_dir.exists():
-         print(f"✗ Patterns directory not found: {patterns_dir}")
-         print(" Run 'empathy learn --analyze 20' first to learn patterns")
-         return 1
-
-     # Create output directory
-     output_dir.mkdir(parents=True, exist_ok=True)
-
-     synced_count = 0
-     pattern_files = ["debugging.json", "security.json", "tech_debt.json", "inspection.json"]
-
-     for pattern_file in pattern_files:
-         source_path = patterns_dir / pattern_file
-         if not source_path.exists():
-             continue
-
-         try:
-             with open(source_path) as f:
-                 data = json_mod.load(f)
-
-             patterns = data.get("patterns", data.get("items", []))
-             if not patterns:
-                 continue
-
-             # Generate markdown rule file
-             category = pattern_file.replace(".json", "")
-             rule_content = _generate_claude_rule(category, patterns)
-
-             # Write rule file
-             rule_file = output_dir / f"{category}.md"
-             # Validate rule file path before writing
-             validated_rule_file = _validate_file_path(str(rule_file), allowed_dir=str(output_dir))
-             with open(validated_rule_file, "w") as f:
-                 f.write(rule_content)
-
-             print(f" ✓ {category}: {len(patterns)} patterns → {rule_file}")
-             synced_count += len(patterns)
-
-         except (json_mod.JSONDecodeError, OSError) as e:
-             print(f" ✗ Failed to process {pattern_file}: {e}")
-
-     print(f"\n{'─' * 60}")
-     print(f" Total: {synced_count} patterns synced to {output_dir}")
-     print("=" * 60 + "\n")
-
-     if synced_count == 0:
-         print("No patterns to sync. Run 'empathy learn' first.")
-         return 1
-
-     return 0
-
-
- def _generate_claude_rule(category: str, patterns: list) -> str:
-     """Generate a Claude Code rule file from patterns."""
-     lines = [
-         f"# {category.replace('_', ' ').title()} Patterns",
-         "",
-         "Auto-generated from Empathy Framework learned patterns.",
-         f"Total patterns: {len(patterns)}",
-         "",
-         "---",
-         "",
-     ]
-
-     if category == "debugging":
-         lines.extend(
-             [
-                 "## Bug Fix Patterns",
-                 "",
-                 "When debugging similar issues, consider these historical fixes:",
-                 "",
-             ],
-         )
-         for p in patterns[:20]:  # Limit to 20 most recent
-             bug_type = p.get("bug_type", "unknown")
-             root_cause = p.get("root_cause", "Unknown")
-             fix = p.get("fix", "See commit history")
-             files = p.get("files_affected", [])
-
-             lines.append(f"### {bug_type}")
-             lines.append(f"- **Root cause**: {root_cause}")
-             lines.append(f"- **Fix**: {fix}")
-             if files:
-                 lines.append(f"- **Files**: {', '.join(files[:3])}")
-             lines.append("")
-
-     elif category == "security":
-         lines.extend(
-             [
-                 "## Security Decisions",
-                 "",
-                 "Previously reviewed security items:",
-                 "",
-             ],
-         )
-         for p in patterns[:20]:
-             decision = p.get("decision", "unknown")
-             reason = p.get("reason", "")
-             lines.append(f"- **{p.get('type', 'unknown')}**: {decision}")
-             if reason:
-                 lines.append(f" - Reason: {reason}")
-             lines.append("")
-
-     elif category == "tech_debt":
-         lines.extend(
-             [
-                 "## Tech Debt Tracking",
-                 "",
-                 "Known technical debt items:",
-                 "",
-             ],
-         )
-         for p in patterns[:20]:
-             lines.append(f"- {p.get('description', str(p))}")
-
-     else:
-         lines.extend(
-             [
-                 f"## {category.title()} Items",
-                 "",
-             ],
-         )
-         for p in patterns[:20]:
-             lines.append(f"- {p.get('description', str(p)[:100])}")
-
-     return "\n".join(lines)
-
-
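
To make the output shape of the rule generator concrete, here is a small sketch that feeds it one hypothetical debugging pattern (the dict keys match the ones the function reads); it assumes the module context of the deleted file:

```python
# Sketch only: feeding _generate_claude_rule a hypothetical debugging
# pattern to show the markdown it emits.
patterns = [
    {
        "bug_type": "null_deref",
        "root_cause": "Response parsed before checking status",
        "fix": "Guard on response.ok",
        "files_affected": ["client.py"],
    },
]
print(_generate_claude_rule("debugging", patterns))
# -> "# Debugging Patterns" header, then one "### null_deref" entry
```
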
- def _extract_workflow_content(final_output):
-     """Extract readable content from workflow final_output.
-
-     Workflows return their results in various formats - this extracts
-     the actual content users want to see.
-     """
-     if final_output is None:
-         return None
-
-     # If it's already a string, return it
-     if isinstance(final_output, str):
-         return final_output
-
-     # If it's a dict, try to extract meaningful content
-     if isinstance(final_output, dict):
-         # Common keys that contain the main output
-         # formatted_report is first - preferred for security-audit and other formatted outputs
-         content_keys = [
-             "formatted_report",  # Human-readable formatted output (security-audit, etc.)
-             "answer",
-             "synthesis",
-             "result",
-             "output",
-             "content",
-             "report",
-             "summary",
-             "analysis",
-             "review",
-             "documentation",
-             "response",
-             "recommendations",
-             "findings",
-             "tests",
-             "plan",
-         ]
-         for key in content_keys:
-             if final_output.get(key):
-                 val = final_output[key]
-                 if isinstance(val, str):
-                     return val
-                 if isinstance(val, dict):
-                     # Recursively extract
-                     return _extract_workflow_content(val)
-
-         # If no common key found, try to format the dict nicely
-         # Look for any string value that's substantial
-         for _key, val in final_output.items():
-             if isinstance(val, str) and len(val) > 100:
-                 return val
-
-         # Last resort: return a formatted version
-         import json
-
-         return json.dumps(final_output, indent=2)
-
-     # For lists or other types, convert to string
-     return str(final_output)
-
-
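
The extraction order above (well-known keys first, then any long string, then a JSON dump) is easy to check with a couple of cases; a minimal sketch, assuming the module context of the deleted file:

```python
# Sketch only: _extract_workflow_content prefers well-known keys such
# as "formatted_report" before falling back to any long string value.
nested = {"metadata": {"x": 1}, "formatted_report": "All checks passed."}
assert _extract_workflow_content(nested) == "All checks passed."
assert _extract_workflow_content("plain text") == "plain text"
assert _extract_workflow_content(None) is None
```
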
def cmd_workflow(args):
|
|
2501
|
-
"""Multi-model workflow management and execution.
|
|
2502
|
-
|
|
2503
|
-
Supports listing, describing, and running workflows with tier-based models.
|
|
2504
|
-
|
|
2505
|
-
Args:
|
|
2506
|
-
args: Namespace object from argparse with attributes:
|
|
2507
|
-
- action (str): Action to perform ('list', 'describe', 'run').
|
|
2508
|
-
- name (str | None): Workflow name (for describe/run).
|
|
2509
|
-
- input (str | None): JSON input for workflow execution.
|
|
2510
|
-
- provider (str | None): LLM provider override.
|
|
2511
|
-
- json (bool): If True, output as JSON format.
|
|
2512
|
-
- use_recommended_tier (bool): Enable tier fallback.
|
|
2513
|
-
- write_tests (bool): For test-gen, write tests to files.
|
|
2514
|
-
- output_dir (str | None): For test-gen, output directory.
|
|
2515
|
-
|
|
2516
|
-
Returns:
|
|
2517
|
-
int | None: 0 on success, 1 on failure, None for list action.
|
|
2518
|
-
"""
|
|
2519
|
-
import asyncio
|
|
2520
|
-
import json as json_mod
|
|
2521
|
-
|
|
2522
|
-
action = args.action
|
|
2523
|
-
|
|
2524
|
-
if action == "list":
|
|
2525
|
-
# List available workflows
|
|
2526
|
-
workflows = get_workflow_list()
|
|
2527
|
-
|
|
2528
|
-
if args.json:
|
|
2529
|
-
print(json_mod.dumps(workflows, indent=2))
|
|
2530
|
-
else:
|
|
2531
|
-
print("\n" + "=" * 60)
|
|
2532
|
-
print(" MULTI-MODEL WORKFLOWS")
|
|
2533
|
-
print("=" * 60 + "\n")
|
|
2534
|
-
|
|
2535
|
-
for wf in workflows:
|
|
2536
|
-
print(f" {wf['name']:15} {wf['description']}")
|
|
2537
|
-
stages = " → ".join(f"{s}({wf['tier_map'][s]})" for s in wf["stages"])
|
|
2538
|
-
print(f" Stages: {stages}")
|
|
2539
|
-
print()
|
|
2540
|
-
|
|
2541
|
-
print("-" * 60)
|
|
2542
|
-
print(" Use: empathy workflow describe <name>")
|
|
2543
|
-
print(" Use: empathy workflow run <name> [--input JSON]")
|
|
2544
|
-
print("=" * 60 + "\n")
|
|
2545
|
-
|
|
2546
|
-
elif action == "describe":
|
|
2547
|
-
# Describe a specific workflow
|
|
2548
|
-
name = args.name
|
|
2549
|
-
if not name:
|
|
2550
|
-
print("Error: workflow name required")
|
|
2551
|
-
print("Usage: empathy workflow describe <name>")
|
|
2552
|
-
return 1
|
|
2553
|
-
|
|
2554
|
-
try:
|
|
2555
|
-
workflow_cls = get_workflow(name)
|
|
2556
|
-
provider = getattr(args, "provider", None)
|
|
2557
|
-
workflow = workflow_cls(provider=provider)
|
|
2558
|
-
|
|
2559
|
-
# Get actual provider from workflow (may come from config)
|
|
2560
|
-
actual_provider = getattr(workflow, "_provider_str", provider or "anthropic")
|
|
2561
|
-
|
|
2562
|
-
if args.json:
|
|
2563
|
-
info = {
|
|
2564
|
-
"name": workflow.name,
|
|
2565
|
-
"description": workflow.description,
|
|
2566
|
-
"provider": actual_provider,
|
|
2567
|
-
"stages": workflow.stages,
|
|
2568
|
-
"tier_map": {k: v.value for k, v in workflow.tier_map.items()},
|
|
2569
|
-
"models": {
|
|
2570
|
-
stage: workflow.get_model_for_tier(workflow.tier_map[stage])
|
|
2571
|
-
for stage in workflow.stages
|
|
2572
|
-
},
|
|
2573
|
-
}
|
|
2574
|
-
print(json_mod.dumps(info, indent=2))
|
|
2575
|
-
else:
|
|
2576
|
-
print(f"Provider: {actual_provider}")
|
|
2577
|
-
print(workflow.describe())
|
|
2578
|
-
|
|
2579
|
-
except KeyError as e:
|
|
2580
|
-
print(f"Error: {e}")
|
|
2581
|
-
return 1
|
|
2582
|
-
|
|
2583
|
-
elif action == "run":
|
|
2584
|
-
# Run a workflow
|
|
2585
|
-
name = args.name
|
|
2586
|
-
if not name:
|
|
2587
|
-
print("Error: workflow name required")
|
|
2588
|
-
print('Usage: empathy workflow run <name> --input \'{"key": "value"}\'')
|
|
2589
|
-
return 1
|
|
2590
|
-
|
|
2591
|
-
try:
|
|
2592
|
-
workflow_cls = get_workflow(name)
|
|
2593
|
-
|
|
2594
|
-
# Get provider from CLI arg, or fall back to config's default_provider
|
|
2595
|
-
if args.provider:
|
|
2596
|
-
provider = args.provider
|
|
2597
|
-
else:
|
|
2598
|
-
from attune.workflows.config import WorkflowConfig
|
|
2599
|
-
|
|
2600
|
-
wf_config = WorkflowConfig.load()
|
|
2601
|
-
provider = wf_config.default_provider
|
|
2602
|
-
|
|
2603
|
-
# Initialize workflow with provider and optional tier fallback
|
|
2604
|
-
# Note: Not all workflows support enable_tier_fallback, so we check first
|
|
2605
|
-
import inspect
|
|
2606
|
-
|
|
2607
|
-
use_tier_fallback = getattr(args, "use_recommended_tier", False)
|
|
2608
|
-
|
|
2609
|
-
# Get the workflow's __init__ signature to know what params it accepts
|
|
2610
|
-
init_sig = inspect.signature(workflow_cls.__init__)
|
|
2611
|
-
init_params = set(init_sig.parameters.keys())
|
|
2612
|
-
|
|
2613
|
-
workflow_kwargs = {}
|
|
2614
|
-
|
|
2615
|
-
# Add provider if supported
|
|
2616
|
-
if "provider" in init_params:
|
|
2617
|
-
workflow_kwargs["provider"] = provider
|
|
2618
|
-
|
|
2619
|
-
# Add enable_tier_fallback only if the workflow supports it
|
|
2620
|
-
if "enable_tier_fallback" in init_params and use_tier_fallback:
|
|
2621
|
-
workflow_kwargs["enable_tier_fallback"] = use_tier_fallback
|
|
2622
|
-
|
|
2623
|
-
# Add health-check specific parameters
|
|
2624
|
-
if name == "health-check" and "health_score_threshold" in init_params:
|
|
2625
|
-
health_score_threshold = getattr(args, "health_score_threshold", 100)
|
|
2626
|
-
workflow_kwargs["health_score_threshold"] = health_score_threshold
|
|
2627
|
-
|
|
2628
|
-
workflow = workflow_cls(**workflow_kwargs)
|
|
2629
-
-            # Parse input
-            input_data = {}
-            if args.input:
-                input_data = json_mod.loads(args.input)
-
-            # Add test-gen specific flags to input_data (only for test-gen workflow)
-            if name == "test-gen":
-                if getattr(args, "write_tests", False):
-                    input_data["write_tests"] = True
-                if getattr(args, "output_dir", None):
-                    input_data["output_dir"] = args.output_dir
-
-            # Only print header when not in JSON mode
-            if not args.json:
-                print(f"\n Running workflow: {name} (provider: {provider})")
-                print("=" * 50)
-
-            # Execute workflow
-            result = asyncio.run(workflow.execute(**input_data))
-
-            # Extract the actual content - handle different result types
-            if hasattr(result, "final_output"):
-                output_content = _extract_workflow_content(result.final_output)
-            elif hasattr(result, "metadata") and isinstance(result.metadata, dict):
-                # Check for formatted_report in metadata (e.g., HealthCheckResult)
-                output_content = result.metadata.get("formatted_report")
-                if not output_content and hasattr(result, "summary"):
-                    output_content = result.summary
-            elif hasattr(result, "summary"):
-                output_content = result.summary
-            else:
-                output_content = str(result)
-
-            # Get timing - handle different attribute names
-            duration_ms = getattr(result, "total_duration_ms", None)
-            if duration_ms is None and hasattr(result, "duration_seconds"):
-                duration_ms = int(result.duration_seconds * 1000)
-
-            # Get cost info if available (check cost_report first, then direct cost attribute)
-            cost_report = getattr(result, "cost_report", None)
-            if cost_report and hasattr(cost_report, "total_cost"):
-                total_cost = cost_report.total_cost
-                savings = getattr(cost_report, "savings", 0.0)
-            else:
-                # Fall back to direct cost attribute (e.g., CodeReviewPipelineResult)
-                total_cost = getattr(result, "cost", 0.0)
-                savings = 0.0
-
-            if args.json:
-                # Extract error from various result types
-                error = getattr(result, "error", None)
-                is_successful = getattr(result, "success", getattr(result, "approved", True))
-                if not error and not is_successful:
-                    blockers = getattr(result, "blockers", [])
-                    if blockers:
-                        error = "; ".join(blockers)
-                    else:
-                        metadata = getattr(result, "metadata", {})
-                        error = metadata.get("error") if isinstance(metadata, dict) else None
-
-                # JSON output includes both content and metadata
-                # Include final_output for programmatic access (VSCode panels, etc.)
-                raw_final_output = getattr(result, "final_output", None)
-                if raw_final_output and isinstance(raw_final_output, dict):
-                    # Make a copy to avoid modifying the original
-                    final_output_serializable = {}
-                    for k, v in raw_final_output.items():
-                        # Skip non-serializable items
-                        if isinstance(v, set):
-                            final_output_serializable[k] = list(v)
-                        elif v is None or isinstance(v, str | int | float | bool | list | dict):
-                            final_output_serializable[k] = v
-                        else:
-                            try:
-                                final_output_serializable[k] = str(v)
-                            except Exception as e:  # noqa: BLE001
-                                # INTENTIONAL: Silently skip any non-serializable objects
-                                # This is a best-effort serialization for JSON output
-                                # We cannot predict all possible object types users might return
-                                logger.debug(f"Cannot serialize field {k}: {e}")
-                                pass
-                else:
-                    final_output_serializable = None
-
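The serialization loop above is a best-effort flattener: JSON-native values pass through, sets become lists, and everything else falls back to `str()`. The same idea as a standalone helper (a sketch for illustration, not the package's API):

```python
import json
import logging

logger = logging.getLogger(__name__)

JSON_SAFE = (str, int, float, bool, list, dict)


def to_json_safe(data: dict) -> dict:
    """Return a best-effort copy of data with every value made JSON-serializable."""
    safe = {}
    for key, value in data.items():
        if isinstance(value, set):
            safe[key] = sorted(value)  # sets are not JSON; a sorted list is deterministic
        elif value is None or isinstance(value, JSON_SAFE):
            safe[key] = value
        else:
            try:
                safe[key] = str(value)  # last resort: stringify the object
            except Exception as exc:  # noqa: BLE001
                logger.debug("Cannot serialize field %s: %s", key, exc)
    return safe


print(json.dumps(to_json_safe({"files": {"a.py", "b.py"}, "count": 2}), indent=2))
```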
-                output = {
-                    "success": is_successful,
-                    "output": output_content,
-                    "final_output": final_output_serializable,
-                    "cost": total_cost,
-                    "savings": savings,
-                    "duration_ms": duration_ms or 0,
-                    "error": error,
-                }
-                print(json_mod.dumps(output, indent=2))
-            # Display the actual results - this is what users want to see
-            else:
-                # Show tier progression if tier fallback was used
-                if use_tier_fallback and hasattr(workflow, "_tier_progression"):
-                    tier_progression = workflow._tier_progression
-                    if tier_progression:
-                        print("\n" + "=" * 60)
-                        print(" TIER PROGRESSION (Intelligent Fallback)")
-                        print("=" * 60)
-
-                        # Group by stage
-                        stage_tiers: dict[str, list[tuple[str, bool]]] = {}
-                        for stage, tier, success in tier_progression:
-                            if stage not in stage_tiers:
-                                stage_tiers[stage] = []
-                            stage_tiers[stage].append((tier, success))
-
-                        # Display progression for each stage
-                        for stage, attempts in stage_tiers.items():
-                            status = "✓" if any(success for _, success in attempts) else "✗"
-                            print(f"\n{status} Stage: {stage}")
-
-                            for idx, (tier, success) in enumerate(attempts, 1):
-                                attempt_status = "✓ SUCCESS" if success else "✗ FAILED"
-                                if idx == 1:
-                                    print(f" Attempt {idx}: {tier.upper():8} → {attempt_status}")
-                                else:
-                                    prev_tier = attempts[idx - 2][0]
-                                    print(
-                                        f" Attempt {idx}: {tier.upper():8} → {attempt_status} "
-                                        f"(upgraded from {prev_tier.upper()})"
-                                    )
-
-                        # Calculate cost savings (only if result has stages attribute)
-                        if hasattr(result, "stages") and result.stages:
-                            actual_cost = sum(stage.cost for stage in result.stages if stage.cost)
-                            # Estimate what cost would be if all stages used PREMIUM
-                            premium_cost = actual_cost * 3  # Conservative estimate
-
-                            savings = premium_cost - actual_cost
-                            savings_pct = (savings / premium_cost * 100) if premium_cost > 0 else 0
-
-                            print("\n" + "-" * 60)
-                            print("💰 Cost Savings:")
-                            print(f" Actual cost: ${actual_cost:.4f}")
-                            print(f" Premium cost: ${premium_cost:.4f} (if all PREMIUM)")
-                            print(f" Savings: ${savings:.4f} ({savings_pct:.1f}%)")
-                            print("=" * 60 + "\n")
-
-                # Display workflow result
-                # Handle different result types (success, approved, etc.)
-                is_successful = getattr(result, "success", getattr(result, "approved", True))
-                if is_successful:
-                    if output_content:
-                        print(f"\n{output_content}\n")
-                    else:
-                        print("\n✓ Workflow completed successfully.\n")
-                else:
-                    # Extract error from various result types
-                    error_msg = getattr(result, "error", None)
-                    if not error_msg:
-                        # Check for blockers (CodeReviewPipelineResult)
-                        blockers = getattr(result, "blockers", [])
-                        if blockers:
-                            error_msg = "; ".join(blockers)
-                        else:
-                            # Check metadata for error
-                            metadata = getattr(result, "metadata", {})
-                            error_msg = (
-                                metadata.get("error") if isinstance(metadata, dict) else None
-                            )
-                    error_msg = error_msg or "Unknown error"
-                    print(f"\n✗ Workflow failed: {error_msg}\n")
-
-        except KeyError as e:
-            print(f"Error: {e}")
-            return 1
-        except json_mod.JSONDecodeError as e:
-            print(f"Error parsing input JSON: {e}")
-            return 1
-
-    elif action == "config":
-        # Generate or show workflow configuration
-        from pathlib import Path
-
-        config_path = Path(".attune/workflows.yaml")
-
-        if config_path.exists() and not getattr(args, "force", False):
-            print(f"Config already exists: {config_path}")
-            print("Use --force to overwrite")
-            print("\nCurrent configuration:")
-            print("-" * 40)
-            config = WorkflowConfig.load()
-            print(f" Default provider: {config.default_provider}")
-            if config.workflow_providers:
-                print(" Workflow providers:")
-                for wf, prov in config.workflow_providers.items():
-                    print(f" {wf}: {prov}")
-            if config.custom_models:
-                print(" Custom models configured")
-            return 0
-
-        # Create config directory and file
-        config_path.parent.mkdir(parents=True, exist_ok=True)
-        validated_config_path = _validate_file_path(str(config_path))
-        validated_config_path.write_text(create_example_config())
-        print(f"✓ Created workflow config: {validated_config_path}")
-        print("\nEdit this file to customize:")
-        print(" - Default provider (anthropic, openai, ollama)")
-        print(" - Per-workflow provider overrides")
-        print(" - Custom model mappings")
-        print(" - Model pricing")
-        print("\nOr use environment variables:")
-        print(" EMPATHY_WORKFLOW_PROVIDER=openai")
-        print(" EMPATHY_MODEL_PREMIUM=gpt-5.2")
-
-    else:
-        print(f"Unknown action: {action}")
-        print("Available: list, describe, run, config")
-        return 1
-
-    return 0
-
-
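The `run` branch resolves the provider from the `--provider` flag, then from `WorkflowConfig`, while the `config` branch advertises `EMPATHY_WORKFLOW_PROVIDER` as an env-var alternative. A hedged sketch of that precedence; the `WorkflowConfig` stub is illustrative, and treating the env var as an intermediate layer is an assumption based on the help text, not confirmed package behavior:

```python
import os


class WorkflowConfig:
    """Illustrative stand-in for attune.workflows.config.WorkflowConfig."""

    default_provider = "anthropic"

    @classmethod
    def load(cls):
        return cls()


def resolve_provider(cli_value: str | None) -> str:
    # Assumed precedence: explicit CLI flag > environment > config file default.
    if cli_value:
        return cli_value
    env_value = os.environ.get("EMPATHY_WORKFLOW_PROVIDER")
    if env_value:
        return env_value
    return WorkflowConfig.load().default_provider


print(resolve_provider(None))  # "anthropic" unless the env var is set
```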
-def cmd_frameworks(args):
-    """List and manage agent frameworks.
-
-    Displays available agent frameworks with their capabilities and recommendations.
-
-    Args:
-        args: Namespace object from argparse with attributes:
-            - all (bool): If True, show all frameworks including experimental.
-            - recommend (str | None): Use case for framework recommendation.
-            - json (bool): If True, output as JSON format.
-
-    Returns:
-        int: 0 on success, 1 on failure.
-    """
-    import json as json_mod
-
-    try:
-        from attune_llm.agent_factory import AgentFactory
-        from attune_llm.agent_factory.framework import (
-            get_framework_info,
-            get_recommended_framework,
-        )
-    except ImportError:
-        print("Agent Factory not available. Install empathy-framework with all dependencies.")
-        return 1
-
-    show_all = getattr(args, "all", False)
-    recommend_use_case = getattr(args, "recommend", None)
-    output_json = getattr(args, "json", False)
-
-    if recommend_use_case:
-        # Recommend a framework
-        recommended = get_recommended_framework(recommend_use_case)
-        info = get_framework_info(recommended)
-
-        if output_json:
-            print(
-                json_mod.dumps(
-                    {"use_case": recommend_use_case, "recommended": recommended.value, **info},
-                    indent=2,
-                ),
-            )
-        else:
-            print(f"\nRecommended framework for '{recommend_use_case}': {info['name']}")
-            print(f" Best for: {', '.join(info['best_for'])}")
-            if info.get("install_command"):
-                print(f" Install: {info['install_command']}")
-            print()
-        return 0
-
-    # List frameworks
-    frameworks = AgentFactory.list_frameworks(installed_only=not show_all)
-
-    if output_json:
-        print(
-            json_mod.dumps(
-                [
-                    {
-                        "id": f["framework"].value,
-                        "name": f["name"],
-                        "installed": f["installed"],
-                        "best_for": f["best_for"],
-                        "install_command": f.get("install_command"),
-                    }
-                    for f in frameworks
-                ],
-                indent=2,
-            ),
-        )
-    else:
-        print("\n" + "=" * 60)
-        print(" AGENT FRAMEWORKS")
-        print("=" * 60 + "\n")
-
-        for f in frameworks:
-            status = "INSTALLED" if f["installed"] else "not installed"
-            print(f" {f['name']:20} [{status}]")
-            print(f" Best for: {', '.join(f['best_for'][:3])}")
-            if not f["installed"] and f.get("install_command"):
-                print(f" Install: {f['install_command']}")
-            print()
-
-        print("-" * 60)
-        print(" Use: empathy frameworks --recommend <use_case>")
-        print(" Use cases: general, rag, multi_agent, code_analysis")
-        print("=" * 60 + "\n")
-
-    return 0
-
-
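`AgentFactory.list_frameworks(installed_only=...)` presumably probes which optional packages are importable. One plausible way to implement such a check is `importlib.util.find_spec`; a sketch with a made-up registry (the module names, fields, and helper below are illustrative, not the package's data or API):

```python
import importlib.util

# Hypothetical registry; the real AgentFactory metadata will differ.
FRAMEWORKS = [
    {"name": "LangChain", "module": "langchain", "best_for": ["general", "rag"]},
    {"name": "AutoGen", "module": "autogen", "best_for": ["multi_agent"]},
]


def list_frameworks(installed_only: bool = True) -> list[dict]:
    """Return registry entries, annotated with whether the module is importable."""
    results = []
    for fw in FRAMEWORKS:
        installed = importlib.util.find_spec(fw["module"]) is not None
        if installed_only and not installed:
            continue
        results.append({**fw, "installed": installed})
    return results


for fw in list_frameworks(installed_only=False):
    print(fw["name"], "installed" if fw["installed"] else "not installed")
```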
-# =============================================================================
-# Telemetry CLI Command Wrappers
-# =============================================================================
-
-
-def _cmd_telemetry_show(args):
-    """Wrapper for telemetry show command."""
-    if not TELEMETRY_CLI_AVAILABLE:
-        print("Telemetry commands not available. Install telemetry dependencies.")
-        return 1
-    return cmd_telemetry_show(args)
-
-
-def _cmd_telemetry_savings(args):
-    """Wrapper for telemetry savings command."""
-    if not TELEMETRY_CLI_AVAILABLE:
-        print("Telemetry commands not available. Install telemetry dependencies.")
-        return 1
-    return cmd_telemetry_savings(args)
-
-
-def _cmd_telemetry_compare(args):
-    """Wrapper for telemetry compare command."""
-    if not TELEMETRY_CLI_AVAILABLE:
-        print("Telemetry commands not available. Install telemetry dependencies.")
-        return 1
-    return cmd_telemetry_compare(args)
-
-
-def _cmd_telemetry_reset(args):
-    """Wrapper for telemetry reset command."""
-    if not TELEMETRY_CLI_AVAILABLE:
-        print("Telemetry commands not available. Install telemetry dependencies.")
-        return 1
-    return cmd_telemetry_reset(args)
-
-
-def _cmd_telemetry_export(args):
-    """Wrapper for telemetry export command."""
-    if not TELEMETRY_CLI_AVAILABLE:
-        print("Telemetry commands not available. Install telemetry dependencies.")
-        return 1
-    return cmd_telemetry_export(args)
-
-
-def _cmd_tier1_status(args):
-    """Wrapper for tier1 status command."""
-    if not TELEMETRY_CLI_AVAILABLE:
-        print("Tier 1 monitoring commands not available. Install telemetry dependencies.")
-        return 1
-    return cmd_tier1_status(args)
-
-
-def _cmd_task_routing_report(args):
-    """Wrapper for task routing report command."""
-    if not TELEMETRY_CLI_AVAILABLE:
-        print("Tier 1 monitoring commands not available. Install telemetry dependencies.")
-        return 1
-    return cmd_task_routing_report(args)
-
-
-def _cmd_test_status(args):
-    """Wrapper for test status command."""
-    if not TELEMETRY_CLI_AVAILABLE:
-        print("Tier 1 monitoring commands not available. Install telemetry dependencies.")
-        return 1
-    return cmd_test_status(args)
-
-
-def _cmd_file_test_status(args):
-    """Wrapper for per-file test status command."""
-    if not TELEMETRY_CLI_AVAILABLE:
-        print("Tier 1 monitoring commands not available. Install telemetry dependencies.")
-        return 1
-    return cmd_file_test_status(args)
-
-
-def _cmd_file_test_dashboard(args):
-    """Wrapper for file test dashboard command."""
-    if not TELEMETRY_CLI_AVAILABLE:
-        print("Tier 1 monitoring commands not available. Install telemetry dependencies.")
-        return 1
-    return cmd_file_test_dashboard(args)
-
-
-def _cmd_agent_performance(args):
-    """Wrapper for agent performance command."""
-    if not TELEMETRY_CLI_AVAILABLE:
-        print("Tier 1 monitoring commands not available. Install telemetry dependencies.")
-        return 1
-    return cmd_agent_performance(args)
-
-
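The eleven wrappers above repeat the same availability guard. Had this section been refactored rather than removed, a parameterized decorator could express the gate once; a minimal sketch under that assumption (the command function and flag value here are stand-ins):

```python
from functools import wraps

TELEMETRY_CLI_AVAILABLE = False  # stand-in for the module-level import guard


def requires_telemetry(message: str):
    """Return a decorator that short-circuits when telemetry deps are missing."""

    def decorator(func):
        @wraps(func)
        def wrapper(args):
            if not TELEMETRY_CLI_AVAILABLE:
                print(message)
                return 1
            return func(args)

        return wrapper

    return decorator


@requires_telemetry("Telemetry commands not available. Install telemetry dependencies.")
def cmd_show(args):
    # Illustrative body; a real command would render telemetry here.
    print("showing telemetry for", args)
    return 0


print(cmd_show(object()))  # prints the install hint and returns 1
```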
-def main():
-    """Main CLI entry point"""
-    # Configure Windows-compatible asyncio event loop policy
-    setup_asyncio_policy()
-
-    parser = argparse.ArgumentParser(
-        prog="empathy",
-        description="Empathy - Build AI systems with 5 levels of empathy",
-    )
-
-    subparsers = parser.add_subparsers(dest="command", help="Available commands")
-
-    # Version command
-    parser_version = subparsers.add_parser("version", help="Display version information")
-    parser_version.set_defaults(func=cmd_version)
-
-    # Init command
-    parser_init = subparsers.add_parser("init", help="Initialize a new project")
-    parser_init.add_argument(
-        "--format",
-        choices=["yaml", "json"],
-        default="yaml",
-        help="Configuration format (default: yaml)",
-    )
-    parser_init.add_argument("--output", "-o", help="Output file path")
-    parser_init.set_defaults(func=cmd_init)
-
-    # Validate command
-    parser_validate = subparsers.add_parser("validate", help="Validate configuration file")
-    parser_validate.add_argument("config", help="Path to configuration file")
-    parser_validate.set_defaults(func=cmd_validate)
-
-    # Info command
-    parser_info = subparsers.add_parser("info", help="Display framework information")
-    parser_info.add_argument("--config", "-c", help="Configuration file")
-    parser_info.set_defaults(func=cmd_info)
-
-    # Patterns commands
-    parser_patterns = subparsers.add_parser("patterns", help="Pattern library commands")
-    patterns_subparsers = parser_patterns.add_subparsers(dest="patterns_command")
-
-    # Patterns list
-    parser_patterns_list = patterns_subparsers.add_parser("list", help="List patterns in library")
-    parser_patterns_list.add_argument("library", help="Path to pattern library file")
-    parser_patterns_list.add_argument(
-        "--format",
-        choices=["json", "sqlite"],
-        default="json",
-        help="Library format (default: json)",
-    )
-    parser_patterns_list.set_defaults(func=cmd_patterns_list)
-
-    # Patterns export
-    parser_patterns_export = patterns_subparsers.add_parser("export", help="Export patterns")
-    parser_patterns_export.add_argument("input", help="Input file path")
-    parser_patterns_export.add_argument("output", help="Output file path")
-    parser_patterns_export.add_argument(
-        "--input-format",
-        choices=["json", "sqlite"],
-        default="json",
-    )
-    parser_patterns_export.add_argument(
-        "--output-format",
-        choices=["json", "sqlite"],
-        default="json",
-    )
-    parser_patterns_export.set_defaults(func=cmd_patterns_export)
-
-    # Patterns resolve - mark investigating bugs as resolved
-    parser_patterns_resolve = patterns_subparsers.add_parser(
-        "resolve",
-        help="Resolve investigating bug patterns",
-    )
-    parser_patterns_resolve.add_argument(
-        "bug_id",
-        nargs="?",
-        help="Bug ID to resolve (omit to list investigating)",
-    )
-    parser_patterns_resolve.add_argument("--root-cause", help="Description of the root cause")
-    parser_patterns_resolve.add_argument("--fix", help="Description of the fix applied")
-    parser_patterns_resolve.add_argument("--fix-code", help="Code snippet of the fix")
-    parser_patterns_resolve.add_argument("--time", type=int, help="Resolution time in minutes")
-    parser_patterns_resolve.add_argument(
-        "--resolved-by",
-        default="@developer",
-        help="Who resolved it",
-    )
-    parser_patterns_resolve.add_argument(
-        "--patterns-dir",
-        default="./patterns",
-        help="Path to patterns directory",
-    )
-    parser_patterns_resolve.add_argument(
-        "--no-regenerate",
-        action="store_true",
-        help="Skip regenerating summary",
-    )
-    parser_patterns_resolve.set_defaults(func=cmd_patterns_resolve)
-
-    # Metrics commands
-    parser_metrics = subparsers.add_parser("metrics", help="Metrics commands")
-    metrics_subparsers = parser_metrics.add_subparsers(dest="metrics_command")
-
-    # Metrics show
-    parser_metrics_show = metrics_subparsers.add_parser("show", help="Show user metrics")
-    parser_metrics_show.add_argument("user", help="User ID")
-    parser_metrics_show.add_argument("--db", default="./metrics.db", help="Metrics database path")
-    parser_metrics_show.set_defaults(func=cmd_metrics_show)
-
-    # State commands
-    parser_state = subparsers.add_parser("state", help="State management commands")
-    state_subparsers = parser_state.add_subparsers(dest="state_command")
-
-    # State list
-    parser_state_list = state_subparsers.add_parser("list", help="List saved states")
-    parser_state_list.add_argument(
-        "--state-dir",
-        default="./empathy_state",
-        help="State directory path",
-    )
-    parser_state_list.set_defaults(func=cmd_state_list)
-
-    # Run command (Interactive REPL)
-    parser_run = subparsers.add_parser("run", help="Interactive REPL mode")
-    parser_run.add_argument("--config", "-c", help="Configuration file path")
-    parser_run.add_argument("--user-id", help="User ID (default: cli_user)")
-    parser_run.add_argument(
-        "--level",
-        type=int,
-        default=4,
-        help="Target empathy level (1-5, default: 4)",
-    )
-    parser_run.set_defaults(func=cmd_run)
-
-    # Inspect command (Unified inspection)
-    parser_inspect = subparsers.add_parser("inspect", help="Inspect patterns, metrics, or state")
-    parser_inspect.add_argument(
-        "type",
-        choices=["patterns", "metrics", "state"],
-        help="Type of inspection (patterns, metrics, or state)",
-    )
-    parser_inspect.add_argument("--user-id", help="User ID to filter by (optional)")
-    parser_inspect.add_argument("--db", help="Database path (default: .attune/patterns.db)")
-    parser_inspect.add_argument(
-        "--state-dir",
-        help="State directory path (default: .attune/state)",
-    )
-    parser_inspect.set_defaults(func=cmd_inspect)
-
-    # Export command
-    parser_export = subparsers.add_parser(
-        "export",
-        help="Export patterns to file for sharing/backup",
-    )
-    parser_export.add_argument("output", help="Output file path")
-    parser_export.add_argument(
-        "--user-id",
-        help="User ID to export (optional, exports all if not specified)",
-    )
-    parser_export.add_argument("--db", help="Database path (default: .attune/patterns.db)")
-    parser_export.add_argument(
-        "--format",
-        default="json",
-        choices=["json"],
-        help="Export format (default: json)",
-    )
-    parser_export.set_defaults(func=cmd_export)
-
-    # Import command
-    parser_import = subparsers.add_parser("import", help="Import patterns from file")
-    parser_import.add_argument("input", help="Input file path")
-    parser_import.add_argument("--db", help="Database path (default: .attune/patterns.db)")
-    parser_import.set_defaults(func=cmd_import)
-
-    # Workflow command (Interactive setup)
-    parser_workflow = subparsers.add_parser(
-        "workflow",
-        help="Interactive setup workflow for creating configuration",
-    )
-    parser_workflow.set_defaults(func=cmd_workflow)
-
-    # Provider command (Model provider configuration)
-    parser_provider = subparsers.add_parser(
-        "provider",
-        help="Configure model providers and hybrid mode",
-    )
-    provider_subparsers = parser_provider.add_subparsers(dest="provider_cmd")
-
-    # provider hybrid - Interactive hybrid configuration
-    parser_provider_hybrid = provider_subparsers.add_parser(
-        "hybrid",
-        help="Configure hybrid mode - pick best models for each tier",
-    )
-    parser_provider_hybrid.set_defaults(func=cmd_provider_hybrid)
-
-    # provider show - Show current configuration
-    parser_provider_show = provider_subparsers.add_parser(
-        "show",
-        help="Show current provider configuration",
-    )
-    parser_provider_show.set_defaults(func=cmd_provider_show)
-
-    # provider set - Quick set single provider
-    parser_provider_set = provider_subparsers.add_parser(
-        "set",
-        help="Set default provider (anthropic, openai, google, ollama)",
-    )
-    parser_provider_set.add_argument(
-        "name",
-        choices=["anthropic", "openai", "google", "ollama", "hybrid"],
-        help="Provider name",
-    )
-    parser_provider_set.set_defaults(func=cmd_provider_set)
-
-    # Status command (Session status assistant)
-    parser_status = subparsers.add_parser(
-        "status",
-        help="Session status - prioritized project status report",
-    )
-    parser_status.add_argument(
-        "--patterns-dir",
-        default="./patterns",
-        help="Path to patterns directory",
-    )
-    parser_status.add_argument("--project-root", default=".", help="Project root directory")
-    parser_status.add_argument(
-        "--force",
-        action="store_true",
-        help="Force show status regardless of inactivity",
-    )
-    parser_status.add_argument("--full", action="store_true", help="Show all items (no limit)")
-    parser_status.add_argument("--json", action="store_true", help="Output as JSON")
-    parser_status.add_argument("--select", type=int, help="Select an item to get its action prompt")
-    parser_status.add_argument(
-        "--inactivity",
-        type=int,
-        default=60,
-        help="Inactivity threshold in minutes (default: 60)",
-    )
-    parser_status.set_defaults(func=cmd_status)
-
-    # Review command (Pattern-based code review)
-    parser_review = subparsers.add_parser(
-        "review",
-        help="Pattern-based code review against historical bugs",
-    )
-    parser_review.add_argument("files", nargs="*", help="Files to review (default: recent changes)")
-    parser_review.add_argument("--staged", action="store_true", help="Review staged changes only")
-    parser_review.add_argument(
-        "--severity",
-        choices=["info", "warning", "error"],
-        default="info",
-        help="Minimum severity to report (default: info)",
-    )
-    parser_review.add_argument("--patterns-dir", default="./patterns", help="Patterns directory")
-    parser_review.add_argument("--json", action="store_true", help="Output as JSON")
-    parser_review.set_defaults(func=cmd_review)
-
-    # Health command (Code Health Assistant)
-    parser_health = subparsers.add_parser(
-        "health",
-        help="Code health assistant - run checks and auto-fix issues",
-    )
-    parser_health.add_argument(
-        "--deep",
-        action="store_true",
-        help="Run comprehensive checks (slower)",
-    )
-    parser_health.add_argument(
-        "--check",
-        choices=["lint", "format", "types", "tests", "security", "deps"],
-        help="Run specific check only",
-    )
-    parser_health.add_argument("--fix", action="store_true", help="Auto-fix issues where possible")
-    parser_health.add_argument(
-        "--dry-run",
-        action="store_true",
-        help="Show what would be fixed without applying",
-    )
-    parser_health.add_argument(
-        "--interactive",
-        action="store_true",
-        help="Prompt before applying non-safe fixes",
-    )
-    parser_health.add_argument("--details", action="store_true", help="Show detailed issue list")
-    parser_health.add_argument(
-        "--full",
-        action="store_true",
-        help="Show full report with all details",
-    )
-    parser_health.add_argument(
-        "--trends",
-        type=int,
-        metavar="DAYS",
-        help="Show health trends over N days",
-    )
-    parser_health.add_argument(
-        "--project-root",
-        default=".",
-        help="Project root directory (default: current)",
-    )
-    parser_health.add_argument("--json", action="store_true", help="Output as JSON")
-    parser_health.set_defaults(func=cmd_health)
-
-    # =========================================================================
-    # POWER USER WORKFLOWS (v2.4+)
-    # =========================================================================
-
-    # Morning command (start-of-day briefing)
-    parser_morning = subparsers.add_parser(
-        "morning",
-        help="Start-of-day briefing with patterns, debt, and focus areas",
-    )
-    parser_morning.add_argument(
-        "--patterns-dir",
-        default="./patterns",
-        help="Path to patterns directory",
-    )
-    parser_morning.add_argument("--project-root", default=".", help="Project root directory")
-    parser_morning.add_argument("--verbose", "-v", action="store_true", help="Show detailed output")
-    parser_morning.set_defaults(func=cmd_morning)
-
-    # Ship command (pre-commit validation)
-    parser_ship = subparsers.add_parser("ship", help="Pre-commit validation pipeline")
-    parser_ship.add_argument(
-        "--patterns-dir",
-        default="./patterns",
-        help="Path to patterns directory",
-    )
-    parser_ship.add_argument("--project-root", default=".", help="Project root directory")
-    parser_ship.add_argument(
-        "--skip-sync",
-        action="store_true",
-        help="Skip syncing patterns to Claude",
-    )
-    parser_ship.add_argument(
-        "--tests-only",
-        action="store_true",
-        help="Run tests only (skip lint/format checks)",
-    )
-    parser_ship.add_argument(
-        "--security-only",
-        action="store_true",
-        help="Run security checks only",
-    )
-    parser_ship.add_argument("--verbose", "-v", action="store_true", help="Show detailed output")
-    parser_ship.set_defaults(func=cmd_ship)
-
-    # Fix-all command (auto-fix everything)
-    parser_fix_all = subparsers.add_parser(
-        "fix-all",
-        help="Auto-fix all fixable lint and format issues",
-    )
-    parser_fix_all.add_argument("--project-root", default=".", help="Project root directory")
-    parser_fix_all.add_argument(
-        "--dry-run",
-        action="store_true",
-        help="Show what would be fixed without applying",
-    )
-    parser_fix_all.add_argument("--verbose", "-v", action="store_true", help="Show detailed output")
-    parser_fix_all.set_defaults(func=cmd_fix_all)
-
-    # Learn command (pattern learning from git history)
-    parser_learn = subparsers.add_parser(
-        "learn",
-        help="Learn patterns from git history and bug fixes",
-    )
-    parser_learn.add_argument(
-        "--patterns-dir",
-        default="./patterns",
-        help="Path to patterns directory",
-    )
-    parser_learn.add_argument(
-        "--analyze",
-        type=int,
-        metavar="N",
-        help="Analyze last N commits (default: 10)",
-    )
-    parser_learn.add_argument(
-        "--watch",
-        action="store_true",
-        help="Watch for new commits (not yet implemented)",
-    )
-    parser_learn.add_argument("--verbose", "-v", action="store_true", help="Show detailed output")
-    parser_learn.set_defaults(func=cmd_learn)
-
-    # Costs command (cost tracking dashboard)
-    parser_costs = subparsers.add_parser(
-        "costs",
-        help="View API cost tracking and savings from model routing",
-    )
-    parser_costs.add_argument(
-        "--days",
-        type=int,
-        default=7,
-        help="Number of days to include (default: 7)",
-    )
-    parser_costs.add_argument("--empathy-dir", default=".empathy", help="Empathy data directory")
-    parser_costs.add_argument("--json", action="store_true", help="Output as JSON")
-    parser_costs.set_defaults(func=cmd_costs)
-
-    # Telemetry commands (usage tracking)
-    parser_telemetry = subparsers.add_parser(
-        "telemetry",
-        help="View and manage local usage telemetry",
-    )
-    telemetry_subparsers = parser_telemetry.add_subparsers(dest="telemetry_command")
-
-    # Telemetry show command
-    parser_telemetry_show = telemetry_subparsers.add_parser(
-        "show",
-        help="Show recent LLM calls",
-    )
-    parser_telemetry_show.add_argument(
-        "--limit",
-        type=int,
-        default=20,
-        help="Number of entries to show (default: 20)",
-    )
-    parser_telemetry_show.add_argument(
-        "--days",
-        type=int,
-        help="Only show entries from last N days",
-    )
-    parser_telemetry_show.set_defaults(func=lambda args: _cmd_telemetry_show(args))
-
-    # Telemetry savings command
-    parser_telemetry_savings = telemetry_subparsers.add_parser(
-        "savings",
-        help="Calculate cost savings vs baseline",
-    )
-    parser_telemetry_savings.add_argument(
-        "--days",
-        type=int,
-        default=30,
-        help="Number of days to analyze (default: 30)",
-    )
-    parser_telemetry_savings.set_defaults(func=lambda args: _cmd_telemetry_savings(args))
-
-    # Telemetry compare command
-    parser_telemetry_compare = telemetry_subparsers.add_parser(
-        "compare",
-        help="Compare two time periods",
-    )
-    parser_telemetry_compare.add_argument(
-        "--period1",
-        type=int,
-        default=7,
-        help="First period in days (default: 7)",
-    )
-    parser_telemetry_compare.add_argument(
-        "--period2",
-        type=int,
-        default=30,
-        help="Second period in days (default: 30)",
-    )
-    parser_telemetry_compare.set_defaults(func=lambda args: _cmd_telemetry_compare(args))
-
-    # Telemetry reset command
-    parser_telemetry_reset = telemetry_subparsers.add_parser(
-        "reset",
-        help="Clear all telemetry data",
-    )
-    parser_telemetry_reset.add_argument(
-        "--confirm",
-        action="store_true",
-        help="Confirm deletion",
-    )
-    parser_telemetry_reset.set_defaults(func=lambda args: _cmd_telemetry_reset(args))
-
-    # Telemetry export command
-    parser_telemetry_export = telemetry_subparsers.add_parser(
-        "export",
-        help="Export telemetry data",
-    )
-    parser_telemetry_export.add_argument(
-        "--format",
-        choices=["json", "csv"],
-        default="json",
-        help="Export format (default: json)",
-    )
-    parser_telemetry_export.add_argument(
-        "--output",
-        "-o",
-        help="Output file (default: stdout)",
-    )
-    parser_telemetry_export.add_argument(
-        "--days",
-        type=int,
-        help="Only export last N days",
-    )
-    parser_telemetry_export.set_defaults(func=lambda args: _cmd_telemetry_export(args))
-
-    # Progressive workflow commands (tier escalation)
-    parser_progressive = subparsers.add_parser(
-        "progressive",
-        help="Manage progressive tier escalation workflows",
-    )
-    progressive_subparsers = parser_progressive.add_subparsers(dest="progressive_command")
-
-    # Progressive list command
-    parser_progressive_list = progressive_subparsers.add_parser(
-        "list",
-        help="List all saved progressive workflow results",
-    )
-    parser_progressive_list.add_argument(
-        "--storage-path",
-        help="Path to progressive workflow storage (default: .attune/progressive_runs)",
-    )
-    parser_progressive_list.set_defaults(func=lambda args: cmd_list_results(args))
-
-    # Progressive show command
-    parser_progressive_show = progressive_subparsers.add_parser(
-        "show",
-        help="Show detailed report for a specific task",
-    )
-    parser_progressive_show.add_argument(
-        "task_id",
-        type=str,
-        help="Task ID to display",
-    )
-    parser_progressive_show.add_argument(
-        "--storage-path",
-        help="Path to progressive workflow storage (default: .attune/progressive_runs)",
-    )
-    parser_progressive_show.add_argument(
-        "--json",
-        action="store_true",
-        help="Output in JSON format",
-    )
-    parser_progressive_show.set_defaults(func=lambda args: cmd_show_report(args))
-
-    # Progressive analytics command
-    parser_progressive_analytics = progressive_subparsers.add_parser(
-        "analytics",
-        help="Show cost optimization analytics",
-    )
-    parser_progressive_analytics.add_argument(
-        "--storage-path",
-        help="Path to progressive workflow storage (default: .attune/progressive_runs)",
-    )
-    parser_progressive_analytics.add_argument(
-        "--json",
-        action="store_true",
-        help="Output in JSON format",
-    )
-    parser_progressive_analytics.set_defaults(func=lambda args: cmd_analytics(args))
-
-    # Progressive cleanup command
-    parser_progressive_cleanup = progressive_subparsers.add_parser(
-        "cleanup",
-        help="Clean up old progressive workflow results",
-    )
-    parser_progressive_cleanup.add_argument(
-        "--storage-path",
-        help="Path to progressive workflow storage (default: .attune/progressive_runs)",
-    )
-    parser_progressive_cleanup.add_argument(
-        "--retention-days",
-        type=int,
-        default=30,
-        help="Number of days to retain results (default: 30)",
-    )
-    parser_progressive_cleanup.add_argument(
-        "--dry-run",
-        action="store_true",
-        help="Show what would be deleted without actually deleting",
-    )
-    parser_progressive_cleanup.set_defaults(func=lambda args: cmd_cleanup(args))
-
-    # Tier 1 automation monitoring commands
-
-    # tier1 command - comprehensive status
-    parser_tier1 = subparsers.add_parser(
-        "tier1",
-        help="Show Tier 1 automation status (tasks, tests, coverage, agents)",
-    )
-    parser_tier1.add_argument(
-        "--hours",
-        type=int,
-        default=24,
-        help="Hours to analyze (default: 24)",
-    )
-    parser_tier1.set_defaults(func=lambda args: _cmd_tier1_status(args))
-
-    # tasks command - task routing report
-    parser_tasks = subparsers.add_parser(
-        "tasks",
-        help="Show task routing report",
-    )
-    parser_tasks.add_argument(
-        "--hours",
-        type=int,
-        default=24,
-        help="Hours to analyze (default: 24)",
-    )
-    parser_tasks.set_defaults(func=lambda args: _cmd_task_routing_report(args))
-
-    # tests command - test execution status
-    parser_tests = subparsers.add_parser(
-        "tests",
-        help="Show test execution status",
-    )
-    parser_tests.add_argument(
-        "--hours",
-        type=int,
-        default=24,
-        help="Hours to analyze (default: 24)",
-    )
-    parser_tests.set_defaults(func=lambda args: _cmd_test_status(args))
-
-    # file-tests command - per-file test status
-    parser_file_tests = subparsers.add_parser(
-        "file-tests",
-        help="Show per-file test status (last tested, pass/fail, staleness)",
-    )
-    parser_file_tests.add_argument(
-        "--file",
-        type=str,
-        help="Check specific file path",
-    )
-    parser_file_tests.add_argument(
-        "--failed",
-        action="store_true",
-        help="Show only files with failing tests",
-    )
-    parser_file_tests.add_argument(
-        "--stale",
-        action="store_true",
-        help="Show only files with stale tests",
-    )
-    parser_file_tests.add_argument(
-        "--limit",
-        type=int,
-        default=50,
-        help="Maximum files to show (default: 50)",
-    )
-    parser_file_tests.set_defaults(func=lambda args: _cmd_file_test_status(args))
-
-    # file-test-dashboard command - interactive dashboard
-    parser_file_dashboard = subparsers.add_parser(
-        "file-test-dashboard",
-        help="Open interactive file test status dashboard in browser",
-    )
-    parser_file_dashboard.add_argument(
-        "--port",
-        type=int,
-        default=8765,
-        help="Port to serve dashboard on (default: 8765)",
-    )
-    parser_file_dashboard.set_defaults(func=lambda args: _cmd_file_test_dashboard(args))
-
-    # agents command - agent performance
-    parser_agents = subparsers.add_parser(
-        "agents",
-        help="Show agent performance metrics",
-    )
-    parser_agents.add_argument(
-        "--hours",
-        type=int,
-        default=168,
-        help="Hours to analyze (default: 168 / 7 days)",
-    )
-    parser_agents.set_defaults(func=lambda args: _cmd_agent_performance(args))
-
-    # New command (project scaffolding)
-    parser_new = subparsers.add_parser("new", help="Create a new project from a template")
-    parser_new.add_argument(
-        "template",
-        nargs="?",
-        help="Template name (minimal, python-cli, python-fastapi, python-agent)",
-    )
-    parser_new.add_argument("name", nargs="?", help="Project name")
-    parser_new.add_argument("--output", "-o", help="Output directory (default: ./<project-name>)")
-    parser_new.add_argument("--force", "-f", action="store_true", help="Overwrite existing files")
-    parser_new.add_argument("--list", "-l", action="store_true", help="List available templates")
-    parser_new.set_defaults(func=cmd_new)
-
-    # Frameworks command (agent framework management)
-    parser_frameworks = subparsers.add_parser(
-        "frameworks",
-        help="List and manage agent frameworks (LangChain, LangGraph, AutoGen, Haystack)",
-    )
-    parser_frameworks.add_argument(
-        "--all",
-        action="store_true",
-        help="Show all frameworks including uninstalled",
-    )
-    parser_frameworks.add_argument(
-        "--recommend",
-        metavar="USE_CASE",
-        help="Recommend framework for use case (general, rag, multi_agent, code_analysis)",
-    )
-    parser_frameworks.add_argument("--json", action="store_true", help="Output as JSON")
-    parser_frameworks.set_defaults(func=cmd_frameworks)
-
-    # Workflow command (multi-model workflow management)
-    parser_workflow = subparsers.add_parser(
-        "workflow",
-        help="Multi-model workflows for cost-optimized task pipelines",
-    )
-    parser_workflow.add_argument(
-        "action",
-        choices=["list", "describe", "run", "config"],
-        help="Action: list, describe, run, or config",
-    )
-    parser_workflow.add_argument(
-        "name",
-        nargs="?",
-        help="Workflow name (for describe/run)",
-    )
-    parser_workflow.add_argument(
-        "--input",
-        "-i",
-        help="JSON input data for workflow execution",
-    )
-    parser_workflow.add_argument(
-        "--provider",
-        "-p",
-        choices=["anthropic", "openai", "google", "ollama", "hybrid"],
-        default=None,  # None means use config
-        help="Model provider: anthropic, openai, google, ollama, or hybrid (mix of best models)",
-    )
-    parser_workflow.add_argument(
-        "--force",
-        action="store_true",
-        help="Force overwrite existing config file",
-    )
-    parser_workflow.add_argument("--json", action="store_true", help="Output as JSON")
-    parser_workflow.add_argument(
-        "--use-recommended-tier",
-        action="store_true",
-        help="Enable intelligent tier fallback: start with CHEAP tier and automatically upgrade if quality gates fail",
-    )
-    parser_workflow.add_argument(
-        "--write-tests",
-        action="store_true",
-        help="(test-gen workflow) Write generated tests to disk",
-    )
-    parser_workflow.add_argument(
-        "--output-dir",
-        default="tests/generated",
-        help="(test-gen workflow) Output directory for generated tests",
-    )
-    parser_workflow.add_argument(
-        "--health-score-threshold",
-        type=int,
-        default=95,
-        help="(health-check workflow) Minimum health score required (0-100, default: 95 for very strict quality)",
-    )
-    parser_workflow.set_defaults(func=cmd_workflow)
-
-    # Sync-claude command (sync patterns to Claude Code)
-    parser_sync_claude = subparsers.add_parser(
-        "sync-claude",
-        help="Sync learned patterns to Claude Code rules",
-    )
-    parser_sync_claude.add_argument(
-        "--patterns-dir",
-        default="./patterns",
-        help="Path to patterns directory",
-    )
-    parser_sync_claude.add_argument(
-        "--output-dir",
-        default=".claude/rules/empathy",
-        help="Output directory for Claude rules (default: .claude/rules/empathy)",
-    )
-    parser_sync_claude.set_defaults(func=cmd_sync_claude)
-
-    # =========================================================================
-    # USER EXPERIENCE COMMANDS (v2.5+)
-    # =========================================================================
-
-    # Cheatsheet command (quick reference)
-    parser_cheatsheet = subparsers.add_parser("cheatsheet", help="Quick reference of all commands")
-    parser_cheatsheet.add_argument(
-        "category",
-        nargs="?",
-        help="Category to show (getting-started, daily-workflow, code-quality, etc.)",
-    )
-    parser_cheatsheet.add_argument(
-        "--compact",
-        action="store_true",
-        help="Show commands only without descriptions",
-    )
-    parser_cheatsheet.set_defaults(func=cmd_cheatsheet)
-
-    # Onboard command (interactive tutorial)
-    parser_onboard = subparsers.add_parser(
-        "onboard",
-        help="Interactive onboarding tutorial for new users",
-    )
-    parser_onboard.add_argument("--step", type=int, help="Jump to a specific step (1-5)")
-    parser_onboard.add_argument("--reset", action="store_true", help="Reset onboarding progress")
-    parser_onboard.set_defaults(func=cmd_onboard)
-
-    # Explain command (detailed command explanations)
-    parser_explain = subparsers.add_parser(
-        "explain",
-        help="Get detailed explanation of how a command works",
-    )
-    parser_explain.add_argument(
-        "command",
-        choices=["morning", "ship", "learn", "health", "sync-claude"],
-        help="Command to explain",
-    )
-    parser_explain.set_defaults(func=cmd_explain)
-
-    # Achievements command (progress tracking)
-    parser_achievements = subparsers.add_parser(
-        "achievements",
-        help="View your usage statistics and achievements",
-    )
-    parser_achievements.set_defaults(func=cmd_achievements)
-
-    # Tier recommendation commands (cascading tier optimization)
-    parser_tier = subparsers.add_parser(
-        "tier",
-        help="Intelligent tier recommendations for cascading workflows",
-    )
-    tier_subparsers = parser_tier.add_subparsers(dest="tier_command")
-
-    # tier recommend
-    parser_tier_recommend = tier_subparsers.add_parser(
-        "recommend",
-        help="Get tier recommendation for a bug/task",
-    )
-    parser_tier_recommend.add_argument(
-        "description",
-        help="Description of the bug or task",
-    )
-    parser_tier_recommend.add_argument(
-        "--files",
-        help="Comma-separated list of affected files (optional)",
-    )
-    parser_tier_recommend.add_argument(
-        "--complexity",
-        type=int,
-        help="Manual complexity hint 1-10 (optional)",
-    )
-    parser_tier_recommend.set_defaults(func=cmd_tier_recommend)
-
-    # tier stats
-    parser_tier_stats = tier_subparsers.add_parser(
-        "stats",
-        help="Show tier pattern learning statistics",
-    )
-    parser_tier_stats.set_defaults(func=cmd_tier_stats)
-
-    # Orchestrate command (meta-orchestration workflows)
-    parser_orchestrate = subparsers.add_parser(
-        "orchestrate",
-        help="Run meta-orchestration workflows (release-prep, health-check)",
-    )
-    parser_orchestrate.add_argument(
-        "workflow",
-        choices=["release-prep", "health-check"],
-        help="Workflow to execute (test-coverage disabled in v4.0.0)",
-    )
-    parser_orchestrate.add_argument(
-        "--project-root",
-        default=".",
-        help="Project root directory (default: current directory)",
-    )
-    # Release-prep workflow arguments
-    parser_orchestrate.add_argument(
-        "--path",
-        default=".",
-        help="Path to codebase to analyze (for release-prep, default: current directory)",
-    )
-    parser_orchestrate.add_argument(
-        "--min-coverage",
-        type=float,
-        help="Minimum test coverage threshold (for release-prep, default: 80.0)",
-    )
-    parser_orchestrate.add_argument(
-        "--min-quality",
-        type=float,
-        help="Minimum code quality score (for release-prep, default: 7.0)",
-    )
-    parser_orchestrate.add_argument(
-        "--max-critical",
-        type=float,
-        help="Maximum critical security issues (for release-prep, default: 0)",
-    )
-    # Health-check workflow arguments
-    parser_orchestrate.add_argument(
-        "--mode",
-        choices=["daily", "weekly", "release"],
-        help="Health check mode (for health-check, default: daily)",
-    )
-    parser_orchestrate.add_argument(
-        "--focus",
-        help="Focus area for health check (for health-check, optional)",
-    )
-    parser_orchestrate.add_argument(
-        "--json",
-        action="store_true",
-        help="Output results as JSON",
-    )
-    parser_orchestrate.set_defaults(func=cmd_orchestrate)
-
-    # Parse arguments
-    args = parser.parse_args()
-
-    # Execute command
-    if hasattr(args, "func"):
-        result = args.func(args)
-
-        # Show progressive discovery tips after command execution
-        if args.command and args.command not in ("dashboard", "run"):
-            try:
-                show_tip_if_available(args.command)
-            except Exception as e:  # noqa: BLE001
-                # INTENTIONAL: Discovery tips are optional UX enhancements
-                # They should never cause command execution to fail
-                # Cannot predict all possible errors from discovery system
-                logger.debug(f"Discovery tip not available for {args.command}: {e}")
-                pass
-
-        return result if result is not None else 0
-    parser.print_help()
-    return 0
-
-
-if __name__ == "__main__":
-    sys.exit(main() or 0)
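Throughout the removed `main()`, every command is wired through argparse's `set_defaults(func=...)`, so dispatch reduces to a single `hasattr(args, "func")` check. A minimal self-contained sketch of that pattern (toy command and version string only):

```python
import argparse
import sys


def cmd_version(args) -> int:
    # Toy handler; real handlers receive the parsed Namespace.
    print("empathy 0.0.0 (sample)")
    return 0


def main() -> int:
    parser = argparse.ArgumentParser(prog="empathy")
    subparsers = parser.add_subparsers(dest="command")

    parser_version = subparsers.add_parser("version", help="Display version information")
    parser_version.set_defaults(func=cmd_version)

    args = parser.parse_args()
    if hasattr(args, "func"):
        result = args.func(args)
        return result if result is not None else 0
    parser.print_help()
    return 0


if __name__ == "__main__":
    sys.exit(main() or 0)
```

The payoff of this pattern is that adding a command never touches the dispatcher: each subparser carries its own handler.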