empathy_framework-2.4.0-py3-none-any.whl → empathy_framework-3.8.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- coach_wizards/__init__.py +13 -12
- coach_wizards/accessibility_wizard.py +12 -12
- coach_wizards/api_wizard.py +12 -12
- coach_wizards/base_wizard.py +26 -20
- coach_wizards/cicd_wizard.py +15 -13
- coach_wizards/code_reviewer_README.md +60 -0
- coach_wizards/code_reviewer_wizard.py +180 -0
- coach_wizards/compliance_wizard.py +12 -12
- coach_wizards/database_wizard.py +12 -12
- coach_wizards/debugging_wizard.py +12 -12
- coach_wizards/documentation_wizard.py +12 -12
- coach_wizards/generate_wizards.py +1 -2
- coach_wizards/localization_wizard.py +101 -19
- coach_wizards/migration_wizard.py +12 -12
- coach_wizards/monitoring_wizard.py +12 -12
- coach_wizards/observability_wizard.py +12 -12
- coach_wizards/performance_wizard.py +12 -12
- coach_wizards/prompt_engineering_wizard.py +661 -0
- coach_wizards/refactoring_wizard.py +12 -12
- coach_wizards/scaling_wizard.py +12 -12
- coach_wizards/security_wizard.py +12 -12
- coach_wizards/testing_wizard.py +12 -12
- empathy_framework-3.8.2.dist-info/METADATA +1176 -0
- empathy_framework-3.8.2.dist-info/RECORD +333 -0
- empathy_framework-3.8.2.dist-info/entry_points.txt +22 -0
- {empathy_framework-2.4.0.dist-info → empathy_framework-3.8.2.dist-info}/top_level.txt +5 -1
- empathy_healthcare_plugin/__init__.py +1 -2
- empathy_healthcare_plugin/monitors/__init__.py +9 -0
- empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py +315 -0
- empathy_healthcare_plugin/monitors/monitoring/__init__.py +44 -0
- empathy_healthcare_plugin/monitors/monitoring/protocol_checker.py +300 -0
- empathy_healthcare_plugin/monitors/monitoring/protocol_loader.py +214 -0
- empathy_healthcare_plugin/monitors/monitoring/sensor_parsers.py +306 -0
- empathy_healthcare_plugin/monitors/monitoring/trajectory_analyzer.py +389 -0
- empathy_llm_toolkit/__init__.py +7 -7
- empathy_llm_toolkit/agent_factory/__init__.py +53 -0
- empathy_llm_toolkit/agent_factory/adapters/__init__.py +85 -0
- empathy_llm_toolkit/agent_factory/adapters/autogen_adapter.py +312 -0
- empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +454 -0
- empathy_llm_toolkit/agent_factory/adapters/haystack_adapter.py +298 -0
- empathy_llm_toolkit/agent_factory/adapters/langchain_adapter.py +362 -0
- empathy_llm_toolkit/agent_factory/adapters/langgraph_adapter.py +333 -0
- empathy_llm_toolkit/agent_factory/adapters/native.py +228 -0
- empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +426 -0
- empathy_llm_toolkit/agent_factory/base.py +305 -0
- empathy_llm_toolkit/agent_factory/crews/__init__.py +67 -0
- empathy_llm_toolkit/agent_factory/crews/code_review.py +1113 -0
- empathy_llm_toolkit/agent_factory/crews/health_check.py +1246 -0
- empathy_llm_toolkit/agent_factory/crews/refactoring.py +1128 -0
- empathy_llm_toolkit/agent_factory/crews/security_audit.py +1018 -0
- empathy_llm_toolkit/agent_factory/decorators.py +286 -0
- empathy_llm_toolkit/agent_factory/factory.py +558 -0
- empathy_llm_toolkit/agent_factory/framework.py +192 -0
- empathy_llm_toolkit/agent_factory/memory_integration.py +324 -0
- empathy_llm_toolkit/agent_factory/resilient.py +320 -0
- empathy_llm_toolkit/claude_memory.py +14 -15
- empathy_llm_toolkit/cli/__init__.py +8 -0
- empathy_llm_toolkit/cli/sync_claude.py +487 -0
- empathy_llm_toolkit/code_health.py +186 -28
- empathy_llm_toolkit/config/__init__.py +29 -0
- empathy_llm_toolkit/config/unified.py +295 -0
- empathy_llm_toolkit/contextual_patterns.py +11 -12
- empathy_llm_toolkit/core.py +168 -53
- empathy_llm_toolkit/git_pattern_extractor.py +17 -13
- empathy_llm_toolkit/levels.py +6 -13
- empathy_llm_toolkit/pattern_confidence.py +14 -18
- empathy_llm_toolkit/pattern_resolver.py +10 -12
- empathy_llm_toolkit/pattern_summary.py +16 -14
- empathy_llm_toolkit/providers.py +194 -28
- empathy_llm_toolkit/routing/__init__.py +32 -0
- empathy_llm_toolkit/routing/model_router.py +362 -0
- empathy_llm_toolkit/security/IMPLEMENTATION_SUMMARY.md +413 -0
- empathy_llm_toolkit/security/PHASE2_COMPLETE.md +384 -0
- empathy_llm_toolkit/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- empathy_llm_toolkit/security/QUICK_REFERENCE.md +316 -0
- empathy_llm_toolkit/security/README.md +262 -0
- empathy_llm_toolkit/security/__init__.py +62 -0
- empathy_llm_toolkit/security/audit_logger.py +929 -0
- empathy_llm_toolkit/security/audit_logger_example.py +152 -0
- empathy_llm_toolkit/security/pii_scrubber.py +640 -0
- empathy_llm_toolkit/security/secrets_detector.py +678 -0
- empathy_llm_toolkit/security/secrets_detector_example.py +304 -0
- empathy_llm_toolkit/security/secure_memdocs.py +1192 -0
- empathy_llm_toolkit/security/secure_memdocs_example.py +278 -0
- empathy_llm_toolkit/session_status.py +20 -22
- empathy_llm_toolkit/state.py +28 -21
- empathy_llm_toolkit/wizards/__init__.py +38 -0
- empathy_llm_toolkit/wizards/base_wizard.py +364 -0
- empathy_llm_toolkit/wizards/customer_support_wizard.py +190 -0
- empathy_llm_toolkit/wizards/healthcare_wizard.py +362 -0
- empathy_llm_toolkit/wizards/patient_assessment_README.md +64 -0
- empathy_llm_toolkit/wizards/patient_assessment_wizard.py +193 -0
- empathy_llm_toolkit/wizards/technology_wizard.py +194 -0
- empathy_os/__init__.py +125 -84
- empathy_os/adaptive/__init__.py +13 -0
- empathy_os/adaptive/task_complexity.py +127 -0
- empathy_os/{monitoring.py → agent_monitoring.py} +28 -28
- empathy_os/cache/__init__.py +117 -0
- empathy_os/cache/base.py +166 -0
- empathy_os/cache/dependency_manager.py +253 -0
- empathy_os/cache/hash_only.py +248 -0
- empathy_os/cache/hybrid.py +390 -0
- empathy_os/cache/storage.py +282 -0
- empathy_os/cli.py +1516 -70
- empathy_os/cli_unified.py +597 -0
- empathy_os/config/__init__.py +63 -0
- empathy_os/config/xml_config.py +239 -0
- empathy_os/config.py +95 -37
- empathy_os/coordination.py +72 -68
- empathy_os/core.py +94 -107
- empathy_os/cost_tracker.py +74 -55
- empathy_os/dashboard/__init__.py +15 -0
- empathy_os/dashboard/server.py +743 -0
- empathy_os/discovery.py +17 -14
- empathy_os/emergence.py +21 -22
- empathy_os/exceptions.py +18 -30
- empathy_os/feedback_loops.py +30 -33
- empathy_os/levels.py +32 -35
- empathy_os/leverage_points.py +31 -32
- empathy_os/logging_config.py +19 -16
- empathy_os/memory/__init__.py +195 -0
- empathy_os/memory/claude_memory.py +466 -0
- empathy_os/memory/config.py +224 -0
- empathy_os/memory/control_panel.py +1298 -0
- empathy_os/memory/edges.py +179 -0
- empathy_os/memory/graph.py +567 -0
- empathy_os/memory/long_term.py +1194 -0
- empathy_os/memory/nodes.py +179 -0
- empathy_os/memory/redis_bootstrap.py +540 -0
- empathy_os/memory/security/__init__.py +31 -0
- empathy_os/memory/security/audit_logger.py +930 -0
- empathy_os/memory/security/pii_scrubber.py +640 -0
- empathy_os/memory/security/secrets_detector.py +678 -0
- empathy_os/memory/short_term.py +2119 -0
- empathy_os/memory/storage/__init__.py +15 -0
- empathy_os/memory/summary_index.py +583 -0
- empathy_os/memory/unified.py +619 -0
- empathy_os/metrics/__init__.py +12 -0
- empathy_os/metrics/prompt_metrics.py +190 -0
- empathy_os/models/__init__.py +136 -0
- empathy_os/models/__main__.py +13 -0
- empathy_os/models/cli.py +655 -0
- empathy_os/models/empathy_executor.py +354 -0
- empathy_os/models/executor.py +252 -0
- empathy_os/models/fallback.py +671 -0
- empathy_os/models/provider_config.py +563 -0
- empathy_os/models/registry.py +382 -0
- empathy_os/models/tasks.py +302 -0
- empathy_os/models/telemetry.py +548 -0
- empathy_os/models/token_estimator.py +378 -0
- empathy_os/models/validation.py +274 -0
- empathy_os/monitoring/__init__.py +52 -0
- empathy_os/monitoring/alerts.py +23 -0
- empathy_os/monitoring/alerts_cli.py +268 -0
- empathy_os/monitoring/multi_backend.py +271 -0
- empathy_os/monitoring/otel_backend.py +363 -0
- empathy_os/optimization/__init__.py +19 -0
- empathy_os/optimization/context_optimizer.py +272 -0
- empathy_os/pattern_library.py +30 -29
- empathy_os/persistence.py +35 -37
- empathy_os/platform_utils.py +261 -0
- empathy_os/plugins/__init__.py +28 -0
- empathy_os/plugins/base.py +361 -0
- empathy_os/plugins/registry.py +268 -0
- empathy_os/project_index/__init__.py +30 -0
- empathy_os/project_index/cli.py +335 -0
- empathy_os/project_index/crew_integration.py +430 -0
- empathy_os/project_index/index.py +425 -0
- empathy_os/project_index/models.py +501 -0
- empathy_os/project_index/reports.py +473 -0
- empathy_os/project_index/scanner.py +538 -0
- empathy_os/prompts/__init__.py +61 -0
- empathy_os/prompts/config.py +77 -0
- empathy_os/prompts/context.py +177 -0
- empathy_os/prompts/parser.py +285 -0
- empathy_os/prompts/registry.py +313 -0
- empathy_os/prompts/templates.py +208 -0
- empathy_os/redis_config.py +144 -58
- empathy_os/redis_memory.py +79 -77
- empathy_os/resilience/__init__.py +56 -0
- empathy_os/resilience/circuit_breaker.py +256 -0
- empathy_os/resilience/fallback.py +179 -0
- empathy_os/resilience/health.py +300 -0
- empathy_os/resilience/retry.py +209 -0
- empathy_os/resilience/timeout.py +135 -0
- empathy_os/routing/__init__.py +43 -0
- empathy_os/routing/chain_executor.py +433 -0
- empathy_os/routing/classifier.py +217 -0
- empathy_os/routing/smart_router.py +234 -0
- empathy_os/routing/wizard_registry.py +307 -0
- empathy_os/templates.py +19 -14
- empathy_os/trust/__init__.py +28 -0
- empathy_os/trust/circuit_breaker.py +579 -0
- empathy_os/trust_building.py +67 -58
- empathy_os/validation/__init__.py +19 -0
- empathy_os/validation/xml_validator.py +281 -0
- empathy_os/wizard_factory_cli.py +170 -0
- empathy_os/{workflows.py → workflow_commands.py} +131 -37
- empathy_os/workflows/__init__.py +360 -0
- empathy_os/workflows/base.py +1660 -0
- empathy_os/workflows/bug_predict.py +962 -0
- empathy_os/workflows/code_review.py +960 -0
- empathy_os/workflows/code_review_adapters.py +310 -0
- empathy_os/workflows/code_review_pipeline.py +720 -0
- empathy_os/workflows/config.py +600 -0
- empathy_os/workflows/dependency_check.py +648 -0
- empathy_os/workflows/document_gen.py +1069 -0
- empathy_os/workflows/documentation_orchestrator.py +1205 -0
- empathy_os/workflows/health_check.py +679 -0
- empathy_os/workflows/keyboard_shortcuts/__init__.py +39 -0
- empathy_os/workflows/keyboard_shortcuts/generators.py +386 -0
- empathy_os/workflows/keyboard_shortcuts/parsers.py +414 -0
- empathy_os/workflows/keyboard_shortcuts/prompts.py +295 -0
- empathy_os/workflows/keyboard_shortcuts/schema.py +193 -0
- empathy_os/workflows/keyboard_shortcuts/workflow.py +505 -0
- empathy_os/workflows/manage_documentation.py +804 -0
- empathy_os/workflows/new_sample_workflow1.py +146 -0
- empathy_os/workflows/new_sample_workflow1_README.md +150 -0
- empathy_os/workflows/perf_audit.py +687 -0
- empathy_os/workflows/pr_review.py +748 -0
- empathy_os/workflows/progress.py +445 -0
- empathy_os/workflows/progress_server.py +322 -0
- empathy_os/workflows/refactor_plan.py +693 -0
- empathy_os/workflows/release_prep.py +808 -0
- empathy_os/workflows/research_synthesis.py +404 -0
- empathy_os/workflows/secure_release.py +585 -0
- empathy_os/workflows/security_adapters.py +297 -0
- empathy_os/workflows/security_audit.py +1046 -0
- empathy_os/workflows/step_config.py +234 -0
- empathy_os/workflows/test5.py +125 -0
- empathy_os/workflows/test5_README.md +158 -0
- empathy_os/workflows/test_gen.py +1855 -0
- empathy_os/workflows/test_lifecycle.py +526 -0
- empathy_os/workflows/test_maintenance.py +626 -0
- empathy_os/workflows/test_maintenance_cli.py +590 -0
- empathy_os/workflows/test_maintenance_crew.py +821 -0
- empathy_os/workflows/xml_enhanced_crew.py +285 -0
- empathy_software_plugin/__init__.py +1 -2
- empathy_software_plugin/cli/__init__.py +120 -0
- empathy_software_plugin/cli/inspect.py +362 -0
- empathy_software_plugin/cli.py +49 -27
- empathy_software_plugin/plugin.py +4 -8
- empathy_software_plugin/wizards/__init__.py +42 -0
- empathy_software_plugin/wizards/advanced_debugging_wizard.py +392 -0
- empathy_software_plugin/wizards/agent_orchestration_wizard.py +511 -0
- empathy_software_plugin/wizards/ai_collaboration_wizard.py +503 -0
- empathy_software_plugin/wizards/ai_context_wizard.py +441 -0
- empathy_software_plugin/wizards/ai_documentation_wizard.py +503 -0
- empathy_software_plugin/wizards/base_wizard.py +288 -0
- empathy_software_plugin/wizards/book_chapter_wizard.py +519 -0
- empathy_software_plugin/wizards/code_review_wizard.py +606 -0
- empathy_software_plugin/wizards/debugging/__init__.py +50 -0
- empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +414 -0
- empathy_software_plugin/wizards/debugging/config_loaders.py +442 -0
- empathy_software_plugin/wizards/debugging/fix_applier.py +469 -0
- empathy_software_plugin/wizards/debugging/language_patterns.py +383 -0
- empathy_software_plugin/wizards/debugging/linter_parsers.py +470 -0
- empathy_software_plugin/wizards/debugging/verification.py +369 -0
- empathy_software_plugin/wizards/enhanced_testing_wizard.py +537 -0
- empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +816 -0
- empathy_software_plugin/wizards/multi_model_wizard.py +501 -0
- empathy_software_plugin/wizards/pattern_extraction_wizard.py +422 -0
- empathy_software_plugin/wizards/pattern_retriever_wizard.py +400 -0
- empathy_software_plugin/wizards/performance/__init__.py +9 -0
- empathy_software_plugin/wizards/performance/bottleneck_detector.py +221 -0
- empathy_software_plugin/wizards/performance/profiler_parsers.py +278 -0
- empathy_software_plugin/wizards/performance/trajectory_analyzer.py +429 -0
- empathy_software_plugin/wizards/performance_profiling_wizard.py +305 -0
- empathy_software_plugin/wizards/prompt_engineering_wizard.py +425 -0
- empathy_software_plugin/wizards/rag_pattern_wizard.py +461 -0
- empathy_software_plugin/wizards/security/__init__.py +32 -0
- empathy_software_plugin/wizards/security/exploit_analyzer.py +290 -0
- empathy_software_plugin/wizards/security/owasp_patterns.py +241 -0
- empathy_software_plugin/wizards/security/vulnerability_scanner.py +604 -0
- empathy_software_plugin/wizards/security_analysis_wizard.py +322 -0
- empathy_software_plugin/wizards/security_learning_wizard.py +740 -0
- empathy_software_plugin/wizards/tech_debt_wizard.py +726 -0
- empathy_software_plugin/wizards/testing/__init__.py +27 -0
- empathy_software_plugin/wizards/testing/coverage_analyzer.py +459 -0
- empathy_software_plugin/wizards/testing/quality_analyzer.py +531 -0
- empathy_software_plugin/wizards/testing/test_suggester.py +533 -0
- empathy_software_plugin/wizards/testing_wizard.py +274 -0
- hot_reload/README.md +473 -0
- hot_reload/__init__.py +62 -0
- hot_reload/config.py +84 -0
- hot_reload/integration.py +228 -0
- hot_reload/reloader.py +298 -0
- hot_reload/watcher.py +179 -0
- hot_reload/websocket.py +176 -0
- scaffolding/README.md +589 -0
- scaffolding/__init__.py +35 -0
- scaffolding/__main__.py +14 -0
- scaffolding/cli.py +240 -0
- test_generator/__init__.py +38 -0
- test_generator/__main__.py +14 -0
- test_generator/cli.py +226 -0
- test_generator/generator.py +325 -0
- test_generator/risk_analyzer.py +216 -0
- workflow_patterns/__init__.py +33 -0
- workflow_patterns/behavior.py +249 -0
- workflow_patterns/core.py +76 -0
- workflow_patterns/output.py +99 -0
- workflow_patterns/registry.py +255 -0
- workflow_patterns/structural.py +288 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
- agents/code_inspection/patterns/inspection/recurring_B112.json +0 -18
- agents/code_inspection/patterns/inspection/recurring_F541.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_FORMAT.json +0 -25
- agents/code_inspection/patterns/inspection/recurring_bug_20250822_def456.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20250915_abc123.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_3c5b9951.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_97c0f72f.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_a0871d53.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_a9b6ec41.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_null_001.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_builtin.json +0 -16
- agents/compliance_anticipation_agent.py +0 -1427
- agents/epic_integration_wizard.py +0 -541
- agents/trust_building_behaviors.py +0 -891
- empathy_framework-2.4.0.dist-info/METADATA +0 -485
- empathy_framework-2.4.0.dist-info/RECORD +0 -102
- empathy_framework-2.4.0.dist-info/entry_points.txt +0 -6
- empathy_llm_toolkit/htmlcov/status.json +0 -1
- empathy_llm_toolkit/security/htmlcov/status.json +0 -1
- {empathy_framework-2.4.0.dist-info → empathy_framework-3.8.2.dist-info}/WHEEL +0 -0
- {empathy_framework-2.4.0.dist-info → empathy_framework-3.8.2.dist-info}/licenses/LICENSE +0 -0
empathy_os/cli.py
CHANGED
|
@@ -1,5 +1,4 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Command-Line Interface for Empathy Framework
|
|
1
|
+
"""Command-Line Interface for Empathy Framework
|
|
3
2
|
|
|
4
3
|
Provides CLI commands for:
|
|
5
4
|
- Running interactive REPL (empathy run)
|
|
@@ -21,21 +20,274 @@ from importlib.metadata import version as get_version
|
|
|
21
20
|
from empathy_os import EmpathyConfig, EmpathyOS, load_config
|
|
22
21
|
from empathy_os.cost_tracker import cmd_costs
|
|
23
22
|
from empathy_os.dashboard import cmd_dashboard
|
|
23
|
+
from empathy_os.discovery import show_tip_if_available
|
|
24
24
|
from empathy_os.logging_config import get_logger
|
|
25
25
|
from empathy_os.pattern_library import PatternLibrary
|
|
26
26
|
from empathy_os.persistence import MetricsCollector, PatternPersistence, StateManager
|
|
27
|
+
from empathy_os.platform_utils import setup_asyncio_policy
|
|
27
28
|
from empathy_os.templates import cmd_new
|
|
28
|
-
from empathy_os.
|
|
29
|
+
from empathy_os.wizard_factory_cli import add_wizard_factory_commands
|
|
30
|
+
from empathy_os.workflows import (
|
|
31
|
+
cmd_fix_all,
|
|
32
|
+
cmd_learn,
|
|
33
|
+
cmd_morning,
|
|
34
|
+
cmd_ship,
|
|
35
|
+
create_example_config,
|
|
36
|
+
get_workflow,
|
|
37
|
+
)
|
|
38
|
+
from empathy_os.workflows import list_workflows as get_workflow_list
|
|
29
39
|
|
|
30
40
|
logger = get_logger(__name__)
|
|
31
41
|
|
|
32
42
|
|
|
43
|
+
# =============================================================================
|
|
44
|
+
# CHEATSHEET DATA - Quick reference for all commands
|
|
45
|
+
# =============================================================================
|
|
46
|
+
|
|
47
|
+
CHEATSHEET = {
|
|
48
|
+
"Getting Started": [
|
|
49
|
+
("empathy init", "Create a new config file"),
|
|
50
|
+
("empathy wizard", "Interactive setup wizard"),
|
|
51
|
+
("empathy run", "Interactive REPL mode"),
|
|
52
|
+
],
|
|
53
|
+
"Daily Workflow": [
|
|
54
|
+
("empathy morning", "Start-of-day briefing"),
|
|
55
|
+
("empathy status", "What needs attention now"),
|
|
56
|
+
("empathy ship", "Pre-commit validation"),
|
|
57
|
+
],
|
|
58
|
+
"Code Quality": [
|
|
59
|
+
("empathy health", "Quick health check"),
|
|
60
|
+
("empathy health --deep", "Comprehensive check"),
|
|
61
|
+
("empathy health --fix", "Auto-fix issues"),
|
|
62
|
+
("empathy fix-all", "Fix all lint/format issues"),
|
|
63
|
+
],
|
|
64
|
+
"Pattern Learning": [
|
|
65
|
+
("empathy learn --analyze 20", "Learn from last 20 commits"),
|
|
66
|
+
("empathy sync-claude", "Sync patterns to Claude Code"),
|
|
67
|
+
("empathy inspect patterns", "View learned patterns"),
|
|
68
|
+
],
|
|
69
|
+
"Code Review": [
|
|
70
|
+
("empathy review", "Review recent changes"),
|
|
71
|
+
("empathy review --staged", "Review staged changes only"),
|
|
72
|
+
],
|
|
73
|
+
"Memory & State": [
|
|
74
|
+
("empathy inspect state", "View saved states"),
|
|
75
|
+
("empathy inspect metrics --user-id X", "View user metrics"),
|
|
76
|
+
("empathy export patterns.json", "Export patterns"),
|
|
77
|
+
],
|
|
78
|
+
"Advanced": [
|
|
79
|
+
("empathy costs", "View API cost tracking"),
|
|
80
|
+
("empathy dashboard", "Launch visual dashboard"),
|
|
81
|
+
("empathy frameworks", "List agent frameworks"),
|
|
82
|
+
("empathy workflow list", "List multi-model workflows"),
|
|
83
|
+
("empathy new <template>", "Create project from template"),
|
|
84
|
+
],
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
EXPLAIN_CONTENT = {
|
|
88
|
+
"morning": """
|
|
89
|
+
HOW 'empathy morning' WORKS:
|
|
90
|
+
━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
|
91
|
+
This command aggregates multiple data sources to give you a prioritized
|
|
92
|
+
start-of-day briefing:
|
|
93
|
+
|
|
94
|
+
1. PATTERNS ANALYSIS
|
|
95
|
+
Reads ./patterns/*.json to find:
|
|
96
|
+
- Unresolved bugs (status: investigating)
|
|
97
|
+
- Recent security decisions
|
|
98
|
+
- Tech debt trends
|
|
99
|
+
|
|
100
|
+
2. GIT CONTEXT
|
|
101
|
+
Checks your recent git activity:
|
|
102
|
+
- Commits from yesterday
|
|
103
|
+
- Uncommitted changes
|
|
104
|
+
- Branch status
|
|
105
|
+
|
|
106
|
+
3. HEALTH SNAPSHOT
|
|
107
|
+
Runs quick health checks:
|
|
108
|
+
- Lint issues count
|
|
109
|
+
- Type errors
|
|
110
|
+
- Test status
|
|
111
|
+
|
|
112
|
+
4. PRIORITY SCORING
|
|
113
|
+
Items are scored and sorted by:
|
|
114
|
+
- Age (older = higher priority)
|
|
115
|
+
- Severity (critical > high > medium)
|
|
116
|
+
- Your recent activity patterns
|
|
117
|
+
|
|
118
|
+
TIPS:
|
|
119
|
+
• Run this first thing each day
|
|
120
|
+
• Use 'empathy morning --verbose' for details
|
|
121
|
+
• Pair with 'empathy status --select N' to dive deeper
|
|
122
|
+
""",
|
|
123
|
+
"ship": """
|
|
124
|
+
HOW 'empathy ship' WORKS:
|
|
125
|
+
━━━━━━━━━━━━━━━━━━━━━━━━━
|
|
126
|
+
Pre-commit validation pipeline that ensures code quality before shipping:
|
|
127
|
+
|
|
128
|
+
1. HEALTH CHECKS
|
|
129
|
+
- Runs lint checks (ruff/flake8)
|
|
130
|
+
- Validates types (mypy/pyright)
|
|
131
|
+
- Checks formatting (black/prettier)
|
|
132
|
+
|
|
133
|
+
2. PATTERN REVIEW
|
|
134
|
+
- Compares changes against known bug patterns
|
|
135
|
+
- Flags code that matches historical issues
|
|
136
|
+
- Suggests fixes based on past resolutions
|
|
137
|
+
|
|
138
|
+
3. SECURITY SCAN
|
|
139
|
+
- Checks for hardcoded secrets
|
|
140
|
+
- Validates against security patterns
|
|
141
|
+
- Reports potential vulnerabilities
|
|
142
|
+
|
|
143
|
+
4. PATTERN SYNC (optional)
|
|
144
|
+
- Updates Claude Code rules
|
|
145
|
+
- Syncs new patterns discovered
|
|
146
|
+
- Skip with --skip-sync
|
|
147
|
+
|
|
148
|
+
EXIT CODES:
|
|
149
|
+
• 0 = All checks passed, safe to commit
|
|
150
|
+
• 1 = Issues found, review before committing
|
|
151
|
+
|
|
152
|
+
TIPS:
|
|
153
|
+
• Add to pre-commit hook: empathy ship --skip-sync
|
|
154
|
+
• Use 'empathy ship --verbose' to see all checks
|
|
155
|
+
""",
|
|
156
|
+
"learn": """
|
|
157
|
+
HOW 'empathy learn' WORKS:
|
|
158
|
+
━━━━━━━━━━━━━━━━━━━━━━━━━
|
|
159
|
+
Extracts patterns from your git history to teach Claude about your codebase:
|
|
160
|
+
|
|
161
|
+
1. COMMIT ANALYSIS
|
|
162
|
+
Parses commit messages looking for:
|
|
163
|
+
- fix: Bug fixes → debugging.json
|
|
164
|
+
- security: decisions → security.json
|
|
165
|
+
- TODO/FIXME in code → tech_debt.json
|
|
166
|
+
|
|
167
|
+
2. DIFF INSPECTION
|
|
168
|
+
Analyzes code changes to:
|
|
169
|
+
- Identify affected files
|
|
170
|
+
- Extract error types
|
|
171
|
+
- Record fix patterns
|
|
172
|
+
|
|
173
|
+
3. PATTERN STORAGE
|
|
174
|
+
Saves to ./patterns/:
|
|
175
|
+
- debugging.json: Bug patterns
|
|
176
|
+
- security.json: Security decisions
|
|
177
|
+
- tech_debt.json: Technical debt
|
|
178
|
+
- inspection.json: Code review findings
|
|
179
|
+
|
|
180
|
+
4. SUMMARY GENERATION
|
|
181
|
+
Creates .claude/patterns_summary.md:
|
|
182
|
+
- Human-readable pattern overview
|
|
183
|
+
- Loaded by Claude Code automatically
|
|
184
|
+
|
|
185
|
+
USAGE EXAMPLES:
|
|
186
|
+
• empathy learn --analyze 10 # Last 10 commits
|
|
187
|
+
• empathy learn --analyze 100 # Deeper history
|
|
188
|
+
• empathy sync-claude # Apply patterns to Claude
|
|
189
|
+
|
|
190
|
+
TIPS:
|
|
191
|
+
• Run weekly to keep patterns current
|
|
192
|
+
• Use good commit messages (fix:, feat:, etc.)
|
|
193
|
+
• Check ./patterns/ to see what was learned
|
|
194
|
+
""",
|
|
195
|
+
"health": """
|
|
196
|
+
HOW 'empathy health' WORKS:
|
|
197
|
+
━━━━━━━━━━━━━━━━━━━━━━━━━
|
|
198
|
+
Code health dashboard that runs multiple quality checks:
|
|
199
|
+
|
|
200
|
+
1. QUICK MODE (default)
|
|
201
|
+
Fast checks that run in seconds:
|
|
202
|
+
- Lint: ruff check or flake8
|
|
203
|
+
- Format: black --check or prettier
|
|
204
|
+
- Basic type checking
|
|
205
|
+
|
|
206
|
+
2. DEEP MODE (--deep)
|
|
207
|
+
Comprehensive checks (slower):
|
|
208
|
+
- Full type analysis (mypy --strict)
|
|
209
|
+
- Test suite execution
|
|
210
|
+
- Security scanning
|
|
211
|
+
- Dependency audit
|
|
212
|
+
|
|
213
|
+
3. SCORING
|
|
214
|
+
Health score 0-100 based on:
|
|
215
|
+
- Lint issues (×2 penalty each)
|
|
216
|
+
- Type errors (×5 penalty each)
|
|
217
|
+
- Test failures (×10 penalty each)
|
|
218
|
+
- Security issues (×20 penalty each)
|
|
219
|
+
|
|
220
|
+
4. AUTO-FIX (--fix)
|
|
221
|
+
Can automatically fix:
|
|
222
|
+
- Formatting issues
|
|
223
|
+
- Import sorting
|
|
224
|
+
- Simple lint errors
|
|
225
|
+
|
|
226
|
+
USAGE:
|
|
227
|
+
• empathy health # Quick check
|
|
228
|
+
• empathy health --deep # Full check
|
|
229
|
+
• empathy health --fix # Auto-fix issues
|
|
230
|
+
• empathy health --trends 30 # 30-day trend
|
|
231
|
+
|
|
232
|
+
TIPS:
|
|
233
|
+
• Run quick checks before commits
|
|
234
|
+
• Run deep checks in CI/CD
|
|
235
|
+
• Track trends to catch regressions
|
|
236
|
+
""",
|
|
237
|
+
"sync-claude": """
|
|
238
|
+
HOW 'empathy sync-claude' WORKS:
|
|
239
|
+
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
|
240
|
+
Converts learned patterns into Claude Code rules:
|
|
241
|
+
|
|
242
|
+
1. READS PATTERNS
|
|
243
|
+
Loads from ./patterns/:
|
|
244
|
+
- debugging.json → Bug fix patterns
|
|
245
|
+
- security.json → Security decisions
|
|
246
|
+
- tech_debt.json → Known debt items
|
|
247
|
+
|
|
248
|
+
2. GENERATES RULES
|
|
249
|
+
Creates .claude/rules/empathy/:
|
|
250
|
+
- debugging.md
|
|
251
|
+
- security.md
|
|
252
|
+
- tech_debt.md
|
|
253
|
+
|
|
254
|
+
3. CLAUDE CODE INTEGRATION
|
|
255
|
+
Rules are automatically loaded when:
|
|
256
|
+
- Claude Code starts in this directory
|
|
257
|
+
- Combined with CLAUDE.md instructions
|
|
258
|
+
|
|
259
|
+
HOW CLAUDE USES THESE:
|
|
260
|
+
• Sees historical bugs before suggesting code
|
|
261
|
+
• Knows about accepted security patterns
|
|
262
|
+
• Understands existing tech debt
|
|
263
|
+
|
|
264
|
+
FILE STRUCTURE:
|
|
265
|
+
./patterns/ # Your pattern storage
|
|
266
|
+
debugging.json
|
|
267
|
+
security.json
|
|
268
|
+
.claude/
|
|
269
|
+
CLAUDE.md # Project instructions
|
|
270
|
+
rules/
|
|
271
|
+
empathy/ # Generated rules
|
|
272
|
+
debugging.md
|
|
273
|
+
security.md
|
|
274
|
+
|
|
275
|
+
TIPS:
|
|
276
|
+
• Run after 'empathy learn'
|
|
277
|
+
• Commit .claude/rules/ to share with team
|
|
278
|
+
• Weekly sync keeps Claude current
|
|
279
|
+
""",
|
|
280
|
+
}
|
|
281
|
+
|
|
282
|
+
|
|
33
283
|
def cmd_version(args):
|
|
34
284
|
"""Display version information"""
|
|
35
285
|
logger.info("Displaying version information")
|
|
36
286
|
try:
|
|
37
287
|
version = get_version("empathy")
|
|
38
|
-
except Exception:
|
|
288
|
+
except Exception as e:
|
|
289
|
+
# Package metadata not available or invalid (development install)
|
|
290
|
+
logger.debug(f"Version not available: {e}")
|
|
39
291
|
version = "unknown"
|
|
40
292
|
logger.info(f"Empathy v{version}")
|
|
41
293
|
logger.info("Copyright 2025 Smart-AI-Memory")
|
|
@@ -43,6 +295,314 @@ def cmd_version(args):
|
|
|
43
295
|
logger.info("\n✨ Built with Claude Code + MemDocs + VS Code transformative stack")
|
|
44
296
|
|
|
45
297
|
|
|
298
|
+
def cmd_cheatsheet(args):
|
|
299
|
+
"""Display quick reference cheatsheet for all commands."""
|
|
300
|
+
category = getattr(args, "category", None)
|
|
301
|
+
compact = getattr(args, "compact", False)
|
|
302
|
+
|
|
303
|
+
print()
|
|
304
|
+
print("=" * 60)
|
|
305
|
+
print(" EMPATHY FRAMEWORK - QUICK REFERENCE")
|
|
306
|
+
print("=" * 60)
|
|
307
|
+
|
|
308
|
+
if category:
|
|
309
|
+
# Show specific category
|
|
310
|
+
category_title = category.replace("-", " ").title()
|
|
311
|
+
if category_title in CHEATSHEET:
|
|
312
|
+
print(f"\n {category_title}")
|
|
313
|
+
print(" " + "-" * 40)
|
|
314
|
+
for cmd, desc in CHEATSHEET[category_title]:
|
|
315
|
+
if compact:
|
|
316
|
+
print(f" {cmd}")
|
|
317
|
+
else:
|
|
318
|
+
print(f" {cmd:35} {desc}")
|
|
319
|
+
else:
|
|
320
|
+
print(f"\n Unknown category: {category}")
|
|
321
|
+
print(" Available: " + ", ".join(k.lower().replace(" ", "-") for k in CHEATSHEET))
|
|
322
|
+
else:
|
|
323
|
+
# Show all categories
|
|
324
|
+
for cat_name, commands in CHEATSHEET.items():
|
|
325
|
+
print(f"\n {cat_name}")
|
|
326
|
+
print(" " + "-" * 40)
|
|
327
|
+
for cmd, desc in commands:
|
|
328
|
+
if compact:
|
|
329
|
+
print(f" {cmd}")
|
|
330
|
+
else:
|
|
331
|
+
print(f" {cmd:35} {desc}")
|
|
332
|
+
|
|
333
|
+
print()
|
|
334
|
+
print("-" * 60)
|
|
335
|
+
print(" Use: empathy <command> --explain for detailed explanation")
|
|
336
|
+
print(" Use: empathy onboard for interactive tutorial")
|
|
337
|
+
print("=" * 60)
|
|
338
|
+
print()
|
|
339
|
+
|
|
340
|
+
|
|
341
|
+
def cmd_onboard(args):
|
|
342
|
+
"""Interactive onboarding tutorial for new users."""
|
|
343
|
+
from empathy_os.discovery import get_engine
|
|
344
|
+
|
|
345
|
+
step = getattr(args, "step", None)
|
|
346
|
+
reset = getattr(args, "reset", False)
|
|
347
|
+
|
|
348
|
+
engine = get_engine()
|
|
349
|
+
stats = engine.get_stats()
|
|
350
|
+
|
|
351
|
+
if reset:
|
|
352
|
+
# Reset onboarding progress
|
|
353
|
+
engine.state["onboarding_step"] = 0
|
|
354
|
+
engine.state["onboarding_completed"] = []
|
|
355
|
+
engine._save()
|
|
356
|
+
print("Onboarding progress reset.")
|
|
357
|
+
return
|
|
358
|
+
|
|
359
|
+
# Define onboarding steps
|
|
360
|
+
steps = [
|
|
361
|
+
{
|
|
362
|
+
"title": "Welcome to Empathy Framework",
|
|
363
|
+
"content": """
|
|
364
|
+
Welcome! Empathy Framework helps you build AI systems with 5 levels
|
|
365
|
+
of sophistication, from reactive responses to anticipatory assistance.
|
|
366
|
+
|
|
367
|
+
This tutorial will walk you through the key features.
|
|
368
|
+
|
|
369
|
+
Let's check your current setup first...
|
|
370
|
+
""",
|
|
371
|
+
"check": lambda: True,
|
|
372
|
+
"action": None,
|
|
373
|
+
},
|
|
374
|
+
{
|
|
375
|
+
"title": "Step 1: Initialize Your Project",
|
|
376
|
+
"content": """
|
|
377
|
+
First, let's create a configuration file for your project.
|
|
378
|
+
|
|
379
|
+
Run: empathy init
|
|
380
|
+
|
|
381
|
+
This creates empathy.config.yaml with sensible defaults.
|
|
382
|
+
Alternatively, use 'empathy wizard' for an interactive setup.
|
|
383
|
+
""",
|
|
384
|
+
"check": lambda: _file_exists("empathy.config.yaml")
|
|
385
|
+
or _file_exists("empathy.config.yml"),
|
|
386
|
+
"action": "empathy init",
|
|
387
|
+
},
|
|
388
|
+
{
|
|
389
|
+
"title": "Step 2: Learn From Your History",
|
|
390
|
+
"content": """
|
|
391
|
+
Empathy can learn patterns from your git commit history.
|
|
392
|
+
This teaches Claude about your codebase's patterns and past bugs.
|
|
393
|
+
|
|
394
|
+
Run: empathy learn --analyze 10
|
|
395
|
+
|
|
396
|
+
This analyzes the last 10 commits and extracts:
|
|
397
|
+
- Bug fix patterns
|
|
398
|
+
- Security decisions
|
|
399
|
+
- Technical debt markers
|
|
400
|
+
""",
|
|
401
|
+
"check": lambda: _file_exists("patterns/debugging.json"),
|
|
402
|
+
"action": "empathy learn --analyze 10",
|
|
403
|
+
},
|
|
404
|
+
{
|
|
405
|
+
"title": "Step 3: Sync Patterns to Claude",
|
|
406
|
+
"content": """
|
|
407
|
+
Now let's share what we learned with Claude Code.
|
|
408
|
+
|
|
409
|
+
Run: empathy sync-claude
|
|
410
|
+
|
|
411
|
+
This creates .claude/rules/empathy/ with markdown rules
|
|
412
|
+
that Claude Code automatically loads when you work in this directory.
|
|
413
|
+
""",
|
|
414
|
+
"check": lambda: _file_exists(".claude/rules/empathy/debugging.md"),
|
|
415
|
+
"action": "empathy sync-claude",
|
|
416
|
+
},
|
|
417
|
+
{
|
|
418
|
+
"title": "Step 4: Check Code Health",
|
|
419
|
+
"content": """
|
|
420
|
+
Let's run a quick health check on your codebase.
|
|
421
|
+
|
|
422
|
+
Run: empathy health
|
|
423
|
+
|
|
424
|
+
This checks:
|
|
425
|
+
- Linting issues
|
|
426
|
+
- Type errors
|
|
427
|
+
- Formatting problems
|
|
428
|
+
|
|
429
|
+
Try 'empathy health --fix' to auto-fix what's possible.
|
|
430
|
+
""",
|
|
431
|
+
"check": lambda: stats.get("command_counts", {}).get("health", 0) > 0,
|
|
432
|
+
"action": "empathy health",
|
|
433
|
+
},
|
|
434
|
+
{
|
|
435
|
+
"title": "Step 5: Daily Workflow",
|
|
436
|
+
"content": """
|
|
437
|
+
You're almost there! Here's your recommended daily workflow:
|
|
438
|
+
|
|
439
|
+
MORNING:
|
|
440
|
+
empathy morning - Get your priority briefing
|
|
441
|
+
|
|
442
|
+
BEFORE COMMITS:
|
|
443
|
+
empathy ship - Validate before committing
|
|
444
|
+
|
|
445
|
+
WEEKLY:
|
|
446
|
+
empathy learn - Update patterns from new commits
|
|
447
|
+
empathy sync-claude - Keep Claude current
|
|
448
|
+
|
|
449
|
+
You've completed the basics! Run 'empathy cheatsheet' anytime
|
|
450
|
+
for a quick reference of all commands.
|
|
451
|
+
""",
|
|
452
|
+
"check": lambda: True,
|
|
453
|
+
"action": None,
|
|
454
|
+
},
|
|
455
|
+
]
|
|
456
|
+
|
|
457
|
+
# Determine current step
|
|
458
|
+
current_step = engine.state.get("onboarding_step", 0)
|
|
459
|
+
if step is not None:
|
|
460
|
+
current_step = max(0, min(step - 1, len(steps) - 1))
|
|
461
|
+
|
|
462
|
+
step_data = steps[current_step]
|
|
463
|
+
|
|
464
|
+
# Display header
|
|
465
|
+
print()
|
|
466
|
+
print("=" * 60)
|
|
467
|
+
print(f" ONBOARDING ({current_step + 1}/{len(steps)})")
|
|
468
|
+
print("=" * 60)
|
|
469
|
+
print()
|
|
470
|
+
print(f" {step_data['title']}")
|
|
471
|
+
print(" " + "-" * 50)
|
|
472
|
+
print(step_data["content"])
|
|
473
|
+
|
|
474
|
+
# Check if step is completed
|
|
475
|
+
if step_data["check"]():
|
|
476
|
+
if current_step < len(steps) - 1:
|
|
477
|
+
print(" [DONE] This step is complete!")
|
|
478
|
+
print()
|
|
479
|
+
print(f" Continue with: empathy onboard --step {current_step + 2}")
|
|
480
|
+
# Auto-advance
|
|
481
|
+
engine.state["onboarding_step"] = current_step + 1
|
|
482
|
+
engine._save()
|
|
483
|
+
else:
|
|
484
|
+
print(" Congratulations! You've completed the onboarding!")
|
|
485
|
+
print()
|
|
486
|
+
_show_achievements(engine)
|
|
487
|
+
elif step_data["action"]:
|
|
488
|
+
print(f" NEXT: Run '{step_data['action']}'")
|
|
489
|
+
print(" Then run 'empathy onboard' to continue")
|
|
490
|
+
|
|
491
|
+
print()
|
|
492
|
+
print("-" * 60)
|
|
493
|
+
print(f" Progress: {'*' * (current_step + 1)}{'.' * (len(steps) - current_step - 1)}")
|
|
494
|
+
print("=" * 60)
|
|
495
|
+
print()
|
|
496
|
+
|
|
497
|
+
|
|
498
|
+
def _file_exists(path: str) -> bool:
|
|
499
|
+
"""Check if a file exists."""
|
|
500
|
+
from pathlib import Path
|
|
501
|
+
|
|
502
|
+
return Path(path).exists()
|
|
503
|
+
|
|
504
|
+
|
|
505
|
+
def _show_achievements(engine) -> None:
|
|
506
|
+
"""Show user achievements based on usage."""
|
|
507
|
+
stats = engine.get_stats()
|
|
508
|
+
|
|
509
|
+
achievements = []
|
|
510
|
+
total_cmds = stats.get("total_commands", 0)
|
|
511
|
+
cmd_counts = stats.get("command_counts", {})
|
|
512
|
+
|
|
513
|
+
# Check achievements
|
|
514
|
+
if total_cmds >= 1:
|
|
515
|
+
achievements.append(("First Steps", "Ran your first command"))
|
|
516
|
+
if total_cmds >= 10:
|
|
517
|
+
achievements.append(("Getting Started", "Ran 10+ commands"))
|
|
518
|
+
if total_cmds >= 50:
|
|
519
|
+
achievements.append(("Power User", "Ran 50+ commands"))
|
|
520
|
+
if total_cmds >= 100:
|
|
521
|
+
achievements.append(("Expert", "Ran 100+ commands"))
|
|
522
|
+
|
|
523
|
+
if cmd_counts.get("learn", 0) >= 1:
|
|
524
|
+
achievements.append(("Pattern Learner", "Learned from git history"))
|
|
525
|
+
if cmd_counts.get("sync-claude", 0) >= 1:
|
|
526
|
+
achievements.append(("Claude Whisperer", "Synced patterns to Claude"))
|
|
527
|
+
if cmd_counts.get("morning", 0) >= 5:
|
|
528
|
+
achievements.append(("Early Bird", "Used morning briefing 5+ times"))
|
|
529
|
+
if cmd_counts.get("ship", 0) >= 10:
|
|
530
|
+
achievements.append(("Quality Shipper", "Used pre-commit checks 10+ times"))
|
|
531
|
+
if cmd_counts.get("health", 0) >= 1 and cmd_counts.get("fix-all", 0) >= 1:
|
|
532
|
+
achievements.append(("Code Doctor", "Used health checks and fixes"))
|
|
533
|
+
|
|
534
|
+
if stats.get("patterns_learned", 0) >= 10:
|
|
535
|
+
achievements.append(("Pattern Master", "Learned 10+ patterns"))
|
|
536
|
+
|
|
537
|
+
if stats.get("days_active", 0) >= 7:
|
|
538
|
+
achievements.append(("Week Warrior", "Active for 7+ days"))
|
|
539
|
+
if stats.get("days_active", 0) >= 30:
|
|
540
|
+
achievements.append(("Monthly Maven", "Active for 30+ days"))
|
|
541
|
+
|
|
542
|
+
if achievements:
|
|
543
|
+
print(" ACHIEVEMENTS UNLOCKED")
|
|
544
|
+
print(" " + "-" * 30)
|
|
545
|
+
for name, desc in achievements:
|
|
546
|
+
print(f" * {name}: {desc}")
|
|
547
|
+
print()
|
|
548
|
+
|
|
549
|
+
|
|
550
|
+
def cmd_explain(args):
|
|
551
|
+
"""Show detailed explanation for a command."""
|
|
552
|
+
command = args.command
|
|
553
|
+
|
|
554
|
+
if command in EXPLAIN_CONTENT:
|
|
555
|
+
print(EXPLAIN_CONTENT[command])
|
|
556
|
+
else:
|
|
557
|
+
available = ", ".join(EXPLAIN_CONTENT.keys())
|
|
558
|
+
print(f"\nNo detailed explanation available for '{command}'")
|
|
559
|
+
print(f"Available: {available}")
|
|
560
|
+
print("\nTry: empathy cheatsheet for a quick reference")
|
|
561
|
+
print()
|
|
562
|
+
|
|
563
|
+
|
|
564
|
+
def cmd_achievements(args):
|
|
565
|
+
"""Show user achievements and progress."""
|
|
566
|
+
from empathy_os.discovery import get_engine
|
|
567
|
+
|
|
568
|
+
engine = get_engine()
|
|
569
|
+
stats = engine.get_stats()
|
|
570
|
+
|
|
571
|
+
print()
|
|
572
|
+
print("=" * 60)
|
|
573
|
+
print(" YOUR EMPATHY FRAMEWORK JOURNEY")
|
|
574
|
+
print("=" * 60)
|
|
575
|
+
print()
|
|
576
|
+
|
|
577
|
+
# Stats summary
|
|
578
|
+
print(" STATISTICS")
|
|
579
|
+
print(" " + "-" * 40)
|
|
580
|
+
print(f" Total commands run: {stats.get('total_commands', 0)}")
|
|
581
|
+
print(f" Days active: {stats.get('days_active', 0)}")
|
|
582
|
+
print(f" Patterns learned: {stats.get('patterns_learned', 0)}")
|
|
583
|
+
shown = stats.get("tips_shown", 0)
|
|
584
|
+
total = shown + stats.get("tips_remaining", 0)
|
|
585
|
+
print(f" Tips discovered: {shown}/{total}")
|
|
586
|
+
print()
|
|
587
|
+
|
|
588
|
+
# Command breakdown
|
|
589
|
+
cmd_counts = stats.get("command_counts", {})
|
|
590
|
+
if cmd_counts:
|
|
591
|
+
print(" COMMAND USAGE")
|
|
592
|
+
print(" " + "-" * 40)
|
|
593
|
+
sorted_cmds = sorted(cmd_counts.items(), key=lambda x: x[1], reverse=True)
|
|
594
|
+
for cmd, count in sorted_cmds[:10]:
|
|
595
|
+
bar = "*" * min(count, 20)
|
|
596
|
+
print(f" {cmd:15} {count:4} {bar}")
|
|
597
|
+
print()
|
|
598
|
+
|
|
599
|
+
# Achievements
|
|
600
|
+
_show_achievements(engine)
|
|
601
|
+
|
|
602
|
+
print("=" * 60)
|
|
603
|
+
print()
|
|
604
|
+
|
|
605
|
+
|
|
46
606
|
def cmd_init(args):
|
|
47
607
|
"""Initialize a new Empathy Framework project"""
|
|
48
608
|
config_format = args.format
|
|
@@ -83,10 +643,21 @@ def cmd_validate(args):
|
|
|
83
643
|
logger.info(f" Confidence Threshold: {config.confidence_threshold}")
|
|
84
644
|
logger.info(f" Persistence Backend: {config.persistence_backend}")
|
|
85
645
|
logger.info(f" Metrics Enabled: {config.metrics_enabled}")
|
|
86
|
-
except
|
|
646
|
+
except (OSError, FileNotFoundError) as e:
|
|
647
|
+
# Config file not found or cannot be read
|
|
648
|
+
logger.error(f"Configuration file error: {e}")
|
|
649
|
+
logger.error(f"✗ Cannot read configuration file: {e}")
|
|
650
|
+
sys.exit(1)
|
|
651
|
+
except ValueError as e:
|
|
652
|
+
# Invalid configuration values
|
|
87
653
|
logger.error(f"Configuration validation failed: {e}")
|
|
88
654
|
logger.error(f"✗ Configuration invalid: {e}")
|
|
89
655
|
sys.exit(1)
|
|
656
|
+
except Exception as e:
|
|
657
|
+
# Unexpected errors during config validation
|
|
658
|
+
logger.exception(f"Unexpected error validating configuration: {e}")
|
|
659
|
+
logger.error(f"✗ Configuration invalid: {e}")
|
|
660
|
+
sys.exit(1)
|
|
90
661
|
|
|
91
662
|
|
|
92
663
|
def cmd_info(args):
|
|
@@ -177,8 +748,19 @@ def cmd_patterns_export(args):
|
|
|
177
748
|
|
|
178
749
|
logger.info(f"Loaded {len(library.patterns)} patterns from {input_file}")
|
|
179
750
|
logger.info(f"✓ Loaded {len(library.patterns)} patterns from {input_file}")
|
|
751
|
+
except (OSError, FileNotFoundError) as e:
|
|
752
|
+
# Input file not found or cannot be read
|
|
753
|
+
logger.error(f"Pattern file error: {e}")
|
|
754
|
+
logger.error(f"✗ Cannot read pattern file: {e}")
|
|
755
|
+
sys.exit(1)
|
|
756
|
+
except (ValueError, KeyError) as e:
|
|
757
|
+
# Invalid pattern data format
|
|
758
|
+
logger.error(f"Pattern data error: {e}")
|
|
759
|
+
logger.error(f"✗ Invalid pattern data: {e}")
|
|
760
|
+
sys.exit(1)
|
|
180
761
|
except Exception as e:
|
|
181
|
-
|
|
762
|
+
# Unexpected errors loading patterns
|
|
763
|
+
logger.exception(f"Unexpected error loading patterns: {e}")
|
|
182
764
|
logger.error(f"✗ Failed to load patterns: {e}")
|
|
183
765
|
sys.exit(1)
|
|
184
766
|
|
|
@@ -191,8 +773,14 @@ def cmd_patterns_export(args):
|
|
|
191
773
|
|
|
192
774
|
logger.info(f"Saved {len(library.patterns)} patterns to {output_file}")
|
|
193
775
|
logger.info(f"✓ Saved {len(library.patterns)} patterns to {output_file}")
|
|
776
|
+
except (OSError, FileNotFoundError, PermissionError) as e:
|
|
777
|
+
# Cannot write output file
|
|
778
|
+
logger.error(f"Pattern file write error: {e}")
|
|
779
|
+
logger.error(f"✗ Cannot write pattern file: {e}")
|
|
780
|
+
sys.exit(1)
|
|
194
781
|
except Exception as e:
|
|
195
|
-
|
|
782
|
+
# Unexpected errors saving patterns
|
|
783
|
+
logger.exception(f"Unexpected error saving patterns: {e}")
|
|
196
784
|
logger.error(f"✗ Failed to save patterns: {e}")
|
|
197
785
|
sys.exit(1)
|
|
198
786
|
|
|
@@ -224,7 +812,7 @@ def cmd_patterns_resolve(args):
|
|
|
224
812
|
if not args.root_cause or not args.fix:
|
|
225
813
|
print("✗ --root-cause and --fix are required when resolving a bug")
|
|
226
814
|
print(
|
|
227
|
-
" Example: empathy patterns resolve bug_123 --root-cause 'Null check' --fix 'Added ?.'"
|
|
815
|
+
" Example: empathy patterns resolve bug_123 --root-cause 'Null check' --fix 'Added ?.'",
|
|
228
816
|
)
|
|
229
817
|
sys.exit(1)
|
|
230
818
|
|
|
@@ -311,8 +899,8 @@ def cmd_review(args):
|
|
|
311
899
|
"files": args.files,
|
|
312
900
|
"staged_only": args.staged,
|
|
313
901
|
"severity_threshold": args.severity,
|
|
314
|
-
}
|
|
315
|
-
)
|
|
902
|
+
},
|
|
903
|
+
),
|
|
316
904
|
)
|
|
317
905
|
|
|
318
906
|
# Output results
|
|
@@ -413,7 +1001,7 @@ def cmd_health(args):
|
|
|
413
1001
|
print(f"\n⚠ Skipped {len(result['skipped'])} issue(s) (could not auto-fix)")
|
|
414
1002
|
else:
|
|
415
1003
|
print(
|
|
416
|
-
f"\n⚠ Skipped {len(result['skipped'])} issue(s) (use --interactive to review)"
|
|
1004
|
+
f"\n⚠ Skipped {len(result['skipped'])} issue(s) (use --interactive to review)",
|
|
417
1005
|
)
|
|
418
1006
|
|
|
419
1007
|
if result["failed"]:
|
|
@@ -484,8 +1072,19 @@ def cmd_metrics_show(args):
|
|
|
484
1072
|
logger.info(f" Level 3: {stats.get('level_3_count', 0)} uses")
|
|
485
1073
|
logger.info(f" Level 4: {stats.get('level_4_count', 0)} uses")
|
|
486
1074
|
logger.info(f" Level 5: {stats.get('level_5_count', 0)} uses")
|
|
1075
|
+
except (OSError, FileNotFoundError) as e:
|
|
1076
|
+
# Database file not found
|
|
1077
|
+
logger.error(f"Metrics database error: {e}")
|
|
1078
|
+
logger.error(f"✗ Cannot read metrics database: {e}")
|
|
1079
|
+
sys.exit(1)
|
|
1080
|
+
except KeyError as e:
|
|
1081
|
+
# User not found in database
|
|
1082
|
+
logger.error(f"User not found in metrics: {e}")
|
|
1083
|
+
logger.error(f"✗ User {user_id} not found: {e}")
|
|
1084
|
+
sys.exit(1)
|
|
487
1085
|
except Exception as e:
|
|
488
|
-
|
|
1086
|
+
# Unexpected errors retrieving metrics
|
|
1087
|
+
logger.exception(f"Unexpected error retrieving metrics for user {user_id}: {e}")
|
|
489
1088
|
logger.error(f"✗ Failed to retrieve metrics: {e}")
|
|
490
1089
|
sys.exit(1)
|
|
491
1090
|
|
|
@@ -539,7 +1138,17 @@ def cmd_run(args):
|
|
|
539
1138
|
persistence_enabled=config.persistence_enabled,
|
|
540
1139
|
)
|
|
541
1140
|
print("✓ Empathy OS initialized")
|
|
1141
|
+
except ValueError as e:
|
|
1142
|
+
# Invalid configuration parameters
|
|
1143
|
+
print(f"✗ Configuration error: {e}")
|
|
1144
|
+
sys.exit(1)
|
|
1145
|
+
except (OSError, FileNotFoundError, PermissionError) as e:
|
|
1146
|
+
# Cannot access required files/directories
|
|
1147
|
+
print(f"✗ File system error: {e}")
|
|
1148
|
+
sys.exit(1)
|
|
542
1149
|
except Exception as e:
|
|
1150
|
+
# Unexpected initialization failure
|
|
1151
|
+
logger.exception(f"Unexpected error initializing Empathy OS: {e}")
|
|
543
1152
|
print(f"✗ Failed to initialize Empathy OS: {e}")
|
|
544
1153
|
sys.exit(1)
|
|
545
1154
|
|
|
@@ -605,9 +1214,8 @@ def cmd_run(args):
|
|
|
605
1214
|
for pred in response.predictions:
|
|
606
1215
|
print(f" • {pred}")
|
|
607
1216
|
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
)
|
|
1217
|
+
conf = f"{response.confidence:.0%}"
|
|
1218
|
+
print(f"\n Level: {response.level} | Confidence: {conf} | Time: {duration:.0f}ms")
|
|
611
1219
|
print()
|
|
612
1220
|
|
|
613
1221
|
# Ask for feedback
|
|
@@ -624,7 +1232,12 @@ def cmd_run(args):
|
|
|
624
1232
|
except KeyboardInterrupt:
|
|
625
1233
|
print("\n\n👋 Goodbye!")
|
|
626
1234
|
break
|
|
1235
|
+
except (ValueError, KeyError) as e:
|
|
1236
|
+
# Invalid input or response structure
|
|
1237
|
+
print(f"\n✗ Input error: {e}\n")
|
|
627
1238
|
except Exception as e:
|
|
1239
|
+
# Unexpected errors in interactive loop - log and continue
|
|
1240
|
+
logger.exception(f"Unexpected error in interactive loop: {e}")
|
|
628
1241
|
print(f"\n✗ Error: {e}\n")
|
|
629
1242
|
|
|
630
1243
|
|
|
@@ -667,7 +1280,13 @@ def cmd_inspect(args):
|
|
|
667
1280
|
print(f"✗ Pattern library not found: {db_path}")
|
|
668
1281
|
print(" Tip: Use 'empathy-framework wizard' to set up your first project")
|
|
669
1282
|
sys.exit(1)
|
|
1283
|
+
except (ValueError, KeyError) as e:
|
|
1284
|
+
# Invalid pattern data format
|
|
1285
|
+
print(f"✗ Invalid pattern data: {e}")
|
|
1286
|
+
sys.exit(1)
|
|
670
1287
|
except Exception as e:
|
|
1288
|
+
# Unexpected errors loading patterns
|
|
1289
|
+
logger.exception(f"Unexpected error loading patterns: {e}")
|
|
671
1290
|
print(f"✗ Failed to load patterns: {e}")
|
|
672
1291
|
sys.exit(1)
|
|
673
1292
|
|
|
@@ -689,7 +1308,17 @@ def cmd_inspect(args):
|
|
|
689
1308
|
for level in range(1, 6):
|
|
690
1309
|
count = stats.get(f"level_{level}_count", 0)
|
|
691
1310
|
print(f" Level {level}: {count} times")
|
|
1311
|
+
except (OSError, FileNotFoundError) as e:
|
|
1312
|
+
# Database file not found
|
|
1313
|
+
print(f"✗ Metrics database not found: {e}")
|
|
1314
|
+
sys.exit(1)
|
|
1315
|
+
except KeyError as e:
|
|
1316
|
+
# User not found
|
|
1317
|
+
print(f"✗ User {user_id} not found: {e}")
|
|
1318
|
+
sys.exit(1)
|
|
692
1319
|
except Exception as e:
|
|
1320
|
+
# Unexpected errors loading metrics
|
|
1321
|
+
logger.exception(f"Unexpected error loading metrics: {e}")
|
|
693
1322
|
print(f"✗ Failed to load metrics: {e}")
|
|
694
1323
|
sys.exit(1)
|
|
695
1324
|
|
|
@@ -706,7 +1335,13 @@ def cmd_inspect(args):
|
|
|
706
1335
|
print("\n Users:")
|
|
707
1336
|
for uid in users:
|
|
708
1337
|
print(f" • {uid}")
|
|
1338
|
+
except (OSError, FileNotFoundError) as e:
|
|
1339
|
+
# State directory not found
|
|
1340
|
+
print(f"✗ State directory not found: {e}")
|
|
1341
|
+
sys.exit(1)
|
|
709
1342
|
except Exception as e:
|
|
1343
|
+
# Unexpected errors loading state
|
|
1344
|
+
logger.exception(f"Unexpected error loading state: {e}")
|
|
710
1345
|
print(f"✗ Failed to load state: {e}")
|
|
711
1346
|
sys.exit(1)
|
|
712
1347
|
|
|
@@ -758,7 +1393,17 @@ def cmd_export(args):
|
|
|
758
1393
|
print(f"✗ Source file not found: {db_path}")
|
|
759
1394
|
print(" Tip: Patterns are saved automatically when using the framework")
|
|
760
1395
|
sys.exit(1)
|
|
1396
|
+
except (OSError, PermissionError) as e:
|
|
1397
|
+
# Cannot write output file
|
|
1398
|
+
print(f"✗ Cannot write to file: {e}")
|
|
1399
|
+
sys.exit(1)
|
|
1400
|
+
except (ValueError, KeyError) as e:
|
|
1401
|
+
# Invalid pattern data
|
|
1402
|
+
print(f"✗ Invalid pattern data: {e}")
|
|
1403
|
+
sys.exit(1)
|
|
761
1404
|
except Exception as e:
|
|
1405
|
+
# Unexpected errors during export
|
|
1406
|
+
logger.exception(f"Unexpected error exporting patterns: {e}")
|
|
762
1407
|
print(f"✗ Export failed: {e}")
|
|
763
1408
|
sys.exit(1)
|
|
764
1409
|
|
|
@@ -811,7 +1456,17 @@ def cmd_import(args):
|
|
|
811
1456
|
except FileNotFoundError:
|
|
812
1457
|
print(f"✗ Input file not found: {input_file}")
|
|
813
1458
|
sys.exit(1)
|
|
1459
|
+
except (ValueError, KeyError) as e:
|
|
1460
|
+
# Invalid pattern data format
|
|
1461
|
+
print(f"✗ Invalid pattern data: {e}")
|
|
1462
|
+
sys.exit(1)
|
|
1463
|
+
except (OSError, PermissionError) as e:
|
|
1464
|
+
# Cannot read input or write to database
|
|
1465
|
+
print(f"✗ File access error: {e}")
|
|
1466
|
+
sys.exit(1)
|
|
814
1467
|
except Exception as e:
|
|
1468
|
+
# Unexpected errors during import
|
|
1469
|
+
logger.exception(f"Unexpected error importing patterns: {e}")
|
|
815
1470
|
print(f"✗ Import failed: {e}")
|
|
816
1471
|
sys.exit(1)
|
|
817
1472
|
|
|
@@ -855,13 +1510,29 @@ def cmd_wizard(args):
|
|
|
855
1510
|
print("\n3. Which LLM provider will you use?")
|
|
856
1511
|
print(" [1] Anthropic Claude ⭐ Recommended")
|
|
857
1512
|
print(" [2] OpenAI GPT-4")
|
|
858
|
-
print(" [3]
|
|
859
|
-
print(" [4]
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
1513
|
+
print(" [3] Google Gemini (2M context)")
|
|
1514
|
+
print(" [4] Local (Ollama)")
|
|
1515
|
+
print(" [5] Hybrid (mix best models from each provider)")
|
|
1516
|
+
print(" [6] Skip (configure later)")
|
|
1517
|
+
|
|
1518
|
+
llm_choice = input("\nYour choice (1-6) [1]: ").strip() or "1"
|
|
1519
|
+
llm_map = {
|
|
1520
|
+
"1": "anthropic",
|
|
1521
|
+
"2": "openai",
|
|
1522
|
+
"3": "google",
|
|
1523
|
+
"4": "ollama",
|
|
1524
|
+
"5": "hybrid",
|
|
1525
|
+
"6": None,
|
|
1526
|
+
}
|
|
863
1527
|
llm_provider = llm_map.get(llm_choice, "anthropic")
|
|
864
1528
|
|
|
1529
|
+
# If hybrid selected, launch interactive tier selection
|
|
1530
|
+
if llm_provider == "hybrid":
|
|
1531
|
+
from empathy_os.models.provider_config import configure_hybrid_interactive
|
|
1532
|
+
|
|
1533
|
+
configure_hybrid_interactive()
|
|
1534
|
+
llm_provider = None # Already saved by hybrid config
|
|
1535
|
+
|
|
865
1536
|
# Step 4: User ID
|
|
866
1537
|
print("\n4. What user ID should we use?")
|
|
867
1538
|
user_id = input("User ID [default_user]: ").strip() or "default_user"
|
|
@@ -922,14 +1593,541 @@ llm_provider: "{llm_provider}"
|
|
|
922
1593
|
print("\nNext steps:")
|
|
923
1594
|
print(f" 1. Edit {output_file} to customize settings")
|
|
924
1595
|
|
|
925
|
-
if llm_provider in ["anthropic", "openai"]:
|
|
926
|
-
|
|
1596
|
+
if llm_provider in ["anthropic", "openai", "google"]:
|
|
1597
|
+
env_var_map = {
|
|
1598
|
+
"anthropic": "ANTHROPIC_API_KEY",
|
|
1599
|
+
"openai": "OPENAI_API_KEY",
|
|
1600
|
+
"google": "GOOGLE_API_KEY",
|
|
1601
|
+
}
|
|
1602
|
+
env_var = env_var_map.get(llm_provider, "API_KEY")
|
|
927
1603
|
print(f" 2. Set {env_var} environment variable")
|
|
928
1604
|
|
|
929
1605
|
print(" 3. Run: empathy-framework run --config empathy.config.yml")
|
|
930
1606
|
print("\nHappy empathizing! 🧠✨\n")
|
|
931
1607
|
|
|
932
1608
|
|
|
1609
|
+
def cmd_provider_hybrid(args):
|
|
1610
|
+
"""Configure hybrid mode - pick best models for each tier."""
|
|
1611
|
+
from empathy_os.models.provider_config import configure_hybrid_interactive
|
|
1612
|
+
|
|
1613
|
+
configure_hybrid_interactive()
|
|
1614
|
+
|
|
1615
|
+
|
|
1616
|
+
def cmd_provider_show(args):
|
|
1617
|
+
"""Show current provider configuration."""
|
|
1618
|
+
from empathy_os.models.provider_config import ProviderConfig
|
|
1619
|
+
from empathy_os.workflows.config import WorkflowConfig
|
|
1620
|
+
|
|
1621
|
+
print("\n" + "=" * 60)
|
|
1622
|
+
print("Provider Configuration")
|
|
1623
|
+
print("=" * 60)
|
|
1624
|
+
|
|
1625
|
+
# Detect available providers
|
|
1626
|
+
config = ProviderConfig.auto_detect()
|
|
1627
|
+
print(
|
|
1628
|
+
f"\nDetected API keys for: {', '.join(config.available_providers) if config.available_providers else 'None'}",
|
|
1629
|
+
)
|
|
1630
|
+
|
|
1631
|
+
# Load workflow config
|
|
1632
|
+
wf_config = WorkflowConfig.load()
|
|
1633
|
+
print(f"\nDefault provider: {wf_config.default_provider}")
|
|
1634
|
+
|
|
1635
|
+
# Show effective models
|
|
1636
|
+
print("\nEffective model mapping:")
|
|
1637
|
+
if wf_config.custom_models and "hybrid" in wf_config.custom_models:
|
|
1638
|
+
hybrid = wf_config.custom_models["hybrid"]
|
|
1639
|
+
for tier in ["cheap", "capable", "premium"]:
|
|
1640
|
+
model = hybrid.get(tier, "not configured")
|
|
1641
|
+
print(f" {tier:8} → {model}")
|
|
1642
|
+
else:
|
|
1643
|
+
from empathy_os.models import MODEL_REGISTRY
|
|
1644
|
+
|
|
1645
|
+
provider = wf_config.default_provider
|
|
1646
|
+
if provider in MODEL_REGISTRY:
|
|
1647
|
+
for tier in ["cheap", "capable", "premium"]:
|
|
1648
|
+
model_info = MODEL_REGISTRY[provider].get(tier)
|
|
1649
|
+
if model_info:
|
|
1650
|
+
print(f" {tier:8} → {model_info.id} ({provider})")
|
|
1651
|
+
|
|
1652
|
+
print()
|
|
1653
|
+
|
|
1654
|
+
|
|
1655
|
+
def cmd_provider_set(args):
|
|
1656
|
+
"""Set default provider."""
|
|
1657
|
+
from pathlib import Path
|
|
1658
|
+
|
|
1659
|
+
import yaml
|
|
1660
|
+
|
|
1661
|
+
provider = args.name
|
|
1662
|
+
workflows_path = Path(".empathy/workflows.yaml")
|
|
1663
|
+
|
|
1664
|
+
# Load existing config or create new
|
|
1665
|
+
if workflows_path.exists():
|
|
1666
|
+
with open(workflows_path) as f:
|
|
1667
|
+
config = yaml.safe_load(f) or {}
|
|
1668
|
+
else:
|
|
1669
|
+
config = {}
|
|
1670
|
+
workflows_path.parent.mkdir(parents=True, exist_ok=True)
|
|
1671
|
+
|
|
1672
|
+
config["default_provider"] = provider
|
|
1673
|
+
|
|
1674
|
+
with open(workflows_path, "w") as f:
|
|
1675
|
+
yaml.dump(config, f, default_flow_style=False, sort_keys=False)
|
|
1676
|
+
|
|
1677
|
+
print(f"✓ Default provider set to: {provider}")
|
|
1678
|
+
print(f" Saved to: {workflows_path}")
|
|
1679
|
+
|
|
1680
|
+
if provider == "hybrid":
|
|
1681
|
+
print("\n Tip: Run 'empathy provider hybrid' to customize tier models")
|
|
1682
|
+
|
|
1683
|
+
|
|
1684
|
+
+def cmd_sync_claude(args):
+    """Sync patterns to Claude Code rules directory."""
+    import json as json_mod
+    from pathlib import Path
+
+    patterns_dir = Path(args.patterns_dir)
+    output_dir = Path(args.output_dir)
+
+    print("=" * 60)
+    print(" SYNC PATTERNS TO CLAUDE CODE")
+    print("=" * 60 + "\n")
+
+    if not patterns_dir.exists():
+        print(f"✗ Patterns directory not found: {patterns_dir}")
+        print(" Run 'empathy learn --analyze 20' first to learn patterns")
+        return 1
+
+    # Create output directory
+    output_dir.mkdir(parents=True, exist_ok=True)
+
+    synced_count = 0
+    pattern_files = ["debugging.json", "security.json", "tech_debt.json", "inspection.json"]
+
+    for pattern_file in pattern_files:
+        source_path = patterns_dir / pattern_file
+        if not source_path.exists():
+            continue
+
+        try:
+            with open(source_path) as f:
+                data = json_mod.load(f)
+
+            patterns = data.get("patterns", data.get("items", []))
+            if not patterns:
+                continue
+
+            # Generate markdown rule file
+            category = pattern_file.replace(".json", "")
+            rule_content = _generate_claude_rule(category, patterns)
+
+            # Write rule file
+            rule_file = output_dir / f"{category}.md"
+            with open(rule_file, "w") as f:
+                f.write(rule_content)
+
+            print(f" ✓ {category}: {len(patterns)} patterns → {rule_file}")
+            synced_count += len(patterns)
+
+        except (json_mod.JSONDecodeError, OSError) as e:
+            print(f" ✗ Failed to process {pattern_file}: {e}")
+
+    print(f"\n{'─' * 60}")
+    print(f" Total: {synced_count} patterns synced to {output_dir}")
+    print("=" * 60 + "\n")
+
+    if synced_count == 0:
+        print("No patterns to sync. Run 'empathy learn' first.")
+        return 1
+
+    return 0
+
+
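For orientation, the sync reads pattern files from `--patterns-dir` and expects a top-level `patterns` (or `items`) list. The sketch below writes one such file; the file name matches the categories the command looks for, but the field values are invented and the keys mirror what `_generate_claude_rule` reads for the debugging category.

```python
# Illustrative shape of a patterns file that cmd_sync_claude will pick up.
# Values are made up; keys follow what _generate_claude_rule reads for "debugging".
import json
from pathlib import Path

patterns_dir = Path("./patterns")
patterns_dir.mkdir(parents=True, exist_ok=True)
example = {
    "patterns": [
        {
            "bug_type": "off-by-one in pagination",
            "root_cause": "end index treated as inclusive",
            "fix": "use range(start, end) consistently",
            "files_affected": ["api/pagination.py"],
        }
    ]
}
(patterns_dir / "debugging.json").write_text(json.dumps(example, indent=2))
```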
+def _generate_claude_rule(category: str, patterns: list) -> str:
+    """Generate a Claude Code rule file from patterns."""
+    lines = [
+        f"# {category.replace('_', ' ').title()} Patterns",
+        "",
+        "Auto-generated from Empathy Framework learned patterns.",
+        f"Total patterns: {len(patterns)}",
+        "",
+        "---",
+        "",
+    ]
+
+    if category == "debugging":
+        lines.extend(
+            [
+                "## Bug Fix Patterns",
+                "",
+                "When debugging similar issues, consider these historical fixes:",
+                "",
+            ],
+        )
+        for p in patterns[:20]:  # Limit to 20 most recent
+            bug_type = p.get("bug_type", "unknown")
+            root_cause = p.get("root_cause", "Unknown")
+            fix = p.get("fix", "See commit history")
+            files = p.get("files_affected", [])
+
+            lines.append(f"### {bug_type}")
+            lines.append(f"- **Root cause**: {root_cause}")
+            lines.append(f"- **Fix**: {fix}")
+            if files:
+                lines.append(f"- **Files**: {', '.join(files[:3])}")
+            lines.append("")
+
+    elif category == "security":
+        lines.extend(
+            [
+                "## Security Decisions",
+                "",
+                "Previously reviewed security items:",
+                "",
+            ],
+        )
+        for p in patterns[:20]:
+            decision = p.get("decision", "unknown")
+            reason = p.get("reason", "")
+            lines.append(f"- **{p.get('type', 'unknown')}**: {decision}")
+            if reason:
+                lines.append(f" - Reason: {reason}")
+            lines.append("")
+
+    elif category == "tech_debt":
+        lines.extend(
+            [
+                "## Tech Debt Tracking",
+                "",
+                "Known technical debt items:",
+                "",
+            ],
+        )
+        for p in patterns[:20]:
+            lines.append(f"- {p.get('description', str(p))}")
+
+    else:
+        lines.extend(
+            [
+                f"## {category.title()} Items",
+                "",
+            ],
+        )
+        for p in patterns[:20]:
+            lines.append(f"- {p.get('description', str(p)[:100])}")
+
+    return "\n".join(lines)
+
+
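A quick usage sketch of `_generate_claude_rule` as defined above (the function is assumed to be in scope; the pattern entry is invented for illustration):

```python
# Hypothetical call to _generate_claude_rule; the input pattern is made up.
patterns = [
    {
        "bug_type": "race condition on cache refresh",
        "root_cause": "two workers refreshed the same key",
        "fix": "guard the refresh with a per-key lock",
        "files_affected": ["cache/refresh.py"],
    }
]
rule_md = _generate_claude_rule("debugging", patterns)
print(rule_md)  # starts with "# Debugging Patterns" and one "### ..." entry
```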
+def _extract_workflow_content(final_output):
+    """Extract readable content from workflow final_output.
+
+    Workflows return their results in various formats - this extracts
+    the actual content users want to see.
+    """
+    if final_output is None:
+        return None
+
+    # If it's already a string, return it
+    if isinstance(final_output, str):
+        return final_output
+
+    # If it's a dict, try to extract meaningful content
+    if isinstance(final_output, dict):
+        # Common keys that contain the main output
+        # formatted_report is first - preferred for security-audit and other formatted outputs
+        content_keys = [
+            "formatted_report",  # Human-readable formatted output (security-audit, etc.)
+            "answer",
+            "synthesis",
+            "result",
+            "output",
+            "content",
+            "report",
+            "summary",
+            "analysis",
+            "review",
+            "documentation",
+            "response",
+            "recommendations",
+            "findings",
+            "tests",
+            "plan",
+        ]
+        for key in content_keys:
+            if final_output.get(key):
+                val = final_output[key]
+                if isinstance(val, str):
+                    return val
+                if isinstance(val, dict):
+                    # Recursively extract
+                    return _extract_workflow_content(val)
+
+        # If no common key found, try to format the dict nicely
+        # Look for any string value that's substantial
+        for _key, val in final_output.items():
+            if isinstance(val, str) and len(val) > 100:
+                return val
+
+        # Last resort: return a formatted version
+        import json
+
+        return json.dumps(final_output, indent=2)
+
+    # For lists or other types, convert to string
+    return str(final_output)
+
+
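Illustrative behaviour of `_extract_workflow_content` (function assumed in scope; the inputs are made up): nested dicts resolve through the content-key list until a string is found, and non-dict, non-string values fall back to `str()`.

```python
# Example inputs are invented; behaviour follows the function defined above.
nested = {"result": {"formatted_report": "All checks passed."}}
print(_extract_workflow_content(nested))      # -> "All checks passed."
print(_extract_workflow_content(["a", "b"]))  # -> "['a', 'b']" via the str() fallback
```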
+def cmd_workflow(args):
+    """Multi-model workflow management and execution."""
+    import asyncio
+    import json as json_mod
+
+    action = args.action
+
+    if action == "list":
+        # List available workflows
+        workflows = get_workflow_list()
+
+        if args.json:
+            print(json_mod.dumps(workflows, indent=2))
+        else:
+            print("\n" + "=" * 60)
+            print(" MULTI-MODEL WORKFLOWS")
+            print("=" * 60 + "\n")
+
+            for wf in workflows:
+                print(f" {wf['name']:15} {wf['description']}")
+                stages = " → ".join(f"{s}({wf['tier_map'][s]})" for s in wf["stages"])
+                print(f" Stages: {stages}")
+                print()
+
+            print("-" * 60)
+            print(" Use: empathy workflow describe <name>")
+            print(" Use: empathy workflow run <name> [--input JSON]")
+            print("=" * 60 + "\n")
+
+    elif action == "describe":
+        # Describe a specific workflow
+        name = args.name
+        if not name:
+            print("Error: workflow name required")
+            print("Usage: empathy workflow describe <name>")
+            return 1
+
+        try:
+            workflow_cls = get_workflow(name)
+            provider = getattr(args, "provider", None)
+            workflow = workflow_cls(provider=provider)
+
+            # Get actual provider from workflow (may come from config)
+            actual_provider = getattr(workflow, "_provider_str", provider or "anthropic")
+
+            if args.json:
+                info = {
+                    "name": workflow.name,
+                    "description": workflow.description,
+                    "provider": actual_provider,
+                    "stages": workflow.stages,
+                    "tier_map": {k: v.value for k, v in workflow.tier_map.items()},
+                    "models": {
+                        stage: workflow.get_model_for_tier(workflow.tier_map[stage])
+                        for stage in workflow.stages
+                    },
+                }
+                print(json_mod.dumps(info, indent=2))
+            else:
+                print(f"Provider: {actual_provider}")
+                print(workflow.describe())
+
+        except KeyError as e:
+            print(f"Error: {e}")
+            return 1
+
+    elif action == "run":
+        # Run a workflow
+        name = args.name
+        if not name:
+            print("Error: workflow name required")
+            print('Usage: empathy workflow run <name> --input \'{"key": "value"}\'')
+            return 1
+
+        try:
+            workflow_cls = get_workflow(name)
+
+            # Get provider from CLI arg, or fall back to config's default_provider
+            if args.provider:
+                provider = args.provider
+            else:
+                from empathy_os.workflows.config import WorkflowConfig
+
+                wf_config = WorkflowConfig.load()
+                provider = wf_config.default_provider
+            workflow = workflow_cls(provider=provider)
+
+            # Parse input
+            input_data = {}
+            if args.input:
+                input_data = json_mod.loads(args.input)
+
+            # Add test-gen specific flags to input_data (only for test-gen workflow)
+            if name == "test-gen":
+                if getattr(args, "write_tests", False):
+                    input_data["write_tests"] = True
+                if getattr(args, "output_dir", None):
+                    input_data["output_dir"] = args.output_dir
+
+            # Only print header when not in JSON mode
+            if not args.json:
+                print(f"\n Running workflow: {name} (provider: {provider})")
+                print("=" * 50)
+
+            # Execute workflow
+            result = asyncio.run(workflow.execute(**input_data))
+
+            # Extract the actual content - handle different result types
+            if hasattr(result, "final_output"):
+                output_content = _extract_workflow_content(result.final_output)
+            elif hasattr(result, "metadata") and isinstance(result.metadata, dict):
+                # Check for formatted_report in metadata (e.g., HealthCheckResult)
+                output_content = result.metadata.get("formatted_report")
+                if not output_content and hasattr(result, "summary"):
+                    output_content = result.summary
+            elif hasattr(result, "summary"):
+                output_content = result.summary
+            else:
+                output_content = str(result)
+
+            # Get timing - handle different attribute names
+            duration_ms = getattr(result, "total_duration_ms", None)
+            if duration_ms is None and hasattr(result, "duration_seconds"):
+                duration_ms = int(result.duration_seconds * 1000)
+
+            # Get cost info if available (check cost_report first, then direct cost attribute)
+            cost_report = getattr(result, "cost_report", None)
+            if cost_report and hasattr(cost_report, "total_cost"):
+                total_cost = cost_report.total_cost
+                savings = getattr(cost_report, "savings", 0.0)
+            else:
+                # Fall back to direct cost attribute (e.g., CodeReviewPipelineResult)
+                total_cost = getattr(result, "cost", 0.0)
+                savings = 0.0
+
+            if args.json:
+                # Extract error from various result types
+                error = getattr(result, "error", None)
+                if not error and not result.success:
+                    blockers = getattr(result, "blockers", [])
+                    if blockers:
+                        error = "; ".join(blockers)
+                    else:
+                        metadata = getattr(result, "metadata", {})
+                        error = metadata.get("error") if isinstance(metadata, dict) else None
+
+                # JSON output includes both content and metadata
+                # Include final_output for programmatic access (VSCode panels, etc.)
+                raw_final_output = getattr(result, "final_output", None)
+                if raw_final_output and isinstance(raw_final_output, dict):
+                    # Make a copy to avoid modifying the original
+                    final_output_serializable = {}
+                    for k, v in raw_final_output.items():
+                        # Skip non-serializable items
+                        if isinstance(v, set):
+                            final_output_serializable[k] = list(v)
+                        elif v is None or isinstance(v, str | int | float | bool | list | dict):
+                            final_output_serializable[k] = v
+                        else:
+                            try:
+                                final_output_serializable[k] = str(v)
+                            except Exception as e:  # noqa: BLE001
+                                # INTENTIONAL: Silently skip any non-serializable objects
+                                # This is a best-effort serialization for JSON output
+                                # We cannot predict all possible object types users might return
+                                logger.debug(f"Cannot serialize field {k}: {e}")
+                                pass
+                else:
+                    final_output_serializable = None
+
+                output = {
+                    "success": result.success,
+                    "output": output_content,
+                    "final_output": final_output_serializable,
+                    "cost": total_cost,
+                    "savings": savings,
+                    "duration_ms": duration_ms or 0,
+                    "error": error,
+                }
+                print(json_mod.dumps(output, indent=2))
+            # Display the actual results - this is what users want to see
+            elif result.success:
+                if output_content:
+                    print(f"\n{output_content}\n")
+                else:
+                    print("\n✓ Workflow completed successfully.\n")
+            else:
+                # Extract error from various result types
+                error_msg = getattr(result, "error", None)
+                if not error_msg:
+                    # Check for blockers (CodeReviewPipelineResult)
+                    blockers = getattr(result, "blockers", [])
+                    if blockers:
+                        error_msg = "; ".join(blockers)
+                    else:
+                        # Check metadata for error
+                        metadata = getattr(result, "metadata", {})
+                        error_msg = metadata.get("error") if isinstance(metadata, dict) else None
+                error_msg = error_msg or "Unknown error"
+                print(f"\n✗ Workflow failed: {error_msg}\n")
+
+        except KeyError as e:
+            print(f"Error: {e}")
+            return 1
+        except json_mod.JSONDecodeError as e:
+            print(f"Error parsing input JSON: {e}")
+            return 1
+
+    elif action == "config":
+        # Generate or show workflow configuration
+        from pathlib import Path
+
+        config_path = Path(".empathy/workflows.yaml")
+
+        if config_path.exists() and not getattr(args, "force", False):
+            print(f"Config already exists: {config_path}")
+            print("Use --force to overwrite")
+            print("\nCurrent configuration:")
+            print("-" * 40)
+            config = WorkflowConfig.load()
+            print(f" Default provider: {config.default_provider}")
+            if config.workflow_providers:
+                print(" Workflow providers:")
+                for wf, prov in config.workflow_providers.items():
+                    print(f" {wf}: {prov}")
+            if config.custom_models:
+                print(" Custom models configured")
+            return 0
+
+        # Create config directory and file
+        config_path.parent.mkdir(parents=True, exist_ok=True)
+        config_path.write_text(create_example_config())
+        print(f"✓ Created workflow config: {config_path}")
+        print("\nEdit this file to customize:")
+        print(" - Default provider (anthropic, openai, ollama)")
+        print(" - Per-workflow provider overrides")
+        print(" - Custom model mappings")
+        print(" - Model pricing")
+        print("\nOr use environment variables:")
+        print(" EMPATHY_WORKFLOW_PROVIDER=openai")
+        print(" EMPATHY_MODEL_PREMIUM=gpt-5.2")
+
+    else:
+        print(f"Unknown action: {action}")
+        print("Available: list, describe, run, config")
+        return 1
+
+    return 0
+
+
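The `run` branch above normalizes whatever result object a workflow returns through `getattr`/`hasattr` probing rather than a fixed type. A minimal sketch of that duck-typed normalization follows; `WorkflowResult` is a hypothetical stand-in, and `_extract_workflow_content` is assumed to be in scope.

```python
# Sketch of the duck-typed result normalization used by cmd_workflow.
# WorkflowResult is invented for illustration; real result classes vary.
from dataclasses import dataclass, field


@dataclass
class WorkflowResult:
    success: bool = True
    final_output: dict = field(default_factory=lambda: {"summary": "3 findings, 0 blockers"})
    duration_seconds: float = 1.2


result = WorkflowResult()
if hasattr(result, "final_output"):
    output_content = _extract_workflow_content(result.final_output)
else:
    output_content = getattr(result, "summary", str(result))

duration_ms = getattr(result, "total_duration_ms", None)
if duration_ms is None and hasattr(result, "duration_seconds"):
    duration_ms = int(result.duration_seconds * 1000)

print(output_content, duration_ms)  # -> 3 findings, 0 blockers 1200
```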
 def cmd_frameworks(args):
     """List and manage agent frameworks."""
     import json as json_mod
@@ -958,7 +2156,7 @@ def cmd_frameworks(args):
                 json_mod.dumps(
                     {"use_case": recommend_use_case, "recommended": recommended.value, **info},
                     indent=2,
-                )
+                ),
             )
         else:
             print(f"\nRecommended framework for '{recommend_use_case}': {info['name']}")
@@ -985,7 +2183,7 @@ def cmd_frameworks(args):
                     for f in frameworks
                 ],
                 indent=2,
-            )
+            ),
         )
     else:
         print("\n" + "=" * 60)
@@ -1010,6 +2208,9 @@ def cmd_frameworks(args):

 def main():
     """Main CLI entry point"""
+    # Configure Windows-compatible asyncio event loop policy
+    setup_asyncio_policy()
+
     parser = argparse.ArgumentParser(
         prog="empathy",
         description="Empathy - Build AI systems with 5 levels of empathy",
@@ -1062,32 +2263,45 @@ def main():
     parser_patterns_export.add_argument("input", help="Input file path")
     parser_patterns_export.add_argument("output", help="Output file path")
     parser_patterns_export.add_argument(
-        "--input-format",
+        "--input-format",
+        choices=["json", "sqlite"],
+        default="json",
     )
     parser_patterns_export.add_argument(
-        "--output-format",
+        "--output-format",
+        choices=["json", "sqlite"],
+        default="json",
     )
     parser_patterns_export.set_defaults(func=cmd_patterns_export)

     # Patterns resolve - mark investigating bugs as resolved
     parser_patterns_resolve = patterns_subparsers.add_parser(
-        "resolve",
+        "resolve",
+        help="Resolve investigating bug patterns",
     )
     parser_patterns_resolve.add_argument(
-        "bug_id",
+        "bug_id",
+        nargs="?",
+        help="Bug ID to resolve (omit to list investigating)",
     )
     parser_patterns_resolve.add_argument("--root-cause", help="Description of the root cause")
     parser_patterns_resolve.add_argument("--fix", help="Description of the fix applied")
     parser_patterns_resolve.add_argument("--fix-code", help="Code snippet of the fix")
     parser_patterns_resolve.add_argument("--time", type=int, help="Resolution time in minutes")
     parser_patterns_resolve.add_argument(
-        "--resolved-by",
+        "--resolved-by",
+        default="@developer",
+        help="Who resolved it",
     )
     parser_patterns_resolve.add_argument(
-        "--patterns-dir",
+        "--patterns-dir",
+        default="./patterns",
+        help="Path to patterns directory",
    )
     parser_patterns_resolve.add_argument(
-        "--no-regenerate",
+        "--no-regenerate",
+        action="store_true",
+        help="Skip regenerating summary",
     )
     parser_patterns_resolve.set_defaults(func=cmd_patterns_resolve)

@@ -1108,7 +2322,9 @@ def main():
     # State list
     parser_state_list = state_subparsers.add_parser("list", help="List saved states")
     parser_state_list.add_argument(
-        "--state-dir",
+        "--state-dir",
+        default="./empathy_state",
+        help="State directory path",
     )
     parser_state_list.set_defaults(func=cmd_state_list)

@@ -1117,7 +2333,10 @@ def main():
     parser_run.add_argument("--config", "-c", help="Configuration file path")
     parser_run.add_argument("--user-id", help="User ID (default: cli_user)")
     parser_run.add_argument(
-        "--level",
+        "--level",
+        type=int,
+        default=4,
+        help="Target empathy level (1-5, default: 4)",
     )
     parser_run.set_defaults(func=cmd_run)

@@ -1131,21 +2350,27 @@ def main():
     parser_inspect.add_argument("--user-id", help="User ID to filter by (optional)")
     parser_inspect.add_argument("--db", help="Database path (default: .empathy/patterns.db)")
     parser_inspect.add_argument(
-        "--state-dir",
+        "--state-dir",
+        help="State directory path (default: .empathy/state)",
     )
     parser_inspect.set_defaults(func=cmd_inspect)

     # Export command
     parser_export = subparsers.add_parser(
-        "export",
+        "export",
+        help="Export patterns to file for sharing/backup",
     )
     parser_export.add_argument("output", help="Output file path")
     parser_export.add_argument(
-        "--user-id",
+        "--user-id",
+        help="User ID to export (optional, exports all if not specified)",
     )
     parser_export.add_argument("--db", help="Database path (default: .empathy/patterns.db)")
     parser_export.add_argument(
-        "--format",
+        "--format",
+        default="json",
+        choices=["json"],
+        help="Export format (default: json)",
     )
     parser_export.set_defaults(func=cmd_export)

@@ -1157,20 +2382,59 @@ def main():

     # Wizard command (Interactive setup)
     parser_wizard = subparsers.add_parser(
-        "wizard",
+        "wizard",
+        help="Interactive setup wizard for creating configuration",
     )
     parser_wizard.set_defaults(func=cmd_wizard)

+    # Provider command (Model provider configuration)
+    parser_provider = subparsers.add_parser(
+        "provider",
+        help="Configure model providers and hybrid mode",
+    )
+    provider_subparsers = parser_provider.add_subparsers(dest="provider_cmd")
+
+    # provider hybrid - Interactive hybrid configuration
+    parser_provider_hybrid = provider_subparsers.add_parser(
+        "hybrid",
+        help="Configure hybrid mode - pick best models for each tier",
+    )
+    parser_provider_hybrid.set_defaults(func=cmd_provider_hybrid)
+
+    # provider show - Show current configuration
+    parser_provider_show = provider_subparsers.add_parser(
+        "show",
+        help="Show current provider configuration",
+    )
+    parser_provider_show.set_defaults(func=cmd_provider_show)
+
+    # provider set - Quick set single provider
+    parser_provider_set = provider_subparsers.add_parser(
+        "set",
+        help="Set default provider (anthropic, openai, google, ollama)",
+    )
+    parser_provider_set.add_argument(
+        "name",
+        choices=["anthropic", "openai", "google", "ollama", "hybrid"],
+        help="Provider name",
+    )
+    parser_provider_set.set_defaults(func=cmd_provider_set)
+
     # Status command (Session status assistant)
     parser_status = subparsers.add_parser(
-        "status",
+        "status",
+        help="Session status - prioritized project status report",
     )
     parser_status.add_argument(
-        "--patterns-dir",
+        "--patterns-dir",
+        default="./patterns",
+        help="Path to patterns directory",
     )
     parser_status.add_argument("--project-root", default=".", help="Project root directory")
     parser_status.add_argument(
-        "--force",
+        "--force",
+        action="store_true",
+        help="Force show status regardless of inactivity",
     )
     parser_status.add_argument("--full", action="store_true", help="Show all items (no limit)")
     parser_status.add_argument("--json", action="store_true", help="Output as JSON")
@@ -1185,7 +2449,8 @@ def main():

     # Review command (Pattern-based code review)
     parser_review = subparsers.add_parser(
-        "review",
+        "review",
+        help="Pattern-based code review against historical bugs",
     )
     parser_review.add_argument("files", nargs="*", help="Files to review (default: recent changes)")
     parser_review.add_argument("--staged", action="store_true", help="Review staged changes only")
@@ -1201,10 +2466,13 @@ def main():

     # Health command (Code Health Assistant)
     parser_health = subparsers.add_parser(
-        "health",
+        "health",
+        help="Code health assistant - run checks and auto-fix issues",
     )
     parser_health.add_argument(
-        "--deep",
+        "--deep",
+        action="store_true",
+        help="Run comprehensive checks (slower)",
     )
     parser_health.add_argument(
         "--check",
@@ -1213,20 +2481,31 @@ def main():
     )
     parser_health.add_argument("--fix", action="store_true", help="Auto-fix issues where possible")
     parser_health.add_argument(
-        "--dry-run",
+        "--dry-run",
+        action="store_true",
+        help="Show what would be fixed without applying",
     )
     parser_health.add_argument(
-        "--interactive",
+        "--interactive",
+        action="store_true",
+        help="Prompt before applying non-safe fixes",
     )
     parser_health.add_argument("--details", action="store_true", help="Show detailed issue list")
     parser_health.add_argument(
-        "--full",
+        "--full",
+        action="store_true",
+        help="Show full report with all details",
    )
     parser_health.add_argument(
-        "--trends",
+        "--trends",
+        type=int,
+        metavar="DAYS",
+        help="Show health trends over N days",
     )
     parser_health.add_argument(
-        "--project-root",
+        "--project-root",
+        default=".",
+        help="Project root directory (default: current)",
     )
     parser_health.add_argument("--json", action="store_true", help="Output as JSON")
     parser_health.set_defaults(func=cmd_health)
@@ -1237,10 +2516,13 @@ def main():

     # Morning command (start-of-day briefing)
     parser_morning = subparsers.add_parser(
-        "morning",
+        "morning",
+        help="Start-of-day briefing with patterns, debt, and focus areas",
     )
     parser_morning.add_argument(
-        "--patterns-dir",
+        "--patterns-dir",
+        default="./patterns",
+        help="Path to patterns directory",
     )
     parser_morning.add_argument("--project-root", default=".", help="Project root directory")
     parser_morning.add_argument("--verbose", "-v", action="store_true", help="Show detailed output")
@@ -1249,48 +2531,77 @@ def main():
     # Ship command (pre-commit validation)
     parser_ship = subparsers.add_parser("ship", help="Pre-commit validation pipeline")
     parser_ship.add_argument(
-        "--patterns-dir",
+        "--patterns-dir",
+        default="./patterns",
+        help="Path to patterns directory",
     )
     parser_ship.add_argument("--project-root", default=".", help="Project root directory")
     parser_ship.add_argument(
-        "--skip-sync",
+        "--skip-sync",
+        action="store_true",
+        help="Skip syncing patterns to Claude",
+    )
+    parser_ship.add_argument(
+        "--tests-only",
+        action="store_true",
+        help="Run tests only (skip lint/format checks)",
+    )
+    parser_ship.add_argument(
+        "--security-only",
+        action="store_true",
+        help="Run security checks only",
     )
     parser_ship.add_argument("--verbose", "-v", action="store_true", help="Show detailed output")
     parser_ship.set_defaults(func=cmd_ship)

     # Fix-all command (auto-fix everything)
     parser_fix_all = subparsers.add_parser(
-        "fix-all",
+        "fix-all",
+        help="Auto-fix all fixable lint and format issues",
     )
     parser_fix_all.add_argument("--project-root", default=".", help="Project root directory")
     parser_fix_all.add_argument(
-        "--dry-run",
+        "--dry-run",
+        action="store_true",
+        help="Show what would be fixed without applying",
     )
     parser_fix_all.add_argument("--verbose", "-v", action="store_true", help="Show detailed output")
     parser_fix_all.set_defaults(func=cmd_fix_all)

     # Learn command (pattern learning from git history)
     parser_learn = subparsers.add_parser(
-        "learn",
+        "learn",
+        help="Learn patterns from git history and bug fixes",
     )
     parser_learn.add_argument(
-        "--patterns-dir",
+        "--patterns-dir",
+        default="./patterns",
+        help="Path to patterns directory",
     )
     parser_learn.add_argument(
-        "--analyze",
+        "--analyze",
+        type=int,
+        metavar="N",
+        help="Analyze last N commits (default: 10)",
     )
     parser_learn.add_argument(
-        "--watch",
+        "--watch",
+        action="store_true",
+        help="Watch for new commits (not yet implemented)",
     )
     parser_learn.add_argument("--verbose", "-v", action="store_true", help="Show detailed output")
     parser_learn.set_defaults(func=cmd_learn)

     # Costs command (cost tracking dashboard)
     parser_costs = subparsers.add_parser(
-        "costs",
+        "costs",
+        help="View API cost tracking and savings from model routing",
     )
     parser_costs.add_argument(
-        "--days",
+        "--days",
+        type=int,
+        default=7,
+        help="Number of days to include (default: 7)",
     )
     parser_costs.add_argument("--empathy-dir", default=".empathy", help="Empathy data directory")
     parser_costs.add_argument("--json", action="store_true", help="Output as JSON")
@@ -1312,16 +2623,25 @@ def main():
     # Dashboard command (visual web interface)
     parser_dashboard = subparsers.add_parser("dashboard", help="Launch visual dashboard in browser")
     parser_dashboard.add_argument(
-        "--port",
+        "--port",
+        type=int,
+        default=8765,
+        help="Port to run on (default: 8765)",
     )
     parser_dashboard.add_argument(
-        "--patterns-dir",
+        "--patterns-dir",
+        default="./patterns",
+        help="Path to patterns directory",
    )
     parser_dashboard.add_argument(
-        "--empathy-dir",
+        "--empathy-dir",
+        default=".empathy",
+        help="Empathy data directory",
     )
     parser_dashboard.add_argument(
-        "--no-browser",
+        "--no-browser",
+        action="store_true",
+        help="Don't open browser automatically",
     )
     parser_dashboard.set_defaults(func=cmd_dashboard)

@@ -1331,7 +2651,9 @@ def main():
         help="List and manage agent frameworks (LangChain, LangGraph, AutoGen, Haystack)",
     )
     parser_frameworks.add_argument(
-        "--all",
+        "--all",
+        action="store_true",
+        help="Show all frameworks including uninstalled",
     )
     parser_frameworks.add_argument(
         "--recommend",
@@ -1341,15 +2663,139 @@ def main():
     parser_frameworks.add_argument("--json", action="store_true", help="Output as JSON")
     parser_frameworks.set_defaults(func=cmd_frameworks)

+    # Workflow command (multi-model workflow management)
+    parser_workflow = subparsers.add_parser(
+        "workflow",
+        help="Multi-model workflows for cost-optimized task pipelines",
+    )
+    parser_workflow.add_argument(
+        "action",
+        choices=["list", "describe", "run", "config"],
+        help="Action: list, describe, run, or config",
+    )
+    parser_workflow.add_argument(
+        "name",
+        nargs="?",
+        help="Workflow name (for describe/run)",
+    )
+    parser_workflow.add_argument(
+        "--input",
+        "-i",
+        help="JSON input data for workflow execution",
+    )
+    parser_workflow.add_argument(
+        "--provider",
+        "-p",
+        choices=["anthropic", "openai", "google", "ollama", "hybrid"],
+        default=None,  # None means use config
+        help="Model provider: anthropic, openai, google, ollama, or hybrid (mix of best models)",
+    )
+    parser_workflow.add_argument(
+        "--force",
+        action="store_true",
+        help="Force overwrite existing config file",
+    )
+    parser_workflow.add_argument("--json", action="store_true", help="Output as JSON")
+    parser_workflow.add_argument(
+        "--write-tests",
+        action="store_true",
+        help="(test-gen workflow) Write generated tests to disk",
+    )
+    parser_workflow.add_argument(
+        "--output-dir",
+        default="tests/generated",
+        help="(test-gen workflow) Output directory for generated tests",
+    )
+    parser_workflow.set_defaults(func=cmd_workflow)
+
+    # Sync-claude command (sync patterns to Claude Code)
+    parser_sync_claude = subparsers.add_parser(
+        "sync-claude",
+        help="Sync learned patterns to Claude Code rules",
+    )
+    parser_sync_claude.add_argument(
+        "--patterns-dir",
+        default="./patterns",
+        help="Path to patterns directory",
+    )
+    parser_sync_claude.add_argument(
+        "--output-dir",
+        default=".claude/rules/empathy",
+        help="Output directory for Claude rules (default: .claude/rules/empathy)",
+    )
+    parser_sync_claude.set_defaults(func=cmd_sync_claude)
+
+    # =========================================================================
+    # USER EXPERIENCE COMMANDS (v2.5+)
+    # =========================================================================
+
+    # Cheatsheet command (quick reference)
+    parser_cheatsheet = subparsers.add_parser("cheatsheet", help="Quick reference of all commands")
+    parser_cheatsheet.add_argument(
+        "category",
+        nargs="?",
+        help="Category to show (getting-started, daily-workflow, code-quality, etc.)",
+    )
+    parser_cheatsheet.add_argument(
+        "--compact",
+        action="store_true",
+        help="Show commands only without descriptions",
+    )
+    parser_cheatsheet.set_defaults(func=cmd_cheatsheet)
+
+    # Onboard command (interactive tutorial)
+    parser_onboard = subparsers.add_parser(
+        "onboard",
+        help="Interactive onboarding tutorial for new users",
+    )
+    parser_onboard.add_argument("--step", type=int, help="Jump to a specific step (1-5)")
+    parser_onboard.add_argument("--reset", action="store_true", help="Reset onboarding progress")
+    parser_onboard.set_defaults(func=cmd_onboard)
+
+    # Explain command (detailed command explanations)
+    parser_explain = subparsers.add_parser(
+        "explain",
+        help="Get detailed explanation of how a command works",
+    )
+    parser_explain.add_argument(
+        "command",
+        choices=["morning", "ship", "learn", "health", "sync-claude"],
+        help="Command to explain",
+    )
+    parser_explain.set_defaults(func=cmd_explain)
+
+    # Achievements command (progress tracking)
+    parser_achievements = subparsers.add_parser(
+        "achievements",
+        help="View your usage statistics and achievements",
+    )
+    parser_achievements.set_defaults(func=cmd_achievements)
+
+    # Wizard Factory commands (create wizards 12x faster)
+    add_wizard_factory_commands(subparsers)
+
     # Parse arguments
     args = parser.parse_args()

     # Execute command
     if hasattr(args, "func"):
-        args.func(args)
-
-
+        result = args.func(args)
+
+        # Show progressive discovery tips after command execution
+        if args.command and args.command not in ("dashboard", "run"):
+            try:
+                show_tip_if_available(args.command)
+            except Exception as e:  # noqa: BLE001
+                # INTENTIONAL: Discovery tips are optional UX enhancements
+                # They should never cause command execution to fail
+                # Cannot predict all possible errors from discovery system
+                logger.debug(f"Discovery tip not available for {args.command}: {e}")
+                pass
+
+        return result if result is not None else 0
+    parser.print_help()
+    return 0


 if __name__ == "__main__":
-    main()
+    sys.exit(main() or 0)