attune_ai-2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
attune/cli_minimal.py
ADDED
|
@@ -0,0 +1,1159 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Empathy Framework CLI.
|
|
3
|
+
|
|
4
|
+
IMPORTANT: This CLI is for automation only (git hooks, scripts, CI/CD).
|
|
5
|
+
For interactive use, use Claude Code skills in VSCode or Claude Desktop.
|
|
6
|
+
|
|
7
|
+
Automation commands:
|
|
8
|
+
empathy workflow list List available workflows
|
|
9
|
+
empathy workflow run <name> Execute a workflow
|
|
10
|
+
empathy workflow info <name> Show workflow details
|
|
11
|
+
|
|
12
|
+
Monitoring commands:
|
|
13
|
+
empathy dashboard start Start agent coordination dashboard
|
|
14
|
+
(opens web UI at http://localhost:8000)
|
|
15
|
+
|
|
16
|
+
Utility commands:
|
|
17
|
+
empathy telemetry show Display usage summary
|
|
18
|
+
empathy telemetry savings Show cost savings
|
|
19
|
+
empathy telemetry export Export to CSV/JSON
|
|
20
|
+
empathy telemetry routing-stats Show adaptive routing statistics
|
|
21
|
+
empathy telemetry routing-check Check for tier upgrade recommendations
|
|
22
|
+
empathy telemetry models Show model performance by provider
|
|
23
|
+
empathy telemetry agents Show active agents and their status
|
|
24
|
+
empathy telemetry signals Show coordination signals for an agent
|
|
25
|
+
|
|
26
|
+
empathy provider show Show current provider config
|
|
27
|
+
empathy provider set <name> Set provider (anthropic, openai, hybrid)
|
|
28
|
+
|
|
29
|
+
empathy validate Validate configuration
|
|
30
|
+
empathy version Show version
|
|
31
|
+
|
|
32
|
+
For interactive development, use Claude Code skills:
|
|
33
|
+
/dev Developer tools (commit, review, debug, refactor)
|
|
34
|
+
/testing Run tests, coverage, generate tests
|
|
35
|
+
/workflows AI-powered workflows (security, bug prediction)
|
|
36
|
+
/docs Documentation generation
|
|
37
|
+
/release Release preparation
|
|
38
|
+
/learning Session evaluation and improvement
|
|
39
|
+
"""
|
|
40
|
+
|
|
41
|
+
from __future__ import annotations
|
|
42
|
+
|
|
43
|
+
import argparse
|
|
44
|
+
import json
|
|
45
|
+
import logging
|
|
46
|
+
import sys
|
|
47
|
+
from pathlib import Path
|
|
48
|
+
from typing import TYPE_CHECKING
|
|
49
|
+
|
|
50
|
+
if TYPE_CHECKING:
|
|
51
|
+
from argparse import Namespace
|
|
52
|
+
|
|
53
|
+
logger = logging.getLogger(__name__)
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def get_version() -> str:
|
|
57
|
+
"""Get package version."""
|
|
58
|
+
try:
|
|
59
|
+
from importlib.metadata import version
|
|
60
|
+
|
|
61
|
+
return version("empathy-framework")
|
|
62
|
+
except Exception: # noqa: BLE001
|
|
63
|
+
# INTENTIONAL: Fallback for dev installs without metadata
|
|
64
|
+
return "dev"
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
# =============================================================================
|
|
68
|
+
# Workflow Commands
|
|
69
|
+
# =============================================================================
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def cmd_workflow_list(args: Namespace) -> int:
|
|
73
|
+
"""List available workflows."""
|
|
74
|
+
from attune.workflows import discover_workflows
|
|
75
|
+
|
|
76
|
+
workflows = discover_workflows()
|
|
77
|
+
|
|
78
|
+
print("\n📋 Available Workflows\n")
|
|
79
|
+
print("-" * 60)
|
|
80
|
+
|
|
81
|
+
if not workflows:
|
|
82
|
+
print("No workflows registered.")
|
|
83
|
+
return 0
|
|
84
|
+
|
|
85
|
+
for name, workflow_cls in sorted(workflows.items()):
|
|
86
|
+
doc = workflow_cls.__doc__ or "No description"
|
|
87
|
+
# Get first line of docstring
|
|
88
|
+
description = doc.split("\n")[0].strip()
|
|
89
|
+
print(f" {name:25} {description}")
|
|
90
|
+
|
|
91
|
+
print("-" * 60)
|
|
92
|
+
print(f"\nTotal: {len(workflows)} workflows")
|
|
93
|
+
print("\nRun a workflow: empathy workflow run <name>")
|
|
94
|
+
return 0
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def cmd_workflow_info(args: Namespace) -> int:
|
|
98
|
+
"""Show workflow details."""
|
|
99
|
+
from attune.workflows import discover_workflows
|
|
100
|
+
|
|
101
|
+
workflows = discover_workflows()
|
|
102
|
+
name = args.name
|
|
103
|
+
if name not in workflows:
|
|
104
|
+
print(f"❌ Workflow not found: {name}")
|
|
105
|
+
print("\nAvailable workflows:")
|
|
106
|
+
for wf_name in sorted(workflows.keys()):
|
|
107
|
+
print(f" - {wf_name}")
|
|
108
|
+
return 1
|
|
109
|
+
|
|
110
|
+
workflow_cls = workflows[name]
|
|
111
|
+
print(f"\n📋 Workflow: {name}\n")
|
|
112
|
+
print("-" * 60)
|
|
113
|
+
|
|
114
|
+
# Show docstring
|
|
115
|
+
if workflow_cls.__doc__:
|
|
116
|
+
print(workflow_cls.__doc__)
|
|
117
|
+
|
|
118
|
+
# Show input schema if available
|
|
119
|
+
if hasattr(workflow_cls, "input_schema"):
|
|
120
|
+
print("\nInput Schema:")
|
|
121
|
+
print(json.dumps(workflow_cls.input_schema, indent=2))
|
|
122
|
+
|
|
123
|
+
print("-" * 60)
|
|
124
|
+
return 0
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
def cmd_workflow_run(args: Namespace) -> int:
|
|
128
|
+
"""Execute a workflow."""
|
|
129
|
+
import asyncio
|
|
130
|
+
|
|
131
|
+
from attune.config import _validate_file_path
|
|
132
|
+
from attune.workflows import discover_workflows
|
|
133
|
+
|
|
134
|
+
workflows = discover_workflows()
|
|
135
|
+
name = args.name
|
|
136
|
+
if name not in workflows:
|
|
137
|
+
print(f"❌ Workflow not found: {name}")
|
|
138
|
+
return 1
|
|
139
|
+
|
|
140
|
+
# Parse input if provided
|
|
141
|
+
input_data = {}
|
|
142
|
+
if args.input:
|
|
143
|
+
try:
|
|
144
|
+
input_data = json.loads(args.input)
|
|
145
|
+
except json.JSONDecodeError as e:
|
|
146
|
+
print(f"❌ Invalid JSON input: {e}")
|
|
147
|
+
return 1
|
|
148
|
+
|
|
149
|
+
# Add common options with validation
|
|
150
|
+
if args.path:
|
|
151
|
+
try:
|
|
152
|
+
# Validate path to prevent path traversal attacks
|
|
153
|
+
validated_path = _validate_file_path(args.path)
|
|
154
|
+
input_data["path"] = str(validated_path)
|
|
155
|
+
except ValueError as e:
|
|
156
|
+
print(f"❌ Invalid path: {e}")
|
|
157
|
+
return 1
|
|
158
|
+
if args.target:
|
|
159
|
+
input_data["target"] = args.target
|
|
160
|
+
|
|
161
|
+
print(f"\n🚀 Running workflow: {name}\n")
|
|
162
|
+
|
|
163
|
+
try:
|
|
164
|
+
workflow_cls = workflows[name]
|
|
165
|
+
workflow = workflow_cls()
|
|
166
|
+
|
|
167
|
+
# Run the workflow
|
|
168
|
+
if asyncio.iscoroutinefunction(workflow.execute):
|
|
169
|
+
result = asyncio.run(workflow.execute(**input_data))
|
|
170
|
+
else:
|
|
171
|
+
result = workflow.execute(**input_data)
|
|
172
|
+
|
|
173
|
+
# Output result
|
|
174
|
+
if args.json:
|
|
175
|
+
print(json.dumps(result, indent=2, default=str))
|
|
176
|
+
else:
|
|
177
|
+
if isinstance(result, dict):
|
|
178
|
+
print("\n✅ Workflow completed\n")
|
|
179
|
+
for key, value in result.items():
|
|
180
|
+
print(f" {key}: {value}")
|
|
181
|
+
else:
|
|
182
|
+
print(f"\n✅ Result: {result}")
|
|
183
|
+
|
|
184
|
+
return 0
|
|
185
|
+
|
|
186
|
+
except Exception as e: # noqa: BLE001
|
|
187
|
+
# INTENTIONAL: CLI commands should catch all errors and report gracefully
|
|
188
|
+
logger.exception(f"Workflow failed: {e}")
|
|
189
|
+
print(f"\n❌ Workflow failed: {e}")
|
|
190
|
+
return 1
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
# =============================================================================
|
|
194
|
+
# Telemetry Commands
|
|
195
|
+
# =============================================================================
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
def cmd_telemetry_show(args: Namespace) -> int:
|
|
199
|
+
"""Display usage summary."""
|
|
200
|
+
try:
|
|
201
|
+
from attune.models.telemetry import TelemetryStore
|
|
202
|
+
|
|
203
|
+
store = TelemetryStore()
|
|
204
|
+
|
|
205
|
+
print("\n📊 Telemetry Summary\n")
|
|
206
|
+
print("-" * 60)
|
|
207
|
+
print(f" Period: Last {args.days} days")
|
|
208
|
+
|
|
209
|
+
# Get workflow records from store
|
|
210
|
+
# TODO: Consider adding aggregate methods to TelemetryStore for better performance
|
|
211
|
+
# with large datasets (e.g., store.get_total_cost(), store.get_token_counts())
|
|
212
|
+
workflows = store.get_workflows(limit=1000)
|
|
213
|
+
calls = store.get_calls(limit=1000)
|
|
214
|
+
|
|
215
|
+
if workflows:
|
|
216
|
+
total_cost = sum(r.total_cost for r in workflows)
|
|
217
|
+
total_tokens = sum(r.total_input_tokens + r.total_output_tokens for r in workflows)
|
|
218
|
+
print(f" Workflow runs: {len(workflows):,}")
|
|
219
|
+
print(f" Total tokens: {total_tokens:,}")
|
|
220
|
+
print(f" Total cost: ${total_cost:.2f}")
|
|
221
|
+
elif calls:
|
|
222
|
+
total_cost = sum(c.estimated_cost for c in calls)
|
|
223
|
+
total_tokens = sum(c.input_tokens + c.output_tokens for c in calls)
|
|
224
|
+
print(f" API calls: {len(calls):,}")
|
|
225
|
+
print(f" Total tokens: {total_tokens:,}")
|
|
226
|
+
print(f" Total cost: ${total_cost:.2f}")
|
|
227
|
+
else:
|
|
228
|
+
print(" No telemetry data found.")
|
|
229
|
+
|
|
230
|
+
print("-" * 60)
|
|
231
|
+
return 0
|
|
232
|
+
|
|
233
|
+
except ImportError:
|
|
234
|
+
print("❌ Telemetry module not available")
|
|
235
|
+
return 1
|
|
236
|
+
except Exception as e: # noqa: BLE001
|
|
237
|
+
# INTENTIONAL: CLI commands should catch all errors and report gracefully
|
|
238
|
+
logger.exception(f"Telemetry error: {e}")
|
|
239
|
+
print(f"❌ Error: {e}")
|
|
240
|
+
return 1
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
def cmd_telemetry_savings(args: Namespace) -> int:
|
|
244
|
+
"""Show cost savings from tier routing."""
|
|
245
|
+
try:
|
|
246
|
+
from attune.models.telemetry import TelemetryStore
|
|
247
|
+
|
|
248
|
+
store = TelemetryStore()
|
|
249
|
+
|
|
250
|
+
print("\n💰 Cost Savings Report\n")
|
|
251
|
+
print("-" * 60)
|
|
252
|
+
print(f" Period: Last {args.days} days")
|
|
253
|
+
|
|
254
|
+
# Calculate savings from workflow runs
|
|
255
|
+
records = store.get_workflows(limit=1000)
|
|
256
|
+
if records:
|
|
257
|
+
actual_cost = sum(r.total_cost for r in records)
|
|
258
|
+
total_tokens = sum(r.total_input_tokens + r.total_output_tokens for r in records)
|
|
259
|
+
|
|
260
|
+
# Calculate what premium-only pricing would cost
|
|
261
|
+
# Using Claude Opus pricing as premium baseline: ~$15/1M input, ~$75/1M output
|
|
262
|
+
# Simplified: ~$45/1M tokens average (blended input/output)
|
|
263
|
+
premium_rate_per_token = 45.0 / 1_000_000
|
|
264
|
+
baseline_cost = total_tokens * premium_rate_per_token
|
|
265
|
+
|
|
266
|
+
# Only show savings if we actually routed to cheaper models
|
|
267
|
+
if baseline_cost > actual_cost:
|
|
268
|
+
savings = baseline_cost - actual_cost
|
|
269
|
+
savings_pct = (savings / baseline_cost * 100) if baseline_cost > 0 else 0
|
|
270
|
+
|
|
271
|
+
print(f" Actual cost: ${actual_cost:.2f}")
|
|
272
|
+
print(f" Premium-only cost: ${baseline_cost:.2f} (estimated)")
|
|
273
|
+
print(f" Savings: ${savings:.2f}")
|
|
274
|
+
print(f" Savings percentage: {savings_pct:.1f}%")
|
|
275
|
+
else:
|
|
276
|
+
print(f" Total cost: ${actual_cost:.2f}")
|
|
277
|
+
print(f" Total tokens: {total_tokens:,}")
|
|
278
|
+
print("\n Note: No savings detected (may already be optimized)")
|
|
279
|
+
|
|
280
|
+
print("\n * Premium baseline assumes Claude Opus pricing (~$45/1M tokens)")
|
|
281
|
+
else:
|
|
282
|
+
print(" No telemetry data found.")
|
|
283
|
+
|
|
284
|
+
print("-" * 60)
|
|
285
|
+
return 0
|
|
286
|
+
|
|
287
|
+
except ImportError:
|
|
288
|
+
print("❌ Telemetry module not available")
|
|
289
|
+
return 1
|
|
290
|
+
except Exception as e: # noqa: BLE001
|
|
291
|
+
# INTENTIONAL: CLI commands should catch all errors and report gracefully
|
|
292
|
+
logger.exception(f"Telemetry error: {e}")
|
|
293
|
+
print(f"❌ Error: {e}")
|
|
294
|
+
return 1
|
|
295
|
+
|
|
296
|
+
|
|
297
|
+
def cmd_telemetry_export(args: Namespace) -> int:
|
|
298
|
+
"""Export telemetry data to file."""
|
|
299
|
+
from attune.config import _validate_file_path
|
|
300
|
+
|
|
301
|
+
try:
|
|
302
|
+
from attune.models.telemetry import TelemetryStore
|
|
303
|
+
|
|
304
|
+
store = TelemetryStore()
|
|
305
|
+
records = store.get_workflows(limit=10000)
|
|
306
|
+
|
|
307
|
+
# Convert to exportable format
|
|
308
|
+
data = [
|
|
309
|
+
{
|
|
310
|
+
"run_id": r.run_id,
|
|
311
|
+
"workflow_name": r.workflow_name,
|
|
312
|
+
"timestamp": r.started_at,
|
|
313
|
+
"total_cost": r.total_cost,
|
|
314
|
+
"input_tokens": r.total_input_tokens,
|
|
315
|
+
"output_tokens": r.total_output_tokens,
|
|
316
|
+
"success": r.success,
|
|
317
|
+
}
|
|
318
|
+
for r in records
|
|
319
|
+
]
|
|
320
|
+
|
|
321
|
+
# Validate output path
|
|
322
|
+
output_path = _validate_file_path(args.output)
|
|
323
|
+
|
|
324
|
+
if args.format == "csv":
|
|
325
|
+
import csv
|
|
326
|
+
|
|
327
|
+
with output_path.open("w", newline="") as f:
|
|
328
|
+
if data:
|
|
329
|
+
writer = csv.DictWriter(f, fieldnames=data[0].keys())
|
|
330
|
+
writer.writeheader()
|
|
331
|
+
writer.writerows(data)
|
|
332
|
+
print(f"✅ Exported {len(data)} entries to {output_path}")
|
|
333
|
+
|
|
334
|
+
elif args.format == "json":
|
|
335
|
+
with output_path.open("w") as f:
|
|
336
|
+
json.dump(data, f, indent=2, default=str)
|
|
337
|
+
print(f"✅ Exported {len(data)} entries to {output_path}")
|
|
338
|
+
|
|
339
|
+
return 0
|
|
340
|
+
|
|
341
|
+
except ImportError:
|
|
342
|
+
print("❌ Telemetry module not available")
|
|
343
|
+
return 1
|
|
344
|
+
except ValueError as e:
|
|
345
|
+
print(f"❌ Invalid path: {e}")
|
|
346
|
+
return 1
|
|
347
|
+
except Exception as e: # noqa: BLE001
|
|
348
|
+
# INTENTIONAL: CLI commands should catch all errors and report gracefully
|
|
349
|
+
logger.exception(f"Export error: {e}")
|
|
350
|
+
print(f"❌ Error: {e}")
|
|
351
|
+
return 1
|
|
352
|
+
|
|
353
|
+
|
|
354
|
+
def cmd_telemetry_routing_stats(args: Namespace) -> int:
|
|
355
|
+
"""Show adaptive routing statistics."""
|
|
356
|
+
try:
|
|
357
|
+
from attune.models import AdaptiveModelRouter
|
|
358
|
+
from attune.telemetry import UsageTracker
|
|
359
|
+
|
|
360
|
+
tracker = UsageTracker.get_instance()
|
|
361
|
+
router = AdaptiveModelRouter(telemetry=tracker)
|
|
362
|
+
|
|
363
|
+
workflow = args.workflow if hasattr(args, "workflow") and args.workflow else None
|
|
364
|
+
stage = args.stage if hasattr(args, "stage") and args.stage else None
|
|
365
|
+
days = args.days if hasattr(args, "days") else 7
|
|
366
|
+
|
|
367
|
+
print("\n📊 Adaptive Routing Statistics\n")
|
|
368
|
+
print("-" * 70)
|
|
369
|
+
|
|
370
|
+
if workflow:
|
|
371
|
+
# Show stats for specific workflow
|
|
372
|
+
stats = router.get_routing_stats(workflow=workflow, stage=stage, days=days)
|
|
373
|
+
|
|
374
|
+
if stats["total_calls"] == 0:
|
|
375
|
+
print(f"\n No data found for workflow: {workflow}")
|
|
376
|
+
if stage:
|
|
377
|
+
print(f" Stage: {stage}")
|
|
378
|
+
return 0
|
|
379
|
+
|
|
380
|
+
print(f"\n Workflow: {stats['workflow']}")
|
|
381
|
+
if stage:
|
|
382
|
+
print(f" Stage: {stage}")
|
|
383
|
+
print(f" Period: Last {days} days")
|
|
384
|
+
print(f" Total calls: {stats['total_calls']}")
|
|
385
|
+
print(f" Avg cost: ${stats['avg_cost']:.4f}")
|
|
386
|
+
print(f" Success rate: {stats['avg_success_rate']:.1%}")
|
|
387
|
+
|
|
388
|
+
print(f"\n Models used: {', '.join(stats['models_used'])}")
|
|
389
|
+
|
|
390
|
+
if stats["performance_by_model"]:
|
|
391
|
+
print("\n Per-Model Performance:")
|
|
392
|
+
for model, perf in sorted(
|
|
393
|
+
stats["performance_by_model"].items(),
|
|
394
|
+
key=lambda x: x[1]["quality_score"],
|
|
395
|
+
reverse=True,
|
|
396
|
+
):
|
|
397
|
+
print(f"\n {model}:")
|
|
398
|
+
print(f" Calls: {perf['calls']}")
|
|
399
|
+
print(f" Success rate: {perf['success_rate']:.1%}")
|
|
400
|
+
print(f" Avg cost: ${perf['avg_cost']:.4f}")
|
|
401
|
+
print(f" Avg latency: {perf['avg_latency_ms']:.0f}ms")
|
|
402
|
+
print(f" Quality score: {perf['quality_score']:.2f}")
|
|
403
|
+
|
|
404
|
+
else:
|
|
405
|
+
# Show overall statistics
|
|
406
|
+
stats = tracker.get_stats(days=days)
|
|
407
|
+
|
|
408
|
+
if stats["total_calls"] == 0:
|
|
409
|
+
print("\n No telemetry data found.")
|
|
410
|
+
return 0
|
|
411
|
+
|
|
412
|
+
print(f"\n Period: Last {days} days")
|
|
413
|
+
print(f" Total calls: {stats['total_calls']:,}")
|
|
414
|
+
print(f" Total cost: ${stats['total_cost']:.2f}")
|
|
415
|
+
print(f" Cache hit rate: {stats['cache_hit_rate']:.1f}%")
|
|
416
|
+
|
|
417
|
+
print("\n Cost by Tier:")
|
|
418
|
+
for tier, cost in sorted(stats["by_tier"].items(), key=lambda x: x[1], reverse=True):
|
|
419
|
+
pct = (cost / stats["total_cost"] * 100) if stats["total_cost"] > 0 else 0
|
|
420
|
+
print(f" {tier:8s}: ${cost:6.2f} ({pct:5.1f}%)")
|
|
421
|
+
|
|
422
|
+
print("\n Top Workflows:")
|
|
423
|
+
for workflow_name, cost in list(stats["by_workflow"].items())[:5]:
|
|
424
|
+
pct = (cost / stats["total_cost"] * 100) if stats["total_cost"] > 0 else 0
|
|
425
|
+
print(f" {workflow_name:30s}: ${cost:6.2f} ({pct:5.1f}%)")
|
|
426
|
+
|
|
427
|
+
print("\n" + "-" * 70)
|
|
428
|
+
return 0
|
|
429
|
+
|
|
430
|
+
except ImportError as e:
|
|
431
|
+
print(f"❌ Adaptive routing not available: {e}")
|
|
432
|
+
print(" Ensure empathy-framework is installed with telemetry support")
|
|
433
|
+
return 1
|
|
434
|
+
except Exception as e: # noqa: BLE001
|
|
435
|
+
# INTENTIONAL: CLI commands should catch all errors and report gracefully
|
|
436
|
+
logger.exception(f"Routing stats error: {e}")
|
|
437
|
+
print(f"❌ Error: {e}")
|
|
438
|
+
return 1
|
|
439
|
+
|
|
440
|
+
|
|
441
|
+
def cmd_telemetry_routing_check(args: Namespace) -> int:
|
|
442
|
+
"""Check for tier upgrade recommendations."""
|
|
443
|
+
try:
|
|
444
|
+
from attune.models import AdaptiveModelRouter
|
|
445
|
+
from attune.telemetry import UsageTracker
|
|
446
|
+
|
|
447
|
+
tracker = UsageTracker.get_instance()
|
|
448
|
+
router = AdaptiveModelRouter(telemetry=tracker)
|
|
449
|
+
|
|
450
|
+
workflow = args.workflow if hasattr(args, "workflow") and args.workflow else None
|
|
451
|
+
check_all = args.all if hasattr(args, "all") else False
|
|
452
|
+
|
|
453
|
+
print("\n🔍 Adaptive Routing Tier Upgrade Checks\n")
|
|
454
|
+
print("-" * 70)
|
|
455
|
+
|
|
456
|
+
if check_all:
|
|
457
|
+
# Check all workflows
|
|
458
|
+
stats = tracker.get_stats(days=7)
|
|
459
|
+
workflows = list(stats["by_workflow"].keys())
|
|
460
|
+
|
|
461
|
+
if not workflows:
|
|
462
|
+
print("\n No workflow data found.")
|
|
463
|
+
return 0
|
|
464
|
+
|
|
465
|
+
recommendations = []
|
|
466
|
+
|
|
467
|
+
for wf_name in workflows:
|
|
468
|
+
try:
|
|
469
|
+
should_upgrade, reason = router.recommend_tier_upgrade(
|
|
470
|
+
workflow=wf_name, stage=None
|
|
471
|
+
)
|
|
472
|
+
|
|
473
|
+
if should_upgrade:
|
|
474
|
+
recommendations.append(
|
|
475
|
+
{
|
|
476
|
+
"workflow": wf_name,
|
|
477
|
+
"reason": reason,
|
|
478
|
+
}
|
|
479
|
+
)
|
|
480
|
+
except Exception: # noqa: BLE001
|
|
481
|
+
# INTENTIONAL: Skip workflows without enough data
|
|
482
|
+
continue
|
|
483
|
+
|
|
484
|
+
if recommendations:
|
|
485
|
+
print("\n ⚠️ Tier Upgrade Recommendations:\n")
|
|
486
|
+
for rec in recommendations:
|
|
487
|
+
print(f" Workflow: {rec['workflow']}")
|
|
488
|
+
print(f" Reason: {rec['reason']}")
|
|
489
|
+
print()
|
|
490
|
+
else:
|
|
491
|
+
print("\n ✅ All workflows performing well - no upgrades needed.\n")
|
|
492
|
+
|
|
493
|
+
elif workflow:
|
|
494
|
+
# Check specific workflow
|
|
495
|
+
should_upgrade, reason = router.recommend_tier_upgrade(workflow=workflow, stage=None)
|
|
496
|
+
|
|
497
|
+
print(f"\n Workflow: {workflow}")
|
|
498
|
+
|
|
499
|
+
if should_upgrade:
|
|
500
|
+
print(" Status: ⚠️ UPGRADE RECOMMENDED")
|
|
501
|
+
print(f" Reason: {reason}")
|
|
502
|
+
print("\n Action: Consider upgrading from CHEAP → CAPABLE or CAPABLE → PREMIUM")
|
|
503
|
+
else:
|
|
504
|
+
print(" Status: ✅ Performing well")
|
|
505
|
+
print(f" Reason: {reason}")
|
|
506
|
+
|
|
507
|
+
else:
|
|
508
|
+
print("\n Error: Specify --workflow <name> or --all")
|
|
509
|
+
return 1
|
|
510
|
+
|
|
511
|
+
print("\n" + "-" * 70)
|
|
512
|
+
return 0
|
|
513
|
+
|
|
514
|
+
except ImportError as e:
|
|
515
|
+
print(f"❌ Adaptive routing not available: {e}")
|
|
516
|
+
return 1
|
|
517
|
+
except Exception as e: # noqa: BLE001
|
|
518
|
+
# INTENTIONAL: CLI commands should catch all errors and report gracefully
|
|
519
|
+
logger.exception(f"Routing check error: {e}")
|
|
520
|
+
print(f"❌ Error: {e}")
|
|
521
|
+
return 1
|
|
522
|
+
|
|
523
|
+
|
|
524
|
+
def cmd_telemetry_models(args: Namespace) -> int:
|
|
525
|
+
"""Show model performance by provider."""
|
|
526
|
+
try:
|
|
527
|
+
from attune.telemetry import UsageTracker
|
|
528
|
+
|
|
529
|
+
tracker = UsageTracker.get_instance()
|
|
530
|
+
provider = args.provider if hasattr(args, "provider") else None
|
|
531
|
+
days = args.days if hasattr(args, "days") else 7
|
|
532
|
+
|
|
533
|
+
stats = tracker.get_stats(days=days)
|
|
534
|
+
|
|
535
|
+
if stats["total_calls"] == 0:
|
|
536
|
+
print("\n No telemetry data found.")
|
|
537
|
+
return 0
|
|
538
|
+
|
|
539
|
+
print("\n📊 Model Performance\n")
|
|
540
|
+
print("-" * 70)
|
|
541
|
+
print(f"\n Period: Last {days} days")
|
|
542
|
+
|
|
543
|
+
# Get entries for analysis
|
|
544
|
+
entries = tracker.get_recent_entries(limit=10000, days=days)
|
|
545
|
+
|
|
546
|
+
# Group by provider and model
|
|
547
|
+
model_stats: dict[str, dict[str, dict]] = {}
|
|
548
|
+
|
|
549
|
+
for entry in entries:
|
|
550
|
+
entry_provider = entry.get("provider", "unknown")
|
|
551
|
+
if provider and entry_provider != provider:
|
|
552
|
+
continue
|
|
553
|
+
|
|
554
|
+
model = entry.get("model", "unknown")
|
|
555
|
+
cost = entry.get("cost", 0.0)
|
|
556
|
+
success = entry.get("success", True)
|
|
557
|
+
duration = entry.get("duration_ms", 0)
|
|
558
|
+
|
|
559
|
+
if entry_provider not in model_stats:
|
|
560
|
+
model_stats[entry_provider] = {}
|
|
561
|
+
|
|
562
|
+
if model not in model_stats[entry_provider]:
|
|
563
|
+
model_stats[entry_provider][model] = {
|
|
564
|
+
"calls": 0,
|
|
565
|
+
"total_cost": 0.0,
|
|
566
|
+
"successes": 0,
|
|
567
|
+
"total_duration": 0,
|
|
568
|
+
}
|
|
569
|
+
|
|
570
|
+
model_stats[entry_provider][model]["calls"] += 1
|
|
571
|
+
model_stats[entry_provider][model]["total_cost"] += cost
|
|
572
|
+
if success:
|
|
573
|
+
model_stats[entry_provider][model]["successes"] += 1
|
|
574
|
+
model_stats[entry_provider][model]["total_duration"] += duration
|
|
575
|
+
|
|
576
|
+
# Display by provider
|
|
577
|
+
for prov, models in sorted(model_stats.items()):
|
|
578
|
+
print(f"\n Provider: {prov.upper()}")
|
|
579
|
+
|
|
580
|
+
for model_name, mstats in sorted(
|
|
581
|
+
models.items(), key=lambda x: x[1]["total_cost"], reverse=True
|
|
582
|
+
):
|
|
583
|
+
calls = mstats["calls"]
|
|
584
|
+
avg_cost = mstats["total_cost"] / calls if calls > 0 else 0
|
|
585
|
+
success_rate = (mstats["successes"] / calls * 100) if calls > 0 else 0
|
|
586
|
+
avg_duration = mstats["total_duration"] / calls if calls > 0 else 0
|
|
587
|
+
|
|
588
|
+
print(f"\n {model_name}:")
|
|
589
|
+
print(f" Calls: {calls:,}")
|
|
590
|
+
print(f" Total cost: ${mstats['total_cost']:.2f}")
|
|
591
|
+
print(f" Avg cost: ${avg_cost:.4f}")
|
|
592
|
+
print(f" Success rate: {success_rate:.1f}%")
|
|
593
|
+
print(f" Avg duration: {avg_duration:.0f}ms")
|
|
594
|
+
|
|
595
|
+
print("\n" + "-" * 70)
|
|
596
|
+
return 0
|
|
597
|
+
|
|
598
|
+
except ImportError as e:
|
|
599
|
+
print(f"❌ Telemetry not available: {e}")
|
|
600
|
+
return 1
|
|
601
|
+
except Exception as e: # noqa: BLE001
|
|
602
|
+
# INTENTIONAL: CLI commands should catch all errors and report gracefully
|
|
603
|
+
logger.exception(f"Models error: {e}")
|
|
604
|
+
print(f"❌ Error: {e}")
|
|
605
|
+
return 1
|
|
606
|
+
|
|
607
|
+
|
|
608
|
+
def cmd_telemetry_agents(args: Namespace) -> int:
|
|
609
|
+
"""Show active agents and their status."""
|
|
610
|
+
try:
|
|
611
|
+
from attune.telemetry import HeartbeatCoordinator
|
|
612
|
+
|
|
613
|
+
coordinator = HeartbeatCoordinator()
|
|
614
|
+
active_agents = coordinator.get_active_agents()
|
|
615
|
+
|
|
616
|
+
print("\n🤖 Active Agents\n")
|
|
617
|
+
print("-" * 70)
|
|
618
|
+
|
|
619
|
+
if not active_agents:
|
|
620
|
+
print("\n No active agents found.")
|
|
621
|
+
print(" (Agents must use HeartbeatCoordinator to be tracked)")
|
|
622
|
+
return 0
|
|
623
|
+
|
|
624
|
+
print(f"\n Found {len(active_agents)} active agent(s):\n")
|
|
625
|
+
|
|
626
|
+
for agent in sorted(active_agents, key=lambda a: a.last_beat, reverse=True):
|
|
627
|
+
# Calculate time since last beat
|
|
628
|
+
from datetime import datetime
|
|
629
|
+
|
|
630
|
+
now = datetime.utcnow()
|
|
631
|
+
time_since = (now - agent.last_beat).total_seconds()
|
|
632
|
+
|
|
633
|
+
# Status indicator
|
|
634
|
+
if agent.status in ("completed", "failed", "cancelled"):
|
|
635
|
+
status_icon = "✅" if agent.status == "completed" else "❌"
|
|
636
|
+
elif time_since > 30:
|
|
637
|
+
status_icon = "⚠️" # Stale
|
|
638
|
+
else:
|
|
639
|
+
status_icon = "🟢" # Active
|
|
640
|
+
|
|
641
|
+
print(f" {status_icon} {agent.agent_id}")
|
|
642
|
+
print(f" Status: {agent.status}")
|
|
643
|
+
print(f" Progress: {agent.progress*100:.1f}%")
|
|
644
|
+
print(f" Task: {agent.current_task}")
|
|
645
|
+
print(f" Last beat: {time_since:.1f}s ago")
|
|
646
|
+
|
|
647
|
+
# Show metadata if present
|
|
648
|
+
if agent.metadata:
|
|
649
|
+
workflow = agent.metadata.get("workflow", "")
|
|
650
|
+
if workflow:
|
|
651
|
+
print(f" Workflow: {workflow}")
|
|
652
|
+
print()
|
|
653
|
+
|
|
654
|
+
print("-" * 70)
|
|
655
|
+
return 0
|
|
656
|
+
|
|
657
|
+
except ImportError as e:
|
|
658
|
+
print(f"❌ Agent tracking not available: {e}")
|
|
659
|
+
return 1
|
|
660
|
+
except Exception as e: # noqa: BLE001
|
|
661
|
+
# INTENTIONAL: CLI commands should catch all errors and report gracefully
|
|
662
|
+
logger.exception(f"Agents error: {e}")
|
|
663
|
+
print(f"❌ Error: {e}")
|
|
664
|
+
return 1
|
|
665
|
+
|
|
666
|
+
|
|
667
|
+
def cmd_telemetry_signals(args: Namespace) -> int:
|
|
668
|
+
"""Show coordination signals."""
|
|
669
|
+
try:
|
|
670
|
+
from attune.telemetry import CoordinationSignals
|
|
671
|
+
|
|
672
|
+
agent_id = args.agent if hasattr(args, "agent") else None
|
|
673
|
+
|
|
674
|
+
if not agent_id:
|
|
675
|
+
print("❌ Error: --agent <id> required to view signals")
|
|
676
|
+
return 1
|
|
677
|
+
|
|
678
|
+
coordinator = CoordinationSignals(agent_id=agent_id)
|
|
679
|
+
signals = coordinator.get_pending_signals()
|
|
680
|
+
|
|
681
|
+
print(f"\n📡 Coordination Signals for {agent_id}\n")
|
|
682
|
+
print("-" * 70)
|
|
683
|
+
|
|
684
|
+
if not signals:
|
|
685
|
+
print("\n No pending signals.")
|
|
686
|
+
return 0
|
|
687
|
+
|
|
688
|
+
print(f"\n Found {len(signals)} pending signal(s):\n")
|
|
689
|
+
|
|
690
|
+
for signal in sorted(signals, key=lambda s: s.timestamp, reverse=True):
|
|
691
|
+
# Calculate age
|
|
692
|
+
from datetime import datetime
|
|
693
|
+
|
|
694
|
+
now = datetime.utcnow()
|
|
695
|
+
age = (now - signal.timestamp).total_seconds()
|
|
696
|
+
|
|
697
|
+
# Signal type indicator
|
|
698
|
+
type_icons = {
|
|
699
|
+
"task_complete": "✅",
|
|
700
|
+
"abort": "🛑",
|
|
701
|
+
"ready": "🟢",
|
|
702
|
+
"checkpoint": "🔄",
|
|
703
|
+
"error": "❌",
|
|
704
|
+
}
|
|
705
|
+
icon = type_icons.get(signal.signal_type, "📨")
|
|
706
|
+
|
|
707
|
+
print(f" {icon} {signal.signal_type}")
|
|
708
|
+
print(f" From: {signal.source_agent}")
|
|
709
|
+
print(f" Target: {signal.target_agent or '* (broadcast)'}")
|
|
710
|
+
print(f" Age: {age:.1f}s")
|
|
711
|
+
print(f" Expires in: {signal.ttl_seconds - age:.1f}s")
|
|
712
|
+
|
|
713
|
+
# Show payload summary
|
|
714
|
+
if signal.payload:
|
|
715
|
+
payload_str = str(signal.payload)
|
|
716
|
+
if len(payload_str) > 60:
|
|
717
|
+
payload_str = payload_str[:57] + "..."
|
|
718
|
+
print(f" Payload: {payload_str}")
|
|
719
|
+
print()
|
|
720
|
+
|
|
721
|
+
print("-" * 70)
|
|
722
|
+
return 0
|
|
723
|
+
|
|
724
|
+
except ImportError as e:
|
|
725
|
+
print(f"❌ Coordination signals not available: {e}")
|
|
726
|
+
return 1
|
|
727
|
+
except Exception as e: # noqa: BLE001
|
|
728
|
+
# INTENTIONAL: CLI commands should catch all errors and report gracefully
|
|
729
|
+
logger.exception(f"Signals error: {e}")
|
|
730
|
+
print(f"❌ Error: {e}")
|
|
731
|
+
return 1
|
|
732
|
+
|
|
733
|
+
|
|
734
|
+
# =============================================================================
|
|
735
|
+
# Provider Commands
|
|
736
|
+
# =============================================================================
|
|
737
|
+
|
|
738
|
+
|
|
739
|
+
def cmd_provider_show(args: Namespace) -> int:
|
|
740
|
+
"""Show current provider configuration."""
|
|
741
|
+
try:
|
|
742
|
+
from attune.models.provider_config import get_provider_config
|
|
743
|
+
|
|
744
|
+
config = get_provider_config()
|
|
745
|
+
|
|
746
|
+
print("\n🔧 Provider Configuration\n")
|
|
747
|
+
print("-" * 60)
|
|
748
|
+
print(f" Mode: {config.mode.value}")
|
|
749
|
+
print(f" Primary provider: {config.primary_provider}")
|
|
750
|
+
print(f" Cost optimization: {'✅ Enabled' if config.cost_optimization else '❌ Disabled'}")
|
|
751
|
+
|
|
752
|
+
if config.available_providers:
|
|
753
|
+
print("\n Available providers:")
|
|
754
|
+
for provider in config.available_providers:
|
|
755
|
+
status = "✓" if provider == config.primary_provider else " "
|
|
756
|
+
print(f" [{status}] {provider}")
|
|
757
|
+
else:
|
|
758
|
+
print("\n ⚠️ No API keys detected")
|
|
759
|
+
print(" Set ANTHROPIC_API_KEY, OPENAI_API_KEY, or GOOGLE_API_KEY")
|
|
760
|
+
|
|
761
|
+
print("-" * 60)
|
|
762
|
+
return 0
|
|
763
|
+
|
|
764
|
+
except ImportError:
|
|
765
|
+
print("❌ Provider module not available")
|
|
766
|
+
return 1
|
|
767
|
+
except Exception as e: # noqa: BLE001
|
|
768
|
+
# INTENTIONAL: CLI commands should catch all errors and report gracefully
|
|
769
|
+
logger.exception(f"Provider error: {e}")
|
|
770
|
+
print(f"❌ Error: {e}")
|
|
771
|
+
return 1
|
|
772
|
+
|
|
773
|
+
|
|
774
|
+
def cmd_provider_set(args: Namespace) -> int:
|
|
775
|
+
"""Set the LLM provider."""
|
|
776
|
+
try:
|
|
777
|
+
from attune.models.provider_config import (
|
|
778
|
+
ProviderMode,
|
|
779
|
+
get_provider_config,
|
|
780
|
+
set_provider_config,
|
|
781
|
+
)
|
|
782
|
+
|
|
783
|
+
# Get current config and update
|
|
784
|
+
config = get_provider_config()
|
|
785
|
+
|
|
786
|
+
if args.name == "hybrid":
|
|
787
|
+
config.mode = ProviderMode.HYBRID
|
|
788
|
+
print("✅ Provider mode set to: hybrid (multi-provider)")
|
|
789
|
+
else:
|
|
790
|
+
config.mode = ProviderMode.SINGLE
|
|
791
|
+
config.primary_provider = args.name
|
|
792
|
+
print(f"✅ Provider set to: {args.name}")
|
|
793
|
+
|
|
794
|
+
set_provider_config(config)
|
|
795
|
+
return 0
|
|
796
|
+
|
|
797
|
+
except ImportError:
|
|
798
|
+
print("❌ Provider module not available")
|
|
799
|
+
return 1
|
|
800
|
+
except Exception as e: # noqa: BLE001
|
|
801
|
+
# INTENTIONAL: CLI commands should catch all errors and report gracefully
|
|
802
|
+
logger.exception(f"Provider error: {e}")
|
|
803
|
+
print(f"❌ Error: {e}")
|
|
804
|
+
return 1
|
|
805
|
+
|
|
806
|
+
|
|
807
|
+
# =============================================================================
|
|
808
|
+
# Dashboard Commands
|
|
809
|
+
# =============================================================================
|
|
810
|
+
|
|
811
|
+
|
|
812
|
+
def cmd_dashboard_start(args: Namespace) -> int:
|
|
813
|
+
"""Start the agent coordination dashboard."""
|
|
814
|
+
try:
|
|
815
|
+
from attune.dashboard import run_standalone_dashboard
|
|
816
|
+
|
|
817
|
+
# Get host and port from args
|
|
818
|
+
host = args.host
|
|
819
|
+
port = args.port
|
|
820
|
+
|
|
821
|
+
print("\n🚀 Starting Agent Coordination Dashboard...")
|
|
822
|
+
print(f"📊 Dashboard will be available at: http://{host}:{port}\n")
|
|
823
|
+
print("💡 Make sure Redis is populated with test data:")
|
|
824
|
+
print(" python scripts/populate_redis_direct.py\n")
|
|
825
|
+
print("Press Ctrl+C to stop\n")
|
|
826
|
+
|
|
827
|
+
# Start dashboard
|
|
828
|
+
run_standalone_dashboard(host=host, port=port)
|
|
829
|
+
return 0
|
|
830
|
+
|
|
831
|
+
except KeyboardInterrupt:
|
|
832
|
+
print("\n\n🛑 Dashboard stopped")
|
|
833
|
+
return 0
|
|
834
|
+
except ImportError as e:
|
|
835
|
+
print(f"❌ Dashboard not available: {e}")
|
|
836
|
+
print(" Install dashboard dependencies: pip install redis")
|
|
837
|
+
return 1
|
|
838
|
+
except Exception as e: # noqa: BLE001
|
|
839
|
+
# INTENTIONAL: CLI commands should catch all errors and report gracefully
|
|
840
|
+
logger.exception(f"Dashboard error: {e}")
|
|
841
|
+
print(f"❌ Error starting dashboard: {e}")
|
|
842
|
+
return 1
|
|
843
|
+
|
|
844
|
+
|
|
845
|
+
# =============================================================================
|
|
846
|
+
# Utility Commands
|
|
847
|
+
# =============================================================================
|
|
848
|
+
|
|
849
|
+
|
|
850
|
+
def cmd_validate(args: Namespace) -> int:
|
|
851
|
+
"""Validate configuration."""
|
|
852
|
+
print("\n🔍 Validating configuration...\n")
|
|
853
|
+
|
|
854
|
+
errors = []
|
|
855
|
+
warnings = []
|
|
856
|
+
|
|
857
|
+
# Check config file
|
|
858
|
+
config_paths = [
|
|
859
|
+
Path("attune.config.json"),
|
|
860
|
+
Path("attune.config.yml"),
|
|
861
|
+
Path("attune.config.yaml"),
|
|
862
|
+
]
|
|
863
|
+
|
|
864
|
+
config_found = False
|
|
865
|
+
for config_path in config_paths:
|
|
866
|
+
if config_path.exists():
|
|
867
|
+
config_found = True
|
|
868
|
+
print(f" ✅ Config file: {config_path}")
|
|
869
|
+
break
|
|
870
|
+
|
|
871
|
+
if not config_found:
|
|
872
|
+
warnings.append("No attune.config file found (using defaults)")
|
|
873
|
+
|
|
874
|
+
# Check for API keys
|
|
875
|
+
import os
|
|
876
|
+
|
|
877
|
+
api_keys = {
|
|
878
|
+
"ANTHROPIC_API_KEY": "Anthropic (Claude)",
|
|
879
|
+
"OPENAI_API_KEY": "OpenAI (GPT)",
|
|
880
|
+
"GOOGLE_API_KEY": "Google (Gemini)",
|
|
881
|
+
}
|
|
882
|
+
|
|
883
|
+
keys_found = 0
|
|
884
|
+
for key, name in api_keys.items():
|
|
885
|
+
if os.environ.get(key):
|
|
886
|
+
print(f" ✅ {name} API key set")
|
|
887
|
+
keys_found += 1
|
|
888
|
+
|
|
889
|
+
if keys_found == 0:
|
|
890
|
+
errors.append(
|
|
891
|
+
"No API keys found. Set at least one: ANTHROPIC_API_KEY, OPENAI_API_KEY, or GOOGLE_API_KEY"
|
|
892
|
+
)
|
|
893
|
+
|
|
894
|
+
# Check workflows directory
|
|
895
|
+
try:
|
|
896
|
+
from attune.workflows import WORKFLOW_REGISTRY
|
|
897
|
+
|
|
898
|
+
print(f" ✅ {len(WORKFLOW_REGISTRY)} workflows registered")
|
|
899
|
+
except ImportError as e:
|
|
900
|
+
warnings.append(f"Could not load workflows: {e}")
|
|
901
|
+
|
|
902
|
+
# Summary
|
|
903
|
+
print("\n" + "-" * 60)
|
|
904
|
+
|
|
905
|
+
if errors:
|
|
906
|
+
print("\n❌ Validation failed:")
|
|
907
|
+
for error in errors:
|
|
908
|
+
print(f" - {error}")
|
|
909
|
+
return 1
|
|
910
|
+
|
|
911
|
+
if warnings:
|
|
912
|
+
print("\n⚠️ Warnings:")
|
|
913
|
+
for warning in warnings:
|
|
914
|
+
print(f" - {warning}")
|
|
915
|
+
|
|
916
|
+
print("\n✅ Configuration is valid")
|
|
917
|
+
return 0
|
|
918
|
+
|
|
919
|
+
|
|
920
|
+
def cmd_version(args: Namespace) -> int:
|
|
921
|
+
"""Show version information."""
|
|
922
|
+
version = get_version()
|
|
923
|
+
print(f"empathy-framework {version}")
|
|
924
|
+
|
|
925
|
+
if args.verbose:
|
|
926
|
+
print(f"\nPython: {sys.version}")
|
|
927
|
+
print(f"Platform: {sys.platform}")
|
|
928
|
+
|
|
929
|
+
# Show installed extras
|
|
930
|
+
try:
|
|
931
|
+
from importlib.metadata import requires
|
|
932
|
+
|
|
933
|
+
reqs = requires("empathy-framework") or []
|
|
934
|
+
print(f"\nDependencies: {len(reqs)}")
|
|
935
|
+
except Exception: # noqa: BLE001
|
|
936
|
+
pass
|
|
937
|
+
|
|
938
|
+
return 0
|
|
939
|
+
|
|
940
|
+
|
|
941
|
+
# =============================================================================
|
|
942
|
+
# Convenience Commands (Keyword Shortcuts)
|
|
943
|
+
# =============================================================================
|
|
944
|
+
# Main Entry Point
|
|
945
|
+
# =============================================================================
|
|
946
|
+
|
|
947
|
+
|
|
948
|
+
def create_parser() -> argparse.ArgumentParser:
    """Create the argument parser."""
    parser = argparse.ArgumentParser(
        prog="empathy",
        description="Empathy Framework CLI (automation interface - for git hooks, scripts, CI/CD)",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
NOTE: This CLI is for automation only. For interactive development,
use Claude Code skills in VSCode or Claude Desktop:

  /dev        Developer tools (commit, review, debug, refactor)
  /testing    Run tests, coverage, generate tests
  /workflows  AI-powered workflows (security, bug prediction)
  /learning   Session evaluation

Documentation: https://smartaimemory.com/framework-docs/
""",
    )

    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Enable verbose output",
    )

    subparsers = parser.add_subparsers(dest="command", help="Available commands")

    # --- Workflow commands ---
    workflow_parser = subparsers.add_parser("workflow", help="Workflow management")
    workflow_sub = workflow_parser.add_subparsers(dest="workflow_command")

    # workflow list
    workflow_sub.add_parser("list", help="List available workflows")

    # workflow info
    info_parser = workflow_sub.add_parser("info", help="Show workflow details")
    info_parser.add_argument("name", help="Workflow name")

    # workflow run
    run_parser = workflow_sub.add_parser("run", help="Run a workflow")
    run_parser.add_argument("name", help="Workflow name")
    run_parser.add_argument("--input", "-i", help="JSON input data")
    run_parser.add_argument("--path", "-p", help="Target path")
    run_parser.add_argument("--target", "-t", help="Target value (e.g., coverage target)")
    run_parser.add_argument("--json", "-j", action="store_true", help="Output as JSON")

    # --- Telemetry commands ---
    telemetry_parser = subparsers.add_parser("telemetry", help="Usage telemetry")
    telemetry_sub = telemetry_parser.add_subparsers(dest="telemetry_command")

    # telemetry show
    show_parser = telemetry_sub.add_parser("show", help="Display usage summary")
    show_parser.add_argument(
        "--days", "-d", type=int, default=30, help="Number of days (default: 30)"
    )

    # telemetry savings
    savings_parser = telemetry_sub.add_parser("savings", help="Show cost savings")
    savings_parser.add_argument(
        "--days", "-d", type=int, default=30, help="Number of days (default: 30)"
    )

    # telemetry export
    export_parser = telemetry_sub.add_parser("export", help="Export telemetry data")
    export_parser.add_argument("--output", "-o", required=True, help="Output file path")
    export_parser.add_argument(
        "--format", "-f", choices=["csv", "json"], default="json", help="Output format"
    )
    export_parser.add_argument(
        "--days", "-d", type=int, default=30, help="Number of days (default: 30)"
    )

    # telemetry routing-stats
    routing_stats_parser = telemetry_sub.add_parser(
        "routing-stats", help="Show adaptive routing statistics"
    )
    routing_stats_parser.add_argument("--workflow", "-w", help="Workflow name")
    routing_stats_parser.add_argument("--stage", "-s", help="Stage name")
    routing_stats_parser.add_argument(
        "--days", "-d", type=int, default=7, help="Number of days (default: 7)"
    )

    # telemetry routing-check
    routing_check_parser = telemetry_sub.add_parser(
        "routing-check", help="Check for tier upgrade recommendations"
    )
    routing_check_parser.add_argument("--workflow", "-w", help="Workflow name")
    routing_check_parser.add_argument(
        "--all", "-a", action="store_true", help="Check all workflows"
    )

    # telemetry models
    models_parser = telemetry_sub.add_parser("models", help="Show model performance by provider")
    models_parser.add_argument(
        "--provider",
        "-p",
        choices=["anthropic", "openai", "google"],
        help="Filter by provider",
    )
    models_parser.add_argument(
        "--days", "-d", type=int, default=7, help="Number of days (default: 7)"
    )

    # telemetry agents
    telemetry_sub.add_parser("agents", help="Show active agents and their status")

    # telemetry signals
    signals_parser = telemetry_sub.add_parser("signals", help="Show coordination signals")
    signals_parser.add_argument("--agent", "-a", required=True, help="Agent ID to view signals for")

    # --- Provider commands ---
    provider_parser = subparsers.add_parser("provider", help="LLM provider configuration")
    provider_sub = provider_parser.add_subparsers(dest="provider_command")

    # provider show
    provider_sub.add_parser("show", help="Show current provider")

    # provider set
    set_parser = provider_sub.add_parser("set", help="Set provider")
    set_parser.add_argument("name", choices=["anthropic", "openai", "hybrid"], help="Provider name")

    # --- Dashboard commands ---
    dashboard_parser = subparsers.add_parser("dashboard", help="Agent coordination dashboard")
    dashboard_sub = dashboard_parser.add_subparsers(dest="dashboard_command")

    # dashboard start
    start_parser = dashboard_sub.add_parser("start", help="Start dashboard web server")
    start_parser.add_argument("--host", default="127.0.0.1", help="Host to bind to (default: 127.0.0.1)")
    start_parser.add_argument("--port", type=int, default=8000, help="Port to bind to (default: 8000)")

    # --- Utility commands ---
    subparsers.add_parser("validate", help="Validate configuration")

    version_parser = subparsers.add_parser("version", help="Show version")
    version_parser.add_argument("-v", "--verbose", action="store_true", help="Show detailed info")
    return parser


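# Quick reference (derived from the subparser definitions above, not from any
# separate documentation):
#     empathy workflow {list | info NAME | run NAME [-i JSON] [-p PATH] [-t TARGET] [-j]}
#     empathy telemetry {show | savings | export -o FILE [-f csv|json] | routing-stats
#                        | routing-check [--all] | models [-p PROVIDER] | agents | signals -a AGENT_ID}
#     empathy provider {show | set {anthropic|openai|hybrid}}
#     empathy dashboard start [--host HOST] [--port PORT]
#     empathy validate
#     empathy version [-v]

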
def main(argv: list[str] | None = None) -> int:
    """Main entry point."""
    parser = create_parser()
    args = parser.parse_args(argv)

    # Configure logging
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARNING)

    # Route to command handlers
    if args.command == "workflow":
        if args.workflow_command == "list":
            return cmd_workflow_list(args)
        elif args.workflow_command == "info":
            return cmd_workflow_info(args)
        elif args.workflow_command == "run":
            return cmd_workflow_run(args)
        else:
            print("Usage: empathy workflow {list|info|run}")
            return 1

    elif args.command == "telemetry":
        if args.telemetry_command == "show":
            return cmd_telemetry_show(args)
        elif args.telemetry_command == "savings":
            return cmd_telemetry_savings(args)
        elif args.telemetry_command == "export":
            return cmd_telemetry_export(args)
        elif args.telemetry_command == "routing-stats":
            return cmd_telemetry_routing_stats(args)
        elif args.telemetry_command == "routing-check":
            return cmd_telemetry_routing_check(args)
        elif args.telemetry_command == "models":
            return cmd_telemetry_models(args)
        elif args.telemetry_command == "agents":
            return cmd_telemetry_agents(args)
        elif args.telemetry_command == "signals":
            return cmd_telemetry_signals(args)
        else:
            print("Usage: empathy telemetry {show|savings|export|routing-stats|routing-check|models|agents|signals}")
            return 1

    elif args.command == "provider":
        if args.provider_command == "show":
            return cmd_provider_show(args)
        elif args.provider_command == "set":
            return cmd_provider_set(args)
        else:
            print("Usage: empathy provider {show|set}")
            return 1

    elif args.command == "dashboard":
        if args.dashboard_command == "start":
            return cmd_dashboard_start(args)
        else:
            print("Usage: empathy dashboard start [--host HOST] [--port PORT]")
            return 1

    elif args.command == "validate":
        return cmd_validate(args)

    elif args.command == "version":
        return cmd_version(args)

    else:
        parser.print_help()
        return 0


if __name__ == "__main__":
    sys.exit(main())
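
# Illustrative programmatic invocation (assumption: the import path below is
# hypothetical; use whichever attune module actually contains this main()):
#     from attune.cli_minimal import main
#     exit_code = main(["telemetry", "show", "--days", "7"])
# Passing an explicit argv list bypasses sys.argv, which keeps tests and other
# automation scripts from depending on the process command line.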