attune-ai 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
attune/telemetry/cli.py
ADDED
|
@@ -0,0 +1,1231 @@
|
|
|
1
|
+
"""CLI commands for telemetry tracking.
|
|
2
|
+
|
|
3
|
+
Provides commands to view, analyze, and manage local usage telemetry data.
|
|
4
|
+
|
|
5
|
+
Copyright 2025 Smart-AI-Memory
|
|
6
|
+
Licensed under Fair Source License 0.9
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import csv
|
|
10
|
+
import json
|
|
11
|
+
import sys
|
|
12
|
+
from datetime import datetime
|
|
13
|
+
from typing import Any
|
|
14
|
+
|
|
15
|
+
try:
|
|
16
|
+
from rich.console import Console
|
|
17
|
+
from rich.panel import Panel
|
|
18
|
+
from rich.table import Table
|
|
19
|
+
from rich.text import Text
|
|
20
|
+
|
|
21
|
+
RICH_AVAILABLE = True
|
|
22
|
+
except ImportError:
|
|
23
|
+
RICH_AVAILABLE = False
|
|
24
|
+
Console = None # type: ignore
|
|
25
|
+
|
|
26
|
+
from attune.config import _validate_file_path
|
|
27
|
+
|
|
28
|
+
from .usage_tracker import UsageTracker
|
|
29
|
+
|
|
30
|
+
# _validate_file_path is now imported from attune.config
|
|
31
|
+
# This eliminates the duplicate definition that previously existed here (lines 30-69)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def cmd_telemetry_show(args: Any) -> int:
    """Show recent telemetry entries.

    Renders a table of recent LLM calls (rich table when available, plain
    text otherwise) along with the total cost and the on-disk data location.

    Args:
        args: Parsed command-line arguments. Reads optional ``limit``
            (default 20) and ``days`` (default None = no date filter).

    Returns:
        Exit code (0 for success)
    """
    tracker = UsageTracker.get_instance()
    limit = getattr(args, "limit", 20)
    days = getattr(args, "days", None)

    entries = tracker.get_recent_entries(limit=limit, days=days)

    if not entries:
        print("No telemetry data found.")
        print(f"Data location: {tracker.telemetry_dir}")
        return 0

    if RICH_AVAILABLE and Console is not None:
        console = Console()
        table = Table(title="Recent LLM Calls", show_header=True, header_style="bold magenta")
        table.add_column("Time", style="cyan", width=19)
        table.add_column("Workflow", style="green")
        table.add_column("Stage", style="blue")
        table.add_column("Tier", style="yellow")
        table.add_column("Cost", style="red", justify="right")
        table.add_column("Tokens", justify="right")
        table.add_column("Cache", style="green")
        table.add_column("Duration", justify="right")

        total_cost = 0.0
        total_duration = 0

        for entry in entries:
            ts = entry.get("ts", "")
            # Format timestamp; fall back to a raw prefix on parse failure.
            try:
                dt = datetime.fromisoformat(ts.rstrip("Z"))
                ts_display = dt.strftime("%Y-%m-%d %H:%M:%S")
            except (ValueError, AttributeError):
                ts_display = ts[:19] if len(ts) >= 19 else ts

            workflow = entry.get("workflow", "unknown")
            stage = entry.get("stage", "-")
            tier = entry.get("tier", "unknown")
            cost = entry.get("cost", 0.0)
            tokens = entry.get("tokens", {})
            cache = entry.get("cache", {})
            duration_ms = entry.get("duration_ms", 0)

            tokens_str = f"{tokens.get('input', 0)}/{tokens.get('output', 0)}"
            cache_str = "HIT" if cache.get("hit") else "MISS"
            if cache.get("hit"):
                cache_type = cache.get("type", "")
                if cache_type:
                    cache_str += f" ({cache_type})"

            table.add_row(
                ts_display,
                workflow[:20],
                stage[:15] if stage else "-",
                tier,
                f"${cost:.4f}",
                tokens_str,
                cache_str,
                f"{duration_ms}ms",
            )

            total_cost += cost
            total_duration += duration_ms

        console.print(table)
        console.print()
        console.print(f"[bold]Total Cost:[/bold] ${total_cost:.4f}")
        # entries is non-empty here (early return above), so division is safe.
        console.print(f"[bold]Avg Duration:[/bold] {total_duration // len(entries)}ms")
        console.print(f"\n[dim]Data location: {tracker.telemetry_dir}[/dim]")
    else:
        # Fallback to plain text
        print(
            f"\n{'Time':<19} {'Workflow':<20} {'Stage':<15} {'Tier':<10} {'Cost':>10} {'Cache':<10} {'Duration':>10}"
        )
        print("-" * 120)
        total_cost = 0.0
        for entry in entries:
            ts = entry.get("ts", "")[:19]
            # BUG FIX: a key present with value None previously raised
            # TypeError on slicing; use `or` fallbacks like the rich path does.
            workflow = (entry.get("workflow") or "unknown")[:20]
            stage = (entry.get("stage") or "-")[:15]
            tier = entry.get("tier", "unknown")
            cost = entry.get("cost", 0.0)
            cache = entry.get("cache", {})
            duration_ms = entry.get("duration_ms", 0)

            cache_str = "HIT" if cache.get("hit") else "MISS"
            print(
                f"{ts:<19} {workflow:<20} {stage:<15} {tier:<10} ${cost:>9.4f} {cache_str:<10} {duration_ms:>9}ms"
            )
            total_cost += cost

        print("-" * 120)
        print(f"Total Cost: ${total_cost:.4f}")
        print(f"\nData location: {tracker.telemetry_dir}")

    return 0
|
+
|
|
142
|
+
def cmd_telemetry_savings(args: Any) -> int:
    """Calculate and display cost savings.

    Compares the actual tier-routed spend against an all-PREMIUM baseline
    over the requested window and prints the result (rich panel when
    available, plain text otherwise).

    Args:
        args: Parsed command-line arguments (reads optional ``days``, default 30)

    Returns:
        Exit code (0 for success)
    """
    tracker = UsageTracker.get_instance()
    days = getattr(args, "days", 30)

    savings = tracker.calculate_savings(days=days)

    if savings["total_calls"] == 0:
        print("No telemetry data found for the specified period.")
        return 0

    tier_rows = [
        f"  {tier:8}: {pct:5.1f}%"
        for tier, pct in sorted(savings["tier_distribution"].items())
    ]

    if RICH_AVAILABLE and Console is not None:
        # Assemble the panel body as one list of lines.
        color = "green" if savings["savings"] > 0 else "red"
        body = [f"Period: Last {days} days", "", "Usage Pattern:"]
        body.extend(tier_rows)
        body += [
            "",
            "Cost Comparison:",
            f"  Baseline (all PREMIUM): ${savings['baseline_cost']:.2f}",
            f"  Actual (tier routing):  ${savings['actual_cost']:.2f}",
            "",
            f"[bold {color}]YOUR SAVINGS: ${savings['savings']:.2f} ({savings['savings_percent']:.1f}%)[/bold {color}]",
            "",
            f"Cache savings: ${savings['cache_savings']:.2f}",
            f"Total calls: {savings['total_calls']}",
        ]

        Console().print(
            Panel(
                "\n".join(body),
                title=Text("Cost Savings Analysis", style="bold magenta"),
                border_style="cyan",
            )
        )
    else:
        # Plain-text fallback.
        rule = "=" * 60
        print("\n" + rule)
        print("COST SAVINGS ANALYSIS")
        print(rule)
        print(f"Period: Last {days} days\n")
        print("Usage Pattern:")
        for row in tier_rows:
            print(row)
        print("\nCost Comparison:")
        print(f"  Baseline (all PREMIUM): ${savings['baseline_cost']:.2f}")
        print(f"  Actual (tier routing):  ${savings['actual_cost']:.2f}")
        print(f"\nYOUR SAVINGS: ${savings['savings']:.2f} ({savings['savings_percent']:.1f}%)")
        print(f"\nCache savings: ${savings['cache_savings']:.2f}")
        print(f"Total calls: {savings['total_calls']}")
        print(rule)

    return 0
|
|
212
|
+
def cmd_telemetry_cache_stats(args: Any) -> int:
    """Show prompt caching performance statistics.

    Displays cache hit rates, cost savings, and workflow-level stats.
    Adds optimization tips when the overall hit rate falls below 30%.

    Args:
        args: Parsed command-line arguments (reads optional ``days``, default 7)

    Returns:
        Exit code (0 for success)
    """
    tracker = UsageTracker.get_instance()
    days = getattr(args, "days", 7)

    stats = tracker.get_cache_stats(days=days)

    if stats["total_requests"] == 0:
        print("No telemetry data found for cache analysis.")
        print(f"Data location: {tracker.telemetry_dir}")
        return 0

    if RICH_AVAILABLE and Console is not None:
        console = Console()

        # Main stats table
        table = Table(
            title=f"Prompt Caching Stats (Last {days} Days)",
            show_header=True,
            header_style="bold magenta",
        )
        table.add_column("Metric", style="cyan")
        table.add_column("Value", style="green", justify="right")

        # Cache hit rate (green above 50%, yellow otherwise)
        hit_rate_color = "green" if stats["hit_rate"] > 0.5 else "yellow"
        table.add_row(
            "Cache Hit Rate",
            f"[{hit_rate_color}]{stats['hit_rate']:.1%}[/{hit_rate_color}]",
        )

        # Tokens
        table.add_row("Cache Reads", f"{stats['total_reads']:,} tokens")
        table.add_row("Cache Writes", f"{stats['total_writes']:,} tokens")

        # Cost savings
        savings_color = "green" if stats["savings"] > 0 else "dim"
        table.add_row(
            "Estimated Savings",
            f"[bold {savings_color}]${stats['savings']:.2f}[/bold {savings_color}]",
        )

        # Requests
        table.add_row("Requests with Cache Hits", f"{stats['hit_count']:,}")
        table.add_row("Total Requests", f"{stats['total_requests']:,}")

        console.print(table)

        # Per-workflow breakdown
        if stats["by_workflow"]:
            console.print("\n")
            wf_table = Table(
                title="Cache Performance by Workflow",
                show_header=True,
                header_style="bold magenta",
            )
            wf_table.add_column("Workflow", style="cyan")
            wf_table.add_column("Hit Rate", justify="right")
            wf_table.add_column("Reads", justify="right")
            wf_table.add_column("Writes", justify="right")

            # Sort by hit rate descending
            sorted_workflows = sorted(
                stats["by_workflow"].items(),
                key=lambda x: x[1].get("hit_rate", 0),
                reverse=True,
            )

            for workflow, wf_stats in sorted_workflows[:10]:  # Top 10
                hit_rate = wf_stats.get("hit_rate", 0.0)
                hit_rate_color = "green" if hit_rate > 0.5 else "yellow"
                # BUG FIX: use .get() like hit_rate above — direct indexing
                # raised KeyError for workflows with partial stats records.
                wf_table.add_row(
                    workflow,
                    f"[{hit_rate_color}]{hit_rate:.1%}[/{hit_rate_color}]",
                    f"{wf_stats.get('reads', 0):,}",
                    f"{wf_stats.get('writes', 0):,}",
                )

            console.print(wf_table)

        # Recommendations when the hit rate is poor
        if stats["hit_rate"] < 0.3:
            console.print("\n")
            console.print(
                Panel(
                    "[yellow]⚠ Cache hit rate is low (<30%)[/yellow]\n\n"
                    "Recommendations:\n"
                    "  • Increase reuse of system prompts across requests\n"
                    "  • Group similar requests together (5-min cache TTL)\n"
                    "  • Consider using workflow batching\n"
                    "  • Structure prompts with static content first",
                    title="Optimization Tips",
                    border_style="yellow",
                )
            )
    else:
        # Fallback to plain text
        print("\n" + "=" * 60)
        print(f"PROMPT CACHING STATS (LAST {days} DAYS)")
        print("=" * 60)
        print(f"Cache Hit Rate: {stats['hit_rate']:.1%}")
        print(f"Cache Reads: {stats['total_reads']:,} tokens")
        print(f"Cache Writes: {stats['total_writes']:,} tokens")
        print(f"Estimated Savings: ${stats['savings']:.2f}")
        print(f"Requests with Cache Hits: {stats['hit_count']:,}")
        print(f"Total Requests: {stats['total_requests']:,}")
        print("=" * 60)

        if stats["hit_rate"] < 0.3:
            print("\n⚠ Cache hit rate is low (<30%)")
            print("Recommendations:")
            print("  • Increase reuse of system prompts across requests")
            print("  • Group similar requests together (5-min cache TTL)")
            print("  • Consider using workflow batching")

    return 0
+
|
|
339
|
+
def cmd_telemetry_compare(args: Any) -> int:
|
|
340
|
+
"""Compare telemetry across two time periods.
|
|
341
|
+
|
|
342
|
+
Args:
|
|
343
|
+
args: Parsed command-line arguments
|
|
344
|
+
|
|
345
|
+
Returns:
|
|
346
|
+
Exit code (0 for success)
|
|
347
|
+
|
|
348
|
+
"""
|
|
349
|
+
tracker = UsageTracker.get_instance()
|
|
350
|
+
period1_days = getattr(args, "period1", 7)
|
|
351
|
+
period2_days = getattr(args, "period2", 30)
|
|
352
|
+
|
|
353
|
+
# Get stats for both periods
|
|
354
|
+
stats1 = tracker.get_stats(days=period1_days)
|
|
355
|
+
stats2 = tracker.get_stats(days=period2_days)
|
|
356
|
+
|
|
357
|
+
if stats1["total_calls"] == 0 or stats2["total_calls"] == 0:
|
|
358
|
+
print("Insufficient telemetry data for comparison.")
|
|
359
|
+
return 0
|
|
360
|
+
|
|
361
|
+
if RICH_AVAILABLE and Console is not None:
|
|
362
|
+
console = Console()
|
|
363
|
+
table = Table(title="Telemetry Comparison", show_header=True, header_style="bold magenta")
|
|
364
|
+
table.add_column("Metric", style="cyan")
|
|
365
|
+
table.add_column(f"Last {period1_days} days", justify="right", style="green")
|
|
366
|
+
table.add_column(f"Last {period2_days} days", justify="right", style="yellow")
|
|
367
|
+
table.add_column("Change", justify="right", style="blue")
|
|
368
|
+
|
|
369
|
+
# Total calls
|
|
370
|
+
calls_change = (
|
|
371
|
+
((stats1["total_calls"] - stats2["total_calls"]) / stats2["total_calls"] * 100)
|
|
372
|
+
if stats2["total_calls"] > 0
|
|
373
|
+
else 0
|
|
374
|
+
)
|
|
375
|
+
table.add_row(
|
|
376
|
+
"Total Calls",
|
|
377
|
+
str(stats1["total_calls"]),
|
|
378
|
+
str(stats2["total_calls"]),
|
|
379
|
+
f"{calls_change:+.1f}%",
|
|
380
|
+
)
|
|
381
|
+
|
|
382
|
+
# Total cost
|
|
383
|
+
cost_change = (
|
|
384
|
+
((stats1["total_cost"] - stats2["total_cost"]) / stats2["total_cost"] * 100)
|
|
385
|
+
if stats2["total_cost"] > 0
|
|
386
|
+
else 0
|
|
387
|
+
)
|
|
388
|
+
table.add_row(
|
|
389
|
+
"Total Cost",
|
|
390
|
+
f"${stats1['total_cost']:.2f}",
|
|
391
|
+
f"${stats2['total_cost']:.2f}",
|
|
392
|
+
f"{cost_change:+.1f}%",
|
|
393
|
+
)
|
|
394
|
+
|
|
395
|
+
# Avg cost per call
|
|
396
|
+
avg1 = stats1["total_cost"] / stats1["total_calls"] if stats1["total_calls"] > 0 else 0
|
|
397
|
+
avg2 = stats2["total_cost"] / stats2["total_calls"] if stats2["total_calls"] > 0 else 0
|
|
398
|
+
avg_change = ((avg1 - avg2) / avg2 * 100) if avg2 > 0 else 0
|
|
399
|
+
table.add_row(
|
|
400
|
+
"Avg Cost/Call",
|
|
401
|
+
f"${avg1:.4f}",
|
|
402
|
+
f"${avg2:.4f}",
|
|
403
|
+
f"{avg_change:+.1f}%",
|
|
404
|
+
)
|
|
405
|
+
|
|
406
|
+
# Cache hit rate
|
|
407
|
+
cache_change = stats1["cache_hit_rate"] - stats2["cache_hit_rate"]
|
|
408
|
+
table.add_row(
|
|
409
|
+
"Cache Hit Rate",
|
|
410
|
+
f"{stats1['cache_hit_rate']:.1f}%",
|
|
411
|
+
f"{stats2['cache_hit_rate']:.1f}%",
|
|
412
|
+
f"{cache_change:+.1f}pp",
|
|
413
|
+
)
|
|
414
|
+
|
|
415
|
+
console.print(table)
|
|
416
|
+
else:
|
|
417
|
+
# Fallback to plain text
|
|
418
|
+
print("\n" + "=" * 80)
|
|
419
|
+
print("TELEMETRY COMPARISON")
|
|
420
|
+
print("=" * 80)
|
|
421
|
+
print(
|
|
422
|
+
f"{'Metric':<20} {'Last ' + str(period1_days) + ' days':>20} {'Last ' + str(period2_days) + ' days':>20} {'Change':>15}"
|
|
423
|
+
)
|
|
424
|
+
print("-" * 80)
|
|
425
|
+
|
|
426
|
+
calls_change = (
|
|
427
|
+
((stats1["total_calls"] - stats2["total_calls"]) / stats2["total_calls"] * 100)
|
|
428
|
+
if stats2["total_calls"] > 0
|
|
429
|
+
else 0
|
|
430
|
+
)
|
|
431
|
+
print(
|
|
432
|
+
f"{'Total Calls':<20} {stats1['total_calls']:>20} {stats2['total_calls']:>20} {calls_change:>14.1f}%"
|
|
433
|
+
)
|
|
434
|
+
|
|
435
|
+
cost_change = (
|
|
436
|
+
((stats1["total_cost"] - stats2["total_cost"]) / stats2["total_cost"] * 100)
|
|
437
|
+
if stats2["total_cost"] > 0
|
|
438
|
+
else 0
|
|
439
|
+
)
|
|
440
|
+
print(
|
|
441
|
+
f"{'Total Cost':<20} ${stats1['total_cost']:>19.2f} ${stats2['total_cost']:>19.2f} {cost_change:>14.1f}%"
|
|
442
|
+
)
|
|
443
|
+
|
|
444
|
+
avg1 = stats1["total_cost"] / stats1["total_calls"] if stats1["total_calls"] > 0 else 0
|
|
445
|
+
avg2 = stats2["total_cost"] / stats2["total_calls"] if stats2["total_calls"] > 0 else 0
|
|
446
|
+
avg_change = ((avg1 - avg2) / avg2 * 100) if avg2 > 0 else 0
|
|
447
|
+
print(f"{'Avg Cost/Call':<20} ${avg1:>19.4f} ${avg2:>19.4f} {avg_change:>14.1f}%")
|
|
448
|
+
|
|
449
|
+
cache_change = stats1["cache_hit_rate"] - stats2["cache_hit_rate"]
|
|
450
|
+
print(
|
|
451
|
+
f"{'Cache Hit Rate':<20} {stats1['cache_hit_rate']:>19.1f}% {stats2['cache_hit_rate']:>19.1f}% {cache_change:>14.1f}pp"
|
|
452
|
+
)
|
|
453
|
+
|
|
454
|
+
print("=" * 80)
|
|
455
|
+
|
|
456
|
+
return 0
|
|
457
|
+
|
|
458
|
+
|
|
459
|
+
def cmd_telemetry_reset(args: Any) -> int:
    """Reset/clear all telemetry data.

    Refuses to act unless the ``--confirm`` flag was supplied, because the
    deletion is irreversible.

    Args:
        args: Parsed command-line arguments

    Returns:
        Exit code (0 for success)

    """
    usage = UsageTracker.get_instance()

    # Require an explicit opt-in before destroying data.
    if not getattr(args, "confirm", False):
        print("WARNING: This will permanently delete all telemetry data.")
        print(f"Location: {usage.telemetry_dir}")
        print("\nUse --confirm to proceed.")
        return 1

    deleted = usage.reset()
    print(f"Deleted {deleted} telemetry entries.")
    print("New tracking starts now.")
    return 0
|
|
482
|
+
|
|
483
|
+
|
|
484
|
+
def cmd_telemetry_export(args: Any) -> int:
|
|
485
|
+
"""Export telemetry data to JSON or CSV.
|
|
486
|
+
|
|
487
|
+
Args:
|
|
488
|
+
args: Parsed command-line arguments
|
|
489
|
+
|
|
490
|
+
Returns:
|
|
491
|
+
Exit code (0 for success)
|
|
492
|
+
|
|
493
|
+
"""
|
|
494
|
+
tracker = UsageTracker.get_instance()
|
|
495
|
+
format_type = getattr(args, "format", "json")
|
|
496
|
+
output_file = getattr(args, "output", None)
|
|
497
|
+
days = getattr(args, "days", None)
|
|
498
|
+
|
|
499
|
+
entries = tracker.export_to_dict(days=days)
|
|
500
|
+
|
|
501
|
+
if not entries:
|
|
502
|
+
print("No telemetry data to export.")
|
|
503
|
+
return 0
|
|
504
|
+
|
|
505
|
+
if format_type == "json":
|
|
506
|
+
# Export as JSON
|
|
507
|
+
if output_file:
|
|
508
|
+
validated_path = _validate_file_path(output_file)
|
|
509
|
+
with open(validated_path, "w", encoding="utf-8") as f:
|
|
510
|
+
json.dump(entries, f, indent=2)
|
|
511
|
+
print(f"Exported {len(entries)} entries to {validated_path}")
|
|
512
|
+
else:
|
|
513
|
+
print(json.dumps(entries, indent=2))
|
|
514
|
+
elif format_type == "csv":
|
|
515
|
+
# Export as CSV
|
|
516
|
+
if not entries:
|
|
517
|
+
print("No data to export.")
|
|
518
|
+
return 0
|
|
519
|
+
|
|
520
|
+
# Get all possible fields
|
|
521
|
+
fieldnames = [
|
|
522
|
+
"ts",
|
|
523
|
+
"workflow",
|
|
524
|
+
"stage",
|
|
525
|
+
"tier",
|
|
526
|
+
"model",
|
|
527
|
+
"provider",
|
|
528
|
+
"cost",
|
|
529
|
+
"tokens_input",
|
|
530
|
+
"tokens_output",
|
|
531
|
+
"cache_hit",
|
|
532
|
+
"cache_type",
|
|
533
|
+
"duration_ms",
|
|
534
|
+
]
|
|
535
|
+
|
|
536
|
+
if output_file:
|
|
537
|
+
validated_path = _validate_file_path(output_file)
|
|
538
|
+
with open(validated_path, "w", newline="", encoding="utf-8") as f:
|
|
539
|
+
writer = csv.DictWriter(f, fieldnames=fieldnames)
|
|
540
|
+
writer.writeheader()
|
|
541
|
+
for entry in entries:
|
|
542
|
+
row = {
|
|
543
|
+
"ts": entry.get("ts", ""),
|
|
544
|
+
"workflow": entry.get("workflow", ""),
|
|
545
|
+
"stage": entry.get("stage", ""),
|
|
546
|
+
"tier": entry.get("tier", ""),
|
|
547
|
+
"model": entry.get("model", ""),
|
|
548
|
+
"provider": entry.get("provider", ""),
|
|
549
|
+
"cost": entry.get("cost", 0.0),
|
|
550
|
+
"tokens_input": entry.get("tokens", {}).get("input", 0),
|
|
551
|
+
"tokens_output": entry.get("tokens", {}).get("output", 0),
|
|
552
|
+
"cache_hit": entry.get("cache", {}).get("hit", False),
|
|
553
|
+
"cache_type": entry.get("cache", {}).get("type", ""),
|
|
554
|
+
"duration_ms": entry.get("duration_ms", 0),
|
|
555
|
+
}
|
|
556
|
+
writer.writerow(row)
|
|
557
|
+
print(f"Exported {len(entries)} entries to {validated_path}")
|
|
558
|
+
else:
|
|
559
|
+
# Print to stdout
|
|
560
|
+
writer = csv.DictWriter(sys.stdout, fieldnames=fieldnames)
|
|
561
|
+
writer.writeheader()
|
|
562
|
+
for entry in entries:
|
|
563
|
+
row = {
|
|
564
|
+
"ts": entry.get("ts", ""),
|
|
565
|
+
"workflow": entry.get("workflow", ""),
|
|
566
|
+
"stage": entry.get("stage", ""),
|
|
567
|
+
"tier": entry.get("tier", ""),
|
|
568
|
+
"model": entry.get("model", ""),
|
|
569
|
+
"provider": entry.get("provider", ""),
|
|
570
|
+
"cost": entry.get("cost", 0.0),
|
|
571
|
+
"tokens_input": entry.get("tokens", {}).get("input", 0),
|
|
572
|
+
"tokens_output": entry.get("tokens", {}).get("output", 0),
|
|
573
|
+
"cache_hit": entry.get("cache", {}).get("hit", False),
|
|
574
|
+
"cache_type": entry.get("cache", {}).get("type", ""),
|
|
575
|
+
"duration_ms": entry.get("duration_ms", 0),
|
|
576
|
+
}
|
|
577
|
+
writer.writerow(row)
|
|
578
|
+
else:
|
|
579
|
+
print(f"Unknown format: {format_type}")
|
|
580
|
+
print("Supported formats: json, csv")
|
|
581
|
+
return 1
|
|
582
|
+
|
|
583
|
+
return 0
|
|
584
|
+
|
|
585
|
+
|
|
586
|
+
# ==============================================================================
|
|
587
|
+
# Dashboard Commands
|
|
588
|
+
# ==============================================================================
|
|
589
|
+
# cmd_telemetry_dashboard and cmd_file_test_dashboard have been moved to:
|
|
590
|
+
# src/attune/telemetry/commands/dashboard_commands.py
|
|
591
|
+
# They are imported at the top of this file for backward compatibility.
|
|
592
|
+
# ==============================================================================
|
|
593
|
+
|
|
594
|
+
|
|
595
|
+
# ==============================================================================
|
|
596
|
+
# Tier 1 Automation Monitoring CLI Commands
|
|
597
|
+
# ==============================================================================
|
|
598
|
+
|
|
599
|
+
|
|
600
|
+
|
|
601
|
+
# ==============================================================================
|
|
602
|
+
# Dashboard Commands (Extracted to Separate Module)
|
|
603
|
+
# ==============================================================================
|
|
604
|
+
# cmd_telemetry_dashboard and cmd_file_test_dashboard moved to:
|
|
605
|
+
# src/attune/telemetry/commands/dashboard_commands.py
|
|
606
|
+
# Imported at top of file for backward compatibility.
|
|
607
|
+
# ==============================================================================
|
|
608
|
+
|
|
609
|
+
def cmd_tier1_status(args: Any) -> int:
    """Show comprehensive Tier 1 automation status.

    Renders four sections — task routing, test execution, coverage, and
    agent performance — from an analytics summary, as Rich panels when Rich
    is available, otherwise as plain text.

    Args:
        args: Parsed command-line arguments (``hours``: lookback window,
            default 24)

    Returns:
        Exit code (0 for success, 1 if the telemetry store cannot be read)
    """
    from datetime import timedelta

    from attune.models.telemetry import TelemetryAnalytics, get_telemetry_store

    # Any failure while reading the store is reported as a single error line
    # with exit code 1 rather than a traceback.
    try:
        store = get_telemetry_store()
        analytics = TelemetryAnalytics(store)

        hours = getattr(args, "hours", 24)
        # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
        # presumably the telemetry store holds naive UTC timestamps, so
        # switching to timezone-aware datetimes needs verification first.
        since = datetime.utcnow() - timedelta(hours=hours)

        summary = analytics.tier1_summary(since=since)
    except Exception as e:
        print(f"Error retrieving Tier 1 status: {e}")
        return 1

    if RICH_AVAILABLE and Console is not None:
        console = Console()

        # Task Routing Panel
        routing = summary["task_routing"]
        routing_text = Text()
        routing_text.append(f"Total Tasks: {routing['total_tasks']}\n")
        routing_text.append(f"Success Rate: {routing['accuracy_rate']:.1%}\n", style="green bold")
        routing_text.append(f"Avg Confidence: {routing['avg_confidence']:.2f}\n")

        # Test Execution Panel
        tests = summary["test_execution"]
        tests_text = Text()
        tests_text.append(f"Total Runs: {tests['total_executions']}\n")
        tests_text.append(f"Success Rate: {tests['success_rate']:.1%}\n", style="green bold")
        tests_text.append(f"Avg Duration: {tests['avg_duration_seconds']:.1f}s\n")
        tests_text.append(f"Total Failures: {tests['total_failures']}\n")

        # Coverage Panel
        coverage = summary["coverage"]
        coverage_text = Text()
        coverage_text.append(f"Current: {coverage['current_coverage']:.1f}%\n", style="cyan bold")
        coverage_text.append(f"Change: {coverage['change']:+.1f}%\n")
        coverage_text.append(f"Trend: {coverage['trend']}\n")
        coverage_text.append(f"Critical Gaps: {coverage['critical_gaps_count']}\n")

        # Agent Performance Panel
        agent = summary["agent_performance"]
        agent_text = Text()
        agent_text.append(f"Active Agents: {len(agent['by_agent'])}\n")
        agent_text.append(f"Automation Rate: {agent['automation_rate']:.1%}\n", style="green bold")
        agent_text.append(f"Human Review Rate: {agent['human_review_rate']:.1%}\n")

        # Display all panels
        console.print(f"\n[bold]Tier 1 Automation Status[/bold] (last {hours} hours)\n")
        console.print(Panel(routing_text, title="Task Routing", border_style="blue"))
        console.print(Panel(tests_text, title="Test Execution", border_style="green"))
        console.print(Panel(coverage_text, title="Coverage", border_style="cyan"))
        console.print(Panel(agent_text, title="Agent Performance", border_style="magenta"))
    else:
        # Plain text fallback (subset of the Rich output: no coverage change
        # or critical-gaps lines, no test-failure count).
        routing = summary["task_routing"]
        tests = summary["test_execution"]
        coverage = summary["coverage"]
        agent = summary["agent_performance"]

        print(f"\nTier 1 Automation Status (last {hours} hours)")
        print("=" * 50)
        print("\nTask Routing:")
        print(f"  Total Tasks: {routing['total_tasks']}")
        print(f"  Success Rate: {routing['accuracy_rate']:.1%}")
        print(f"  Avg Confidence: {routing['avg_confidence']:.2f}")

        print("\nTest Execution:")
        print(f"  Total Runs: {tests['total_executions']}")
        print(f"  Success Rate: {tests['success_rate']:.1%}")
        print(f"  Avg Duration: {tests['avg_duration_seconds']:.1f}s")

        print("\nCoverage:")
        print(f"  Current: {coverage['current_coverage']:.1f}%")
        print(f"  Trend: {coverage['trend']}")

        print("\nAgent Performance:")
        print(f"  Active Agents: {len(agent['by_agent'])}")
        print(f"  Automation Rate: {agent['automation_rate']:.1%}")

    return 0
|
|
701
|
+
|
|
702
|
+
|
|
703
|
+
def cmd_task_routing_report(args: Any) -> int:
    """Show detailed task routing report.

    Prints a headline accuracy summary plus a per-task-type breakdown,
    using Rich tables when available and plain text otherwise.

    Args:
        args: Parsed command-line arguments (``hours``: lookback window,
            default 24)

    Returns:
        Exit code (0 for success)
    """
    from datetime import timedelta

    from attune.models.telemetry import TelemetryAnalytics, get_telemetry_store

    hours = getattr(args, "hours", 24)

    # Surface store/analytics failures as a single error line, exit code 1.
    try:
        analytics = TelemetryAnalytics(get_telemetry_store())
        window_start = datetime.utcnow() - timedelta(hours=hours)
        report = analytics.task_routing_accuracy(since=window_start)
    except Exception as e:
        print(f"Error retrieving task routing report: {e}")
        return 1

    if not report["total_tasks"]:
        print(f"No task routing data found in the last {hours} hours.")
        return 0

    if RICH_AVAILABLE and Console is not None:
        console = Console()

        # Headline metrics table.
        summary_table = Table(title=f"Task Routing Report (last {hours} hours)")
        summary_table.add_column("Metric", style="cyan")
        summary_table.add_column("Value", style="green", justify="right")

        headline_rows = (
            ("Total Tasks", str(report["total_tasks"])),
            ("Successful", str(report["successful_routing"])),
            ("Accuracy Rate", f"{report['accuracy_rate']:.1%}"),
            ("Avg Confidence", f"{report['avg_confidence']:.2f}"),
        )
        for label, rendered in headline_rows:
            summary_table.add_row(label, rendered)

        console.print(summary_table)

        # Optional per-task-type breakdown.
        if report["by_task_type"]:
            breakdown = Table(title="Breakdown by Task Type")
            breakdown.add_column("Task Type", style="cyan")
            breakdown.add_column("Total", justify="right")
            breakdown.add_column("Success", justify="right")
            breakdown.add_column("Rate", justify="right", style="green")

            for task_type, counts in report["by_task_type"].items():
                breakdown.add_row(
                    task_type,
                    str(counts["total"]),
                    str(counts["success"]),
                    f"{counts['rate']:.1%}",
                )

            console.print(breakdown)
    else:
        # Plain text fallback
        print(f"\nTask Routing Report (last {hours} hours)")
        print("=" * 50)
        print(f"Total Tasks: {report['total_tasks']}")
        print(f"Successful: {report['successful_routing']}")
        print(f"Accuracy Rate: {report['accuracy_rate']:.1%}")
        print(f"Avg Confidence: {report['avg_confidence']:.2f}")

        if report["by_task_type"]:
            print("\nBy Task Type:")
            for task_type, counts in report["by_task_type"].items():
                print(f"  {task_type}: {counts['success']}/{counts['total']} ({counts['rate']:.1%})")

    return 0
|
|
776
|
+
|
|
777
|
+
|
|
778
|
+
def cmd_test_status(args: Any) -> int:
    """Show test execution status.

    Combines test-execution trends with coverage progress over the lookback
    window, plus a "most frequently failing tests" list when available.

    Args:
        args: Parsed command-line arguments (``hours``: lookback window,
            default 24)

    Returns:
        Exit code (0 for success, 1 if the telemetry store cannot be read)
    """
    from datetime import timedelta

    from attune.models.telemetry import TelemetryAnalytics, get_telemetry_store

    # Any failure while querying analytics is reported as one error line.
    try:
        store = get_telemetry_store()
        analytics = TelemetryAnalytics(store)

        hours = getattr(args, "hours", 24)
        # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
        # the store presumably expects naive UTC — confirm before changing.
        since = datetime.utcnow() - timedelta(hours=hours)

        stats = analytics.test_execution_trends(since=since)
        coverage = analytics.coverage_progress(since=since)
    except Exception as e:
        print(f"Error retrieving test status: {e}")
        return 1

    if not stats["total_executions"]:
        print(f"No test execution data found in the last {hours} hours.")
        return 0

    if RICH_AVAILABLE and Console is not None:
        console = Console()

        # Test execution table
        table = Table(title=f"Test Execution Status (last {hours} hours)")
        table.add_column("Metric", style="cyan")
        table.add_column("Value", style="green", justify="right")

        table.add_row("Total Runs", str(stats["total_executions"]))
        table.add_row("Success Rate", f"{stats['success_rate']:.1%}")
        table.add_row("Avg Duration", f"{stats['avg_duration_seconds']:.1f}s")
        table.add_row("Total Tests Run", str(stats["total_tests_run"]))
        table.add_row("Total Failures", str(stats["total_failures"]))
        table.add_row("Current Coverage", f"{coverage['current_coverage']:.1f}%")
        table.add_row("Coverage Trend", coverage["trend"])

        console.print(table)

        # Most failing tests (top 10 only, to keep the table readable)
        if stats["most_failing_tests"]:
            fail_table = Table(title="Most Frequently Failing Tests")
            fail_table.add_column("Test Name", style="cyan")
            fail_table.add_column("Failures", justify="right", style="red")

            for test in stats["most_failing_tests"][:10]:
                fail_table.add_row(test["name"], str(test["failures"]))

            console.print(fail_table)
    else:
        # Plain text fallback (omits the coverage trend line)
        print(f"\nTest Execution Status (last {hours} hours)")
        print("=" * 50)
        print(f"Total Runs: {stats['total_executions']}")
        print(f"Success Rate: {stats['success_rate']:.1%}")
        print(f"Avg Duration: {stats['avg_duration_seconds']:.1f}s")
        print(f"Total Tests Run: {stats['total_tests_run']}")
        print(f"Total Failures: {stats['total_failures']}")
        print(f"Current Coverage: {coverage['current_coverage']:.1f}%")

        if stats["most_failing_tests"]:
            print("\nMost Frequently Failing Tests:")
            for test in stats["most_failing_tests"][:10]:
                print(f"  {test['name']}: {test['failures']} failures")

    return 0
|
|
853
|
+
|
|
854
|
+
|
|
855
|
+
def cmd_agent_performance(args: Any) -> int:
    """Show agent performance metrics.

    Renders a per-agent table (assignments, completions, success rate,
    average duration) plus overall automation/human-review rates.

    Args:
        args: Parsed command-line arguments (``hours``: lookback window,
            default 168, i.e. 7 days)

    Returns:
        Exit code (0 for success, 1 if the telemetry store cannot be read)
    """
    from datetime import timedelta

    from attune.models.telemetry import TelemetryAnalytics, get_telemetry_store

    # Any failure while querying analytics is reported as one error line.
    try:
        store = get_telemetry_store()
        analytics = TelemetryAnalytics(store)

        hours = getattr(args, "hours", 168)  # Default 7 days for agent performance
        # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
        # the store presumably expects naive UTC — confirm before changing.
        since = datetime.utcnow() - timedelta(hours=hours)

        stats = analytics.agent_performance(since=since)
    except Exception as e:
        print(f"Error retrieving agent performance: {e}")
        return 1

    if not stats["by_agent"]:
        print(f"No agent assignment data found in the last {hours} hours.")
        return 0

    if RICH_AVAILABLE and Console is not None:
        console = Console()

        # Agent performance table: one row per agent
        table = Table(title=f"Agent Performance (last {hours} hours)")
        table.add_column("Agent", style="cyan")
        table.add_column("Assignments", justify="right")
        table.add_column("Completed", justify="right")
        table.add_column("Success Rate", justify="right", style="green")
        table.add_column("Avg Duration", justify="right")

        for agent, data in stats["by_agent"].items():
            table.add_row(
                agent,
                str(data["assignments"]),
                str(data["completed"]),
                f"{data['success_rate']:.1%}",
                f"{data['avg_duration_hours']:.2f}h",
            )

        console.print(table)

        # Summary panel with the aggregate automation/review split
        summary_text = Text()
        summary_text.append(
            f"Automation Rate: {stats['automation_rate']:.1%}\n", style="green bold"
        )
        summary_text.append(f"Human Review Rate: {stats['human_review_rate']:.1%}\n")

        console.print(Panel(summary_text, title="Summary", border_style="blue"))
    else:
        # Plain text fallback
        print(f"\nAgent Performance (last {hours} hours)")
        print("=" * 50)

        for agent, data in stats["by_agent"].items():
            print(f"\n{agent}:")
            print(f"  Assignments: {data['assignments']}")
            print(f"  Completed: {data['completed']}")
            print(f"  Success Rate: {data['success_rate']:.1%}")
            print(f"  Avg Duration: {data['avg_duration_hours']:.2f}h")

        print(f"\nAutomation Rate: {stats['automation_rate']:.1%}")
        print(f"Human Review Rate: {stats['human_review_rate']:.1%}")

    return 0
|
|
930
|
+
|
|
931
|
+
|
|
932
|
+
def cmd_sonnet_opus_analysis(args: Any) -> int:
    """Show Sonnet 4.5 → Opus 4.5 fallback analysis and cost savings.

    Reports how often Sonnet attempts fell back to Opus, compares actual
    spend against an always-Opus baseline, and prints a recommendation
    keyed off the fallback rate (<5% good, <15% moderate, else high).

    Args:
        args: Parsed command-line arguments (``days``: lookback window,
            default 30)

    Returns:
        Exit code (0 for success, 1 if the telemetry store cannot be read)
    """
    from datetime import timedelta

    from attune.models.telemetry import TelemetryAnalytics, get_telemetry_store

    days = getattr(args, "days", 30)

    # Wrap retrieval in try/except so a store failure prints one error line
    # and returns 1, matching the sibling tier-1 commands (cmd_tier1_status,
    # cmd_test_status, etc.) instead of crashing with a traceback.
    try:
        store = get_telemetry_store()
        analytics = TelemetryAnalytics(store)

        since = datetime.utcnow() - timedelta(days=days)

        stats = analytics.sonnet_opus_fallback_analysis(since=since)
    except Exception as e:
        print(f"Error retrieving Sonnet/Opus analysis: {e}")
        return 1

    if stats["total_calls"] == 0:
        print(f"No Sonnet/Opus calls found in the last {days} days.")
        return 0

    if RICH_AVAILABLE and Console is not None:
        console = Console()

        # Fallback Performance Panel
        perf_text = Text()
        perf_text.append(f"Total Anthropic Calls: {stats['total_calls']}\n")
        perf_text.append(f"Sonnet 4.5 Attempts: {stats['sonnet_attempts']}\n")
        perf_text.append(
            f"Sonnet Success Rate: {stats['success_rate_sonnet']:.1f}%\n",
            style="green bold",
        )
        perf_text.append(f"Opus Fallbacks: {stats['opus_fallbacks']}\n")
        # Highlight the fallback rate in yellow once it exceeds 10%.
        perf_text.append(
            f"Fallback Rate: {stats['fallback_rate']:.1f}%\n",
            style="yellow bold" if stats["fallback_rate"] > 10 else "green",
        )

        console.print(
            Panel(
                perf_text,
                title=f"Sonnet 4.5 → Opus 4.5 Fallback Performance (last {days} days)",
                border_style="cyan",
            )
        )

        # Cost Savings Panel: actual spend vs. an always-Opus baseline
        savings_text = Text()
        savings_text.append(f"Actual Cost: ${stats['actual_cost']:.2f}\n")
        savings_text.append(f"Always-Opus Cost: ${stats['always_opus_cost']:.2f}\n")
        savings_text.append(
            f"Savings: ${stats['savings']:.2f} ({stats['savings_percent']:.1f}%)\n",
            style="green bold",
        )
        savings_text.append("\n")
        savings_text.append(f"Avg Cost/Call (actual): ${stats['avg_cost_per_call']:.4f}\n")
        savings_text.append(f"Avg Cost/Call (all Opus): ${stats['avg_opus_cost_per_call']:.4f}\n")

        console.print(Panel(savings_text, title="Cost Savings Analysis", border_style="green"))

        # Recommendation, tiered by fallback rate
        if stats["fallback_rate"] < 5:
            rec_text = Text()
            rec_text.append("✅ Excellent Performance!\n", style="green bold")
            rec_text.append(
                f"Sonnet 4.5 handles {100 - stats['fallback_rate']:.1f}% of tasks successfully.\n"
            )
            rec_text.append(
                f"You're saving ${stats['savings']:.2f} compared to always using Opus.\n"
            )
            console.print(Panel(rec_text, title="Recommendation", border_style="green"))
        elif stats["fallback_rate"] < 15:
            rec_text = Text()
            rec_text.append("⚠️ Moderate Fallback Rate\n", style="yellow bold")
            rec_text.append(f"{stats['fallback_rate']:.1f}% of tasks need Opus fallback.\n")
            rec_text.append("Consider analyzing which tasks fail on Sonnet.\n")
            console.print(Panel(rec_text, title="Recommendation", border_style="yellow"))
        else:
            rec_text = Text()
            rec_text.append("❌ High Fallback Rate\n", style="red bold")
            rec_text.append(f"{stats['fallback_rate']:.1f}% of tasks need Opus fallback.\n")
            rec_text.append(
                "Consider using Opus directly for complex tasks to avoid retry overhead.\n"
            )
            console.print(Panel(rec_text, title="Recommendation", border_style="red"))
    else:
        # Plain text fallback
        print(f"\nSonnet 4.5 → Opus 4.5 Fallback Analysis (last {days} days)")
        print("=" * 60)
        print("\nFallback Performance:")
        print(f"  Total Anthropic Calls: {stats['total_calls']}")
        print(f"  Sonnet 4.5 Attempts: {stats['sonnet_attempts']}")
        print(f"  Sonnet Success Rate: {stats['success_rate_sonnet']:.1f}%")
        print(f"  Opus Fallbacks: {stats['opus_fallbacks']}")
        print(f"  Fallback Rate: {stats['fallback_rate']:.1f}%")
        print("\nCost Savings:")
        print(f"  Actual Cost: ${stats['actual_cost']:.2f}")
        print(f"  Always-Opus Cost: ${stats['always_opus_cost']:.2f}")
        print(f"  Savings: ${stats['savings']:.2f} ({stats['savings_percent']:.1f}%)")
        print(f"  Avg Cost/Call (actual): ${stats['avg_cost_per_call']:.4f}")
        print(f"  Avg Cost/Call (all Opus): ${stats['avg_opus_cost_per_call']:.4f}")

        if stats["fallback_rate"] < 5:
            print(f"\n✅ Excellent! Sonnet handles {100 - stats['fallback_rate']:.1f}% of tasks.")
        elif stats["fallback_rate"] < 15:
            print(f"\n⚠️ Moderate fallback rate ({stats['fallback_rate']:.1f}%).")
        else:
            print(f"\n❌ High fallback rate ({stats['fallback_rate']:.1f}%).")

    return 0
|
|
1046
|
+
|
|
1047
|
+
|
|
1048
|
+
def cmd_file_test_status(args: Any) -> int:
    """Show per-file test status.

    Displays the test status for individual files, including:
    - Last test result (passed/failed/error/no_tests)
    - When tests were last run
    - Whether tests are stale (source modified since last test)

    Args:
        args: Parsed command-line arguments
            - file: Optional specific file to check
            - failed: Show only failed tests
            - stale: Show only stale tests
            - limit: Maximum files to show

    Returns:
        Exit code (0 for success)
    """
    # Imported lazily so the telemetry backend is only loaded when this
    # command actually runs, not at CLI module import time.
    from attune.models.telemetry import get_telemetry_store

    try:
        store = get_telemetry_store()

        # All options are read defensively with getattr so the command also
        # works when a parser variant omits one of these attributes.
        file_path = getattr(args, "file", None)
        failed_only = getattr(args, "failed", False)
        stale_only = getattr(args, "stale", False)
        limit = getattr(args, "limit", 50)

        if file_path:
            # Show status for a specific file
            record = store.get_latest_file_test(file_path)
            if record is None:
                print(f"No test record found for: {file_path}")
                return 0
            records = [record]
        else:
            # Get all file test records.
            # limit=100000 is an effectively-unbounded fetch; the raw history
            # is deduplicated to one latest record per file below.
            all_records = store.get_file_tests(limit=100000)

            if not all_records:
                print("No per-file test records found.")
                print("Run: empathy test-file <source_file> to track tests for a file.")
                return 0

            # Get latest record per file (keep the newest timestamp seen).
            latest_by_file: dict[str, Any] = {}
            for record in all_records:
                existing = latest_by_file.get(record.file_path)
                if existing is None or record.timestamp > existing.timestamp:
                    latest_by_file[record.file_path] = record

            records = list(latest_by_file.values())

        # Apply filters ("error" results count as failures for --failed)
        if failed_only:
            records = [r for r in records if r.last_test_result in ("failed", "error")]
        if stale_only:
            records = [r for r in records if r.is_stale]

        # Sort by file path and limit
        records.sort(key=lambda r: r.file_path)
        records = records[:limit]

    except Exception as e:
        # Broad catch: any telemetry-store failure degrades to an error
        # message and a non-zero exit code instead of a traceback.
        print(f"Error retrieving file test status: {e}")
        return 1

    if not records:
        # Everything was filtered out — describe which filters were active.
        filter_desc = []
        if failed_only:
            filter_desc.append("failed")
        if stale_only:
            filter_desc.append("stale")
        filter_str = " and ".join(filter_desc) if filter_desc else "matching"
        print(f"No {filter_str} file test records found.")
        return 0

    if RICH_AVAILABLE and Console is not None:
        console = Console()

        # Summary stats aggregated over the (filtered, limited) record set.
        total = len(records)
        passed = sum(1 for r in records if r.last_test_result == "passed")
        failed = sum(1 for r in records if r.last_test_result in ("failed", "error"))
        no_tests = sum(1 for r in records if r.last_test_result == "no_tests")
        stale = sum(1 for r in records if r.is_stale)

        summary = Text()
        summary.append(f"Files: {total} ", style="bold")
        summary.append(f"Passed: {passed} ", style="green")
        summary.append(f"Failed: {failed} ", style="red")
        summary.append(f"No Tests: {no_tests} ", style="yellow")
        summary.append(f"Stale: {stale}", style="magenta")
        console.print(Panel(summary, title="Per-File Test Status Summary", border_style="cyan"))

        # File status table
        table = Table(title="File Test Status")
        table.add_column("File", style="cyan", max_width=50)
        table.add_column("Result", style="bold")
        table.add_column("Tests", justify="right")
        table.add_column("Passed", justify="right", style="green")
        table.add_column("Failed", justify="right", style="red")
        table.add_column("Duration", justify="right")
        table.add_column("Last Run", style="dim")
        table.add_column("Stale", style="magenta")

        for record in records:
            # Format result with color
            result = record.last_test_result
            if result == "passed":
                result_style = "green"
            elif result in ("failed", "error"):
                result_style = "red"
            elif result == "no_tests":
                result_style = "yellow"
            else:
                result_style = "dim"

            # Format timestamp. rstrip("Z") drops a trailing UTC "Z" suffix,
            # which datetime.fromisoformat cannot parse before Python 3.11;
            # on any parse failure, fall back to the raw prefix.
            try:
                dt = datetime.fromisoformat(record.timestamp.rstrip("Z"))
                ts_display = dt.strftime("%Y-%m-%d %H:%M")
            except (ValueError, AttributeError):
                ts_display = record.timestamp[:16] if record.timestamp else "-"

            # Stale indicator
            stale_str = "YES" if record.is_stale else ""

            # The "Failed" column combines failures and errors.
            table.add_row(
                record.file_path,
                Text(result, style=result_style),
                str(record.test_count),
                str(record.passed),
                str(record.failed + record.errors),
                f"{record.duration_seconds:.1f}s" if record.duration_seconds else "-",
                ts_display,
                stale_str,
            )

        console.print(table)

        # Show failed test details if any (capped at 10 files x 3 tests
        # to keep the detail table readable).
        failed_records = [r for r in records if r.failed_tests]
        if failed_records:
            fail_table = Table(title="Failed Test Details")
            fail_table.add_column("File", style="cyan")
            fail_table.add_column("Test Name", style="red")
            fail_table.add_column("Error")

            for record in failed_records[:10]:
                for test in record.failed_tests[:3]:
                    fail_table.add_row(
                        record.file_path,
                        test.get("name", "unknown"),
                        test.get("error", "")[:50],
                    )

            console.print(fail_table)

    else:
        # Plain text fallback (rich not installed)
        print("\nPer-File Test Status")
        print("=" * 80)

        for record in records:
            status = record.last_test_result.upper()
            stale_marker = " [STALE]" if record.is_stale else ""
            print(f"\n{record.file_path}")
            print(f" Status: {status}{stale_marker}")
            # NOTE(review): unlike the rich table's "Failed" column, this
            # count omits record.errors — confirm whether that is intended.
            print(
                f" Tests: {record.test_count} (passed: {record.passed}, failed: {record.failed})"
            )
            if record.duration_seconds:
                print(f" Duration: {record.duration_seconds:.1f}s")
            print(f" Last Run: {record.timestamp[:19]}")

            if record.failed_tests:
                print(" Failed Tests:")
                for test in record.failed_tests[:3]:
                    print(f" - {test.get('name', 'unknown')}: {test.get('error', '')[:40]}")

    return 0