tapps-agents 3.5.40-py3-none-any.whl → 3.6.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tapps_agents/__init__.py +2 -2
- tapps_agents/agents/__init__.py +22 -22
- tapps_agents/agents/analyst/__init__.py +5 -5
- tapps_agents/agents/architect/__init__.py +5 -5
- tapps_agents/agents/architect/agent.py +1033 -1033
- tapps_agents/agents/architect/pattern_detector.py +75 -75
- tapps_agents/agents/cleanup/__init__.py +7 -7
- tapps_agents/agents/cleanup/agent.py +445 -445
- tapps_agents/agents/debugger/__init__.py +7 -7
- tapps_agents/agents/debugger/agent.py +310 -310
- tapps_agents/agents/debugger/error_analyzer.py +437 -437
- tapps_agents/agents/designer/__init__.py +5 -5
- tapps_agents/agents/designer/agent.py +786 -786
- tapps_agents/agents/designer/visual_designer.py +638 -638
- tapps_agents/agents/documenter/__init__.py +7 -7
- tapps_agents/agents/documenter/agent.py +531 -531
- tapps_agents/agents/documenter/doc_generator.py +472 -472
- tapps_agents/agents/documenter/doc_validator.py +393 -393
- tapps_agents/agents/documenter/framework_doc_updater.py +493 -493
- tapps_agents/agents/enhancer/__init__.py +7 -7
- tapps_agents/agents/evaluator/__init__.py +7 -7
- tapps_agents/agents/evaluator/agent.py +443 -443
- tapps_agents/agents/evaluator/priority_evaluator.py +641 -641
- tapps_agents/agents/evaluator/quality_analyzer.py +147 -147
- tapps_agents/agents/evaluator/report_generator.py +344 -344
- tapps_agents/agents/evaluator/usage_analyzer.py +192 -192
- tapps_agents/agents/evaluator/workflow_analyzer.py +189 -189
- tapps_agents/agents/implementer/__init__.py +7 -7
- tapps_agents/agents/implementer/agent.py +798 -798
- tapps_agents/agents/implementer/auto_fix.py +1119 -1119
- tapps_agents/agents/implementer/code_generator.py +73 -73
- tapps_agents/agents/improver/__init__.py +1 -1
- tapps_agents/agents/improver/agent.py +753 -753
- tapps_agents/agents/ops/__init__.py +1 -1
- tapps_agents/agents/ops/agent.py +619 -619
- tapps_agents/agents/ops/dependency_analyzer.py +600 -600
- tapps_agents/agents/orchestrator/__init__.py +5 -5
- tapps_agents/agents/orchestrator/agent.py +522 -522
- tapps_agents/agents/planner/__init__.py +7 -7
- tapps_agents/agents/planner/agent.py +1127 -1127
- tapps_agents/agents/reviewer/__init__.py +24 -24
- tapps_agents/agents/reviewer/agent.py +3513 -3513
- tapps_agents/agents/reviewer/aggregator.py +213 -213
- tapps_agents/agents/reviewer/batch_review.py +448 -448
- tapps_agents/agents/reviewer/cache.py +443 -443
- tapps_agents/agents/reviewer/context7_enhancer.py +630 -630
- tapps_agents/agents/reviewer/context_detector.py +203 -203
- tapps_agents/agents/reviewer/docker_compose_validator.py +158 -158
- tapps_agents/agents/reviewer/dockerfile_validator.py +176 -176
- tapps_agents/agents/reviewer/error_handling.py +126 -126
- tapps_agents/agents/reviewer/feedback_generator.py +490 -490
- tapps_agents/agents/reviewer/influxdb_validator.py +316 -316
- tapps_agents/agents/reviewer/issue_tracking.py +169 -169
- tapps_agents/agents/reviewer/library_detector.py +295 -295
- tapps_agents/agents/reviewer/library_patterns.py +268 -268
- tapps_agents/agents/reviewer/maintainability_scorer.py +593 -593
- tapps_agents/agents/reviewer/metric_strategies.py +276 -276
- tapps_agents/agents/reviewer/mqtt_validator.py +160 -160
- tapps_agents/agents/reviewer/output_enhancer.py +105 -105
- tapps_agents/agents/reviewer/pattern_detector.py +241 -241
- tapps_agents/agents/reviewer/performance_scorer.py +357 -357
- tapps_agents/agents/reviewer/phased_review.py +516 -516
- tapps_agents/agents/reviewer/progressive_review.py +435 -435
- tapps_agents/agents/reviewer/react_scorer.py +331 -331
- tapps_agents/agents/reviewer/score_constants.py +228 -228
- tapps_agents/agents/reviewer/score_validator.py +507 -507
- tapps_agents/agents/reviewer/scorer_registry.py +373 -373
- tapps_agents/agents/reviewer/scoring.py +1566 -1566
- tapps_agents/agents/reviewer/service_discovery.py +534 -534
- tapps_agents/agents/reviewer/tools/__init__.py +41 -41
- tapps_agents/agents/reviewer/tools/parallel_executor.py +581 -581
- tapps_agents/agents/reviewer/tools/ruff_grouping.py +250 -250
- tapps_agents/agents/reviewer/tools/scoped_mypy.py +284 -284
- tapps_agents/agents/reviewer/typescript_scorer.py +1142 -1142
- tapps_agents/agents/reviewer/validation.py +208 -208
- tapps_agents/agents/reviewer/websocket_validator.py +132 -132
- tapps_agents/agents/tester/__init__.py +7 -7
- tapps_agents/agents/tester/accessibility_auditor.py +309 -309
- tapps_agents/agents/tester/agent.py +1080 -1080
- tapps_agents/agents/tester/batch_generator.py +54 -54
- tapps_agents/agents/tester/context_learner.py +51 -51
- tapps_agents/agents/tester/coverage_analyzer.py +386 -386
- tapps_agents/agents/tester/coverage_test_generator.py +290 -290
- tapps_agents/agents/tester/debug_enhancer.py +238 -238
- tapps_agents/agents/tester/device_emulator.py +241 -241
- tapps_agents/agents/tester/integration_generator.py +62 -62
- tapps_agents/agents/tester/network_recorder.py +300 -300
- tapps_agents/agents/tester/performance_monitor.py +320 -320
- tapps_agents/agents/tester/test_fixer.py +316 -316
- tapps_agents/agents/tester/test_generator.py +632 -632
- tapps_agents/agents/tester/trace_manager.py +234 -234
- tapps_agents/agents/tester/visual_regression.py +291 -291
- tapps_agents/analysis/pattern_detector.py +36 -36
- tapps_agents/beads/hydration.py +213 -213
- tapps_agents/beads/parse.py +32 -32
- tapps_agents/beads/specs.py +206 -206
- tapps_agents/cli/__init__.py +9 -9
- tapps_agents/cli/__main__.py +8 -8
- tapps_agents/cli/base.py +478 -478
- tapps_agents/cli/command_classifier.py +72 -72
- tapps_agents/cli/commands/__init__.py +2 -2
- tapps_agents/cli/commands/analyst.py +173 -173
- tapps_agents/cli/commands/architect.py +109 -109
- tapps_agents/cli/commands/cleanup_agent.py +92 -92
- tapps_agents/cli/commands/common.py +126 -126
- tapps_agents/cli/commands/debugger.py +90 -90
- tapps_agents/cli/commands/designer.py +112 -112
- tapps_agents/cli/commands/documenter.py +136 -136
- tapps_agents/cli/commands/enhancer.py +110 -110
- tapps_agents/cli/commands/evaluator.py +255 -255
- tapps_agents/cli/commands/health.py +665 -665
- tapps_agents/cli/commands/implementer.py +301 -301
- tapps_agents/cli/commands/improver.py +91 -91
- tapps_agents/cli/commands/knowledge.py +111 -111
- tapps_agents/cli/commands/learning.py +172 -172
- tapps_agents/cli/commands/observability.py +283 -283
- tapps_agents/cli/commands/ops.py +135 -135
- tapps_agents/cli/commands/orchestrator.py +116 -116
- tapps_agents/cli/commands/planner.py +237 -237
- tapps_agents/cli/commands/reviewer.py +1872 -1872
- tapps_agents/cli/commands/status.py +285 -285
- tapps_agents/cli/commands/task.py +227 -219
- tapps_agents/cli/commands/tester.py +191 -191
- tapps_agents/cli/commands/top_level.py +3586 -3586
- tapps_agents/cli/feedback.py +936 -936
- tapps_agents/cli/formatters.py +608 -608
- tapps_agents/cli/help/__init__.py +7 -7
- tapps_agents/cli/help/static_help.py +425 -425
- tapps_agents/cli/network_detection.py +110 -110
- tapps_agents/cli/output_compactor.py +274 -274
- tapps_agents/cli/parsers/__init__.py +2 -2
- tapps_agents/cli/parsers/analyst.py +186 -186
- tapps_agents/cli/parsers/architect.py +167 -167
- tapps_agents/cli/parsers/cleanup_agent.py +228 -228
- tapps_agents/cli/parsers/debugger.py +116 -116
- tapps_agents/cli/parsers/designer.py +182 -182
- tapps_agents/cli/parsers/documenter.py +134 -134
- tapps_agents/cli/parsers/enhancer.py +113 -113
- tapps_agents/cli/parsers/evaluator.py +213 -213
- tapps_agents/cli/parsers/implementer.py +168 -168
- tapps_agents/cli/parsers/improver.py +132 -132
- tapps_agents/cli/parsers/ops.py +159 -159
- tapps_agents/cli/parsers/orchestrator.py +98 -98
- tapps_agents/cli/parsers/planner.py +145 -145
- tapps_agents/cli/parsers/reviewer.py +462 -462
- tapps_agents/cli/parsers/tester.py +124 -124
- tapps_agents/cli/progress_heartbeat.py +254 -254
- tapps_agents/cli/streaming_progress.py +336 -336
- tapps_agents/cli/utils/__init__.py +6 -6
- tapps_agents/cli/utils/agent_lifecycle.py +48 -48
- tapps_agents/cli/utils/error_formatter.py +82 -82
- tapps_agents/cli/utils/error_recovery.py +188 -188
- tapps_agents/cli/utils/output_handler.py +59 -59
- tapps_agents/cli/utils/prompt_enhancer.py +319 -319
- tapps_agents/cli/validators/__init__.py +9 -9
- tapps_agents/cli/validators/command_validator.py +81 -81
- tapps_agents/context7/__init__.py +112 -112
- tapps_agents/context7/agent_integration.py +869 -869
- tapps_agents/context7/analytics.py +382 -382
- tapps_agents/context7/analytics_dashboard.py +299 -299
- tapps_agents/context7/async_cache.py +681 -681
- tapps_agents/context7/backup_client.py +958 -958
- tapps_agents/context7/cache_locking.py +194 -194
- tapps_agents/context7/cache_metadata.py +214 -214
- tapps_agents/context7/cache_prewarm.py +488 -488
- tapps_agents/context7/cache_structure.py +168 -168
- tapps_agents/context7/cache_warming.py +604 -604
- tapps_agents/context7/circuit_breaker.py +376 -376
- tapps_agents/context7/cleanup.py +461 -461
- tapps_agents/context7/commands.py +858 -858
- tapps_agents/context7/credential_validation.py +276 -276
- tapps_agents/context7/cross_reference_resolver.py +168 -168
- tapps_agents/context7/cross_references.py +424 -424
- tapps_agents/context7/doc_manager.py +225 -225
- tapps_agents/context7/fuzzy_matcher.py +369 -369
- tapps_agents/context7/kb_cache.py +404 -404
- tapps_agents/context7/language_detector.py +219 -219
- tapps_agents/context7/library_detector.py +725 -725
- tapps_agents/context7/lookup.py +738 -738
- tapps_agents/context7/metadata.py +258 -258
- tapps_agents/context7/refresh_queue.py +300 -300
- tapps_agents/context7/security.py +373 -373
- tapps_agents/context7/staleness_policies.py +278 -278
- tapps_agents/context7/tiles_integration.py +47 -47
- tapps_agents/continuous_bug_fix/__init__.py +20 -20
- tapps_agents/continuous_bug_fix/bug_finder.py +306 -306
- tapps_agents/continuous_bug_fix/bug_fix_coordinator.py +177 -177
- tapps_agents/continuous_bug_fix/commit_manager.py +178 -178
- tapps_agents/continuous_bug_fix/continuous_bug_fixer.py +322 -322
- tapps_agents/continuous_bug_fix/proactive_bug_finder.py +285 -285
- tapps_agents/core/__init__.py +298 -298
- tapps_agents/core/adaptive_cache_config.py +432 -432
- tapps_agents/core/agent_base.py +647 -647
- tapps_agents/core/agent_cache.py +466 -466
- tapps_agents/core/agent_learning.py +1865 -1865
- tapps_agents/core/analytics_dashboard.py +563 -563
- tapps_agents/core/analytics_enhancements.py +597 -597
- tapps_agents/core/anonymization.py +274 -274
- tapps_agents/core/artifact_context_builder.py +293 -0
- tapps_agents/core/ast_parser.py +228 -228
- tapps_agents/core/async_file_ops.py +402 -402
- tapps_agents/core/best_practice_consultant.py +299 -299
- tapps_agents/core/brownfield_analyzer.py +299 -299
- tapps_agents/core/brownfield_review.py +541 -541
- tapps_agents/core/browser_controller.py +513 -513
- tapps_agents/core/capability_registry.py +418 -418
- tapps_agents/core/change_impact_analyzer.py +190 -190
- tapps_agents/core/checkpoint_manager.py +377 -377
- tapps_agents/core/code_generator.py +329 -329
- tapps_agents/core/code_validator.py +276 -276
- tapps_agents/core/command_registry.py +327 -327
- tapps_agents/core/config.py +33 -0
- tapps_agents/core/context_gathering/__init__.py +2 -2
- tapps_agents/core/context_gathering/repository_explorer.py +28 -28
- tapps_agents/core/context_intelligence/__init__.py +2 -2
- tapps_agents/core/context_intelligence/relevance_scorer.py +24 -24
- tapps_agents/core/context_intelligence/token_budget_manager.py +27 -27
- tapps_agents/core/context_manager.py +240 -240
- tapps_agents/core/cursor_feedback_monitor.py +146 -146
- tapps_agents/core/cursor_verification.py +290 -290
- tapps_agents/core/customization_loader.py +280 -280
- tapps_agents/core/customization_schema.py +260 -260
- tapps_agents/core/customization_template.py +238 -238
- tapps_agents/core/debug_logger.py +124 -124
- tapps_agents/core/design_validator.py +298 -298
- tapps_agents/core/diagram_generator.py +226 -226
- tapps_agents/core/docker_utils.py +232 -232
- tapps_agents/core/document_generator.py +617 -617
- tapps_agents/core/domain_detector.py +30 -30
- tapps_agents/core/error_envelope.py +454 -454
- tapps_agents/core/error_handler.py +270 -270
- tapps_agents/core/estimation_tracker.py +189 -189
- tapps_agents/core/eval_prompt_engine.py +116 -116
- tapps_agents/core/evaluation_base.py +119 -119
- tapps_agents/core/evaluation_models.py +320 -320
- tapps_agents/core/evaluation_orchestrator.py +225 -225
- tapps_agents/core/evaluators/__init__.py +7 -7
- tapps_agents/core/evaluators/architectural_evaluator.py +205 -205
- tapps_agents/core/evaluators/behavioral_evaluator.py +160 -160
- tapps_agents/core/evaluators/performance_profile_evaluator.py +160 -160
- tapps_agents/core/evaluators/security_posture_evaluator.py +148 -148
- tapps_agents/core/evaluators/spec_compliance_evaluator.py +181 -181
- tapps_agents/core/exceptions.py +107 -107
- tapps_agents/core/expert_config_generator.py +293 -293
- tapps_agents/core/export_schema.py +202 -202
- tapps_agents/core/external_feedback_models.py +102 -102
- tapps_agents/core/external_feedback_storage.py +213 -213
- tapps_agents/core/fallback_strategy.py +314 -314
- tapps_agents/core/feedback_analyzer.py +162 -162
- tapps_agents/core/feedback_collector.py +178 -178
- tapps_agents/core/git_operations.py +445 -445
- tapps_agents/core/hardware_profiler.py +151 -151
- tapps_agents/core/instructions.py +324 -324
- tapps_agents/core/io_guardrails.py +69 -69
- tapps_agents/core/issue_manifest.py +249 -249
- tapps_agents/core/issue_schema.py +139 -139
- tapps_agents/core/json_utils.py +128 -128
- tapps_agents/core/knowledge_graph.py +446 -446
- tapps_agents/core/language_detector.py +296 -296
- tapps_agents/core/learning_confidence.py +242 -242
- tapps_agents/core/learning_dashboard.py +246 -246
- tapps_agents/core/learning_decision.py +384 -384
- tapps_agents/core/learning_explainability.py +578 -578
- tapps_agents/core/learning_export.py +287 -287
- tapps_agents/core/learning_integration.py +228 -228
- tapps_agents/core/llm_behavior.py +232 -232
- tapps_agents/core/long_duration_support.py +786 -786
- tapps_agents/core/mcp_setup.py +106 -106
- tapps_agents/core/memory_integration.py +396 -396
- tapps_agents/core/meta_learning.py +666 -666
- tapps_agents/core/module_path_sanitizer.py +199 -199
- tapps_agents/core/multi_agent_orchestrator.py +382 -382
- tapps_agents/core/network_errors.py +125 -125
- tapps_agents/core/nfr_validator.py +336 -336
- tapps_agents/core/offline_mode.py +158 -158
- tapps_agents/core/output_contracts.py +300 -300
- tapps_agents/core/output_formatter.py +300 -300
- tapps_agents/core/path_normalizer.py +174 -174
- tapps_agents/core/path_validator.py +322 -322
- tapps_agents/core/pattern_library.py +250 -250
- tapps_agents/core/performance_benchmark.py +301 -301
- tapps_agents/core/performance_monitor.py +184 -184
- tapps_agents/core/playwright_mcp_controller.py +771 -771
- tapps_agents/core/policy_loader.py +135 -135
- tapps_agents/core/progress.py +166 -166
- tapps_agents/core/project_profile.py +354 -354
- tapps_agents/core/project_type_detector.py +454 -454
- tapps_agents/core/prompt_base.py +223 -223
- tapps_agents/core/prompt_learning/__init__.py +2 -2
- tapps_agents/core/prompt_learning/learning_loop.py +24 -24
- tapps_agents/core/prompt_learning/project_prompt_store.py +25 -25
- tapps_agents/core/prompt_learning/skills_prompt_analyzer.py +35 -35
- tapps_agents/core/prompt_optimization/__init__.py +6 -6
- tapps_agents/core/prompt_optimization/ab_tester.py +114 -114
- tapps_agents/core/prompt_optimization/correlation_analyzer.py +160 -160
- tapps_agents/core/prompt_optimization/progressive_refiner.py +129 -129
- tapps_agents/core/prompt_optimization/prompt_library.py +37 -37
- tapps_agents/core/requirements_evaluator.py +431 -431
- tapps_agents/core/resource_aware_executor.py +449 -449
- tapps_agents/core/resource_monitor.py +343 -343
- tapps_agents/core/resume_handler.py +298 -298
- tapps_agents/core/retry_handler.py +197 -197
- tapps_agents/core/review_checklists.py +479 -479
- tapps_agents/core/role_loader.py +201 -201
- tapps_agents/core/role_template_loader.py +201 -201
- tapps_agents/core/runtime_mode.py +60 -60
- tapps_agents/core/security_scanner.py +342 -342
- tapps_agents/core/skill_agent_registry.py +194 -194
- tapps_agents/core/skill_integration.py +208 -208
- tapps_agents/core/skill_loader.py +492 -492
- tapps_agents/core/skill_template.py +341 -341
- tapps_agents/core/skill_validator.py +478 -478
- tapps_agents/core/stack_analyzer.py +35 -35
- tapps_agents/core/startup.py +174 -174
- tapps_agents/core/storage_manager.py +397 -397
- tapps_agents/core/storage_models.py +166 -166
- tapps_agents/core/story_evaluator.py +410 -410
- tapps_agents/core/subprocess_utils.py +170 -170
- tapps_agents/core/task_duration.py +296 -296
- tapps_agents/core/task_memory.py +582 -582
- tapps_agents/core/task_state.py +226 -226
- tapps_agents/core/tech_stack_priorities.py +208 -208
- tapps_agents/core/temp_directory.py +194 -194
- tapps_agents/core/template_merger.py +600 -600
- tapps_agents/core/template_selector.py +280 -280
- tapps_agents/core/test_generator.py +286 -286
- tapps_agents/core/tiered_context.py +253 -253
- tapps_agents/core/token_monitor.py +345 -345
- tapps_agents/core/traceability.py +254 -254
- tapps_agents/core/trajectory_tracker.py +50 -50
- tapps_agents/core/unicode_safe.py +143 -143
- tapps_agents/core/unified_cache_config.py +170 -170
- tapps_agents/core/unified_state.py +324 -324
- tapps_agents/core/validate_cursor_setup.py +237 -237
- tapps_agents/core/validation_registry.py +136 -136
- tapps_agents/core/validators/__init__.py +4 -4
- tapps_agents/core/validators/python_validator.py +87 -87
- tapps_agents/core/verification_agent.py +90 -90
- tapps_agents/core/visual_feedback.py +644 -644
- tapps_agents/core/workflow_validator.py +197 -197
- tapps_agents/core/worktree.py +367 -367
- tapps_agents/docker/__init__.py +10 -10
- tapps_agents/docker/analyzer.py +186 -186
- tapps_agents/docker/debugger.py +229 -229
- tapps_agents/docker/error_patterns.py +216 -216
- tapps_agents/epic/__init__.py +22 -22
- tapps_agents/epic/beads_sync.py +115 -115
- tapps_agents/epic/markdown_sync.py +105 -105
- tapps_agents/epic/models.py +96 -96
- tapps_agents/experts/__init__.py +163 -163
- tapps_agents/experts/agent_integration.py +243 -243
- tapps_agents/experts/auto_generator.py +331 -331
- tapps_agents/experts/base_expert.py +536 -536
- tapps_agents/experts/builtin_registry.py +261 -261
- tapps_agents/experts/business_metrics.py +565 -565
- tapps_agents/experts/cache.py +266 -266
- tapps_agents/experts/confidence_breakdown.py +306 -306
- tapps_agents/experts/confidence_calculator.py +336 -336
- tapps_agents/experts/confidence_metrics.py +236 -236
- tapps_agents/experts/domain_config.py +311 -311
- tapps_agents/experts/domain_detector.py +550 -550
- tapps_agents/experts/domain_utils.py +84 -84
- tapps_agents/experts/expert_config.py +113 -113
- tapps_agents/experts/expert_engine.py +465 -465
- tapps_agents/experts/expert_registry.py +744 -744
- tapps_agents/experts/expert_synthesizer.py +70 -70
- tapps_agents/experts/governance.py +197 -197
- tapps_agents/experts/history_logger.py +312 -312
- tapps_agents/experts/knowledge/README.md +180 -180
- tapps_agents/experts/knowledge/accessibility/accessible-forms.md +331 -331
- tapps_agents/experts/knowledge/accessibility/aria-patterns.md +344 -344
- tapps_agents/experts/knowledge/accessibility/color-contrast.md +285 -285
- tapps_agents/experts/knowledge/accessibility/keyboard-navigation.md +332 -332
- tapps_agents/experts/knowledge/accessibility/screen-readers.md +282 -282
- tapps_agents/experts/knowledge/accessibility/semantic-html.md +355 -355
- tapps_agents/experts/knowledge/accessibility/testing-accessibility.md +369 -369
- tapps_agents/experts/knowledge/accessibility/wcag-2.1.md +296 -296
- tapps_agents/experts/knowledge/accessibility/wcag-2.2.md +211 -211
- tapps_agents/experts/knowledge/agent-learning/best-practices.md +715 -715
- tapps_agents/experts/knowledge/agent-learning/pattern-extraction.md +282 -282
- tapps_agents/experts/knowledge/agent-learning/prompt-optimization.md +320 -320
- tapps_agents/experts/knowledge/ai-frameworks/model-optimization.md +90 -90
- tapps_agents/experts/knowledge/ai-frameworks/openvino-patterns.md +260 -260
- tapps_agents/experts/knowledge/api-design-integration/api-gateway-patterns.md +309 -309
- tapps_agents/experts/knowledge/api-design-integration/api-security-patterns.md +521 -521
- tapps_agents/experts/knowledge/api-design-integration/api-versioning.md +421 -421
- tapps_agents/experts/knowledge/api-design-integration/async-protocol-patterns.md +61 -61
- tapps_agents/experts/knowledge/api-design-integration/contract-testing.md +221 -221
- tapps_agents/experts/knowledge/api-design-integration/external-api-integration.md +489 -489
- tapps_agents/experts/knowledge/api-design-integration/fastapi-patterns.md +360 -360
- tapps_agents/experts/knowledge/api-design-integration/fastapi-testing.md +262 -262
- tapps_agents/experts/knowledge/api-design-integration/graphql-patterns.md +582 -582
- tapps_agents/experts/knowledge/api-design-integration/grpc-best-practices.md +499 -499
- tapps_agents/experts/knowledge/api-design-integration/mqtt-patterns.md +455 -455
- tapps_agents/experts/knowledge/api-design-integration/rate-limiting.md +507 -507
- tapps_agents/experts/knowledge/api-design-integration/restful-api-design.md +618 -618
- tapps_agents/experts/knowledge/api-design-integration/websocket-patterns.md +480 -480
- tapps_agents/experts/knowledge/cloud-infrastructure/cloud-native-patterns.md +175 -175
- tapps_agents/experts/knowledge/cloud-infrastructure/container-health-checks.md +261 -261
- tapps_agents/experts/knowledge/cloud-infrastructure/containerization.md +222 -222
- tapps_agents/experts/knowledge/cloud-infrastructure/cost-optimization.md +122 -122
- tapps_agents/experts/knowledge/cloud-infrastructure/disaster-recovery.md +153 -153
- tapps_agents/experts/knowledge/cloud-infrastructure/dockerfile-patterns.md +285 -285
- tapps_agents/experts/knowledge/cloud-infrastructure/infrastructure-as-code.md +187 -187
- tapps_agents/experts/knowledge/cloud-infrastructure/kubernetes-patterns.md +253 -253
- tapps_agents/experts/knowledge/cloud-infrastructure/multi-cloud-strategies.md +155 -155
- tapps_agents/experts/knowledge/cloud-infrastructure/serverless-architecture.md +200 -200
- tapps_agents/experts/knowledge/code-quality-analysis/README.md +16 -16
- tapps_agents/experts/knowledge/code-quality-analysis/code-metrics.md +137 -137
- tapps_agents/experts/knowledge/code-quality-analysis/complexity-analysis.md +181 -181
- tapps_agents/experts/knowledge/code-quality-analysis/technical-debt-patterns.md +191 -191
- tapps_agents/experts/knowledge/data-privacy-compliance/anonymization.md +313 -313
- tapps_agents/experts/knowledge/data-privacy-compliance/ccpa.md +255 -255
- tapps_agents/experts/knowledge/data-privacy-compliance/consent-management.md +282 -282
- tapps_agents/experts/knowledge/data-privacy-compliance/data-minimization.md +275 -275
- tapps_agents/experts/knowledge/data-privacy-compliance/data-retention.md +297 -297
- tapps_agents/experts/knowledge/data-privacy-compliance/data-subject-rights.md +383 -383
- tapps_agents/experts/knowledge/data-privacy-compliance/encryption-privacy.md +285 -285
- tapps_agents/experts/knowledge/data-privacy-compliance/gdpr.md +344 -344
- tapps_agents/experts/knowledge/data-privacy-compliance/hipaa.md +385 -385
- tapps_agents/experts/knowledge/data-privacy-compliance/privacy-by-design.md +280 -280
- tapps_agents/experts/knowledge/database-data-management/acid-vs-cap.md +164 -164
- tapps_agents/experts/knowledge/database-data-management/backup-and-recovery.md +182 -182
- tapps_agents/experts/knowledge/database-data-management/data-modeling.md +172 -172
- tapps_agents/experts/knowledge/database-data-management/database-design.md +187 -187
- tapps_agents/experts/knowledge/database-data-management/flux-query-optimization.md +342 -342
- tapps_agents/experts/knowledge/database-data-management/influxdb-connection-patterns.md +432 -432
- tapps_agents/experts/knowledge/database-data-management/influxdb-patterns.md +442 -442
- tapps_agents/experts/knowledge/database-data-management/migration-strategies.md +216 -216
- tapps_agents/experts/knowledge/database-data-management/nosql-patterns.md +259 -259
- tapps_agents/experts/knowledge/database-data-management/scalability-patterns.md +184 -184
- tapps_agents/experts/knowledge/database-data-management/sql-optimization.md +175 -175
- tapps_agents/experts/knowledge/database-data-management/time-series-modeling.md +444 -444
- tapps_agents/experts/knowledge/development-workflow/README.md +16 -16
- tapps_agents/experts/knowledge/development-workflow/automation-best-practices.md +216 -216
- tapps_agents/experts/knowledge/development-workflow/build-strategies.md +198 -198
- tapps_agents/experts/knowledge/development-workflow/deployment-patterns.md +205 -205
- tapps_agents/experts/knowledge/development-workflow/git-workflows.md +205 -205
- tapps_agents/experts/knowledge/documentation-knowledge-management/README.md +16 -16
- tapps_agents/experts/knowledge/documentation-knowledge-management/api-documentation-patterns.md +231 -231
- tapps_agents/experts/knowledge/documentation-knowledge-management/documentation-standards.md +191 -191
- tapps_agents/experts/knowledge/documentation-knowledge-management/knowledge-management.md +171 -171
- tapps_agents/experts/knowledge/documentation-knowledge-management/technical-writing-guide.md +192 -192
- tapps_agents/experts/knowledge/observability-monitoring/alerting-patterns.md +461 -461
- tapps_agents/experts/knowledge/observability-monitoring/apm-tools.md +459 -459
- tapps_agents/experts/knowledge/observability-monitoring/distributed-tracing.md +367 -367
- tapps_agents/experts/knowledge/observability-monitoring/logging-strategies.md +478 -478
- tapps_agents/experts/knowledge/observability-monitoring/metrics-and-monitoring.md +510 -510
- tapps_agents/experts/knowledge/observability-monitoring/observability-best-practices.md +492 -492
- tapps_agents/experts/knowledge/observability-monitoring/open-telemetry.md +573 -573
- tapps_agents/experts/knowledge/observability-monitoring/slo-sli-sla.md +419 -419
- tapps_agents/experts/knowledge/performance/anti-patterns.md +284 -284
- tapps_agents/experts/knowledge/performance/api-performance.md +256 -256
- tapps_agents/experts/knowledge/performance/caching.md +327 -327
- tapps_agents/experts/knowledge/performance/database-performance.md +252 -252
- tapps_agents/experts/knowledge/performance/optimization-patterns.md +327 -327
- tapps_agents/experts/knowledge/performance/profiling.md +297 -297
- tapps_agents/experts/knowledge/performance/resource-management.md +293 -293
- tapps_agents/experts/knowledge/performance/scalability.md +306 -306
- tapps_agents/experts/knowledge/security/owasp-top10.md +209 -209
- tapps_agents/experts/knowledge/security/secure-coding-practices.md +207 -207
- tapps_agents/experts/knowledge/security/threat-modeling.md +220 -220
- tapps_agents/experts/knowledge/security/vulnerability-patterns.md +342 -342
- tapps_agents/experts/knowledge/software-architecture/docker-compose-patterns.md +314 -314
- tapps_agents/experts/knowledge/software-architecture/microservices-patterns.md +379 -379
- tapps_agents/experts/knowledge/software-architecture/service-communication.md +316 -316
- tapps_agents/experts/knowledge/testing/best-practices.md +310 -310
- tapps_agents/experts/knowledge/testing/coverage-analysis.md +293 -293
- tapps_agents/experts/knowledge/testing/mocking.md +256 -256
- tapps_agents/experts/knowledge/testing/test-automation.md +276 -276
- tapps_agents/experts/knowledge/testing/test-data.md +271 -271
- tapps_agents/experts/knowledge/testing/test-design-patterns.md +280 -280
- tapps_agents/experts/knowledge/testing/test-maintenance.md +236 -236
- tapps_agents/experts/knowledge/testing/test-strategies.md +311 -311
- tapps_agents/experts/knowledge/user-experience/information-architecture.md +325 -325
- tapps_agents/experts/knowledge/user-experience/interaction-design.md +363 -363
- tapps_agents/experts/knowledge/user-experience/prototyping.md +293 -293
- tapps_agents/experts/knowledge/user-experience/usability-heuristics.md +337 -337
- tapps_agents/experts/knowledge/user-experience/usability-testing.md +311 -311
- tapps_agents/experts/knowledge/user-experience/user-journeys.md +296 -296
- tapps_agents/experts/knowledge/user-experience/user-research.md +373 -373
- tapps_agents/experts/knowledge/user-experience/ux-principles.md +340 -340
- tapps_agents/experts/knowledge_freshness.py +321 -321
- tapps_agents/experts/knowledge_ingestion.py +438 -438
- tapps_agents/experts/knowledge_need_detector.py +93 -93
- tapps_agents/experts/knowledge_validator.py +382 -382
- tapps_agents/experts/observability.py +440 -440
- tapps_agents/experts/passive_notifier.py +238 -238
- tapps_agents/experts/proactive_orchestrator.py +32 -32
- tapps_agents/experts/rag_chunker.py +205 -205
- tapps_agents/experts/rag_embedder.py +152 -152
- tapps_agents/experts/rag_evaluation.py +299 -299
- tapps_agents/experts/rag_index.py +303 -303
- tapps_agents/experts/rag_metrics.py +293 -293
- tapps_agents/experts/rag_safety.py +263 -263
- tapps_agents/experts/report_generator.py +296 -296
- tapps_agents/experts/setup_wizard.py +441 -441
- tapps_agents/experts/simple_rag.py +431 -431
- tapps_agents/experts/vector_rag.py +354 -354
- tapps_agents/experts/weight_distributor.py +304 -304
- tapps_agents/health/__init__.py +24 -24
- tapps_agents/health/base.py +75 -75
- tapps_agents/health/checks/__init__.py +22 -22
- tapps_agents/health/checks/automation.py +127 -127
- tapps_agents/health/checks/context7_cache.py +210 -210
- tapps_agents/health/checks/environment.py +116 -116
- tapps_agents/health/checks/execution.py +170 -170
- tapps_agents/health/checks/knowledge_base.py +187 -187
- tapps_agents/health/checks/outcomes.py +324 -324
- tapps_agents/health/collector.py +280 -280
- tapps_agents/health/dashboard.py +137 -137
- tapps_agents/health/metrics.py +151 -151
- tapps_agents/health/orchestrator.py +271 -271
- tapps_agents/health/registry.py +166 -166
- tapps_agents/hooks/__init__.py +33 -33
- tapps_agents/hooks/config.py +140 -140
- tapps_agents/hooks/events.py +135 -135
- tapps_agents/hooks/executor.py +128 -128
- tapps_agents/hooks/manager.py +143 -143
- tapps_agents/integration/__init__.py +8 -8
- tapps_agents/integration/service_integrator.py +121 -121
- tapps_agents/integrations/__init__.py +10 -10
- tapps_agents/integrations/clawdbot.py +525 -525
- tapps_agents/integrations/memory_bridge.py +356 -356
- tapps_agents/mcp/__init__.py +18 -18
- tapps_agents/mcp/gateway.py +112 -112
- tapps_agents/mcp/servers/__init__.py +13 -13
- tapps_agents/mcp/servers/analysis.py +204 -204
- tapps_agents/mcp/servers/context7.py +198 -198
- tapps_agents/mcp/servers/filesystem.py +218 -218
- tapps_agents/mcp/servers/git.py +201 -201
- tapps_agents/mcp/tool_registry.py +115 -115
- tapps_agents/quality/__init__.py +54 -54
- tapps_agents/quality/coverage_analyzer.py +379 -379
- tapps_agents/quality/enforcement.py +82 -82
- tapps_agents/quality/gates/__init__.py +37 -37
- tapps_agents/quality/gates/approval_gate.py +255 -255
- tapps_agents/quality/gates/base.py +84 -84
- tapps_agents/quality/gates/exceptions.py +43 -43
- tapps_agents/quality/gates/policy_gate.py +195 -195
- tapps_agents/quality/gates/registry.py +239 -239
- tapps_agents/quality/gates/security_gate.py +156 -156
- tapps_agents/quality/quality_gates.py +369 -369
- tapps_agents/quality/secret_scanner.py +335 -335
- tapps_agents/session/__init__.py +19 -19
- tapps_agents/session/manager.py +256 -256
- tapps_agents/simple_mode/__init__.py +66 -66
- tapps_agents/simple_mode/agent_contracts.py +357 -357
- tapps_agents/simple_mode/beads_hooks.py +151 -151
- tapps_agents/simple_mode/code_snippet_handler.py +382 -382
- tapps_agents/simple_mode/documentation_manager.py +395 -395
- tapps_agents/simple_mode/documentation_reader.py +187 -187
- tapps_agents/simple_mode/file_inference.py +292 -292
- tapps_agents/simple_mode/framework_change_detector.py +268 -268
- tapps_agents/simple_mode/intent_parser.py +510 -510
- tapps_agents/simple_mode/learning_progression.py +358 -358
- tapps_agents/simple_mode/nl_handler.py +700 -700
- tapps_agents/simple_mode/onboarding.py +253 -253
- tapps_agents/simple_mode/orchestrators/__init__.py +38 -38
- tapps_agents/simple_mode/orchestrators/base.py +185 -185
- tapps_agents/simple_mode/orchestrators/breakdown_orchestrator.py +49 -49
- tapps_agents/simple_mode/orchestrators/brownfield_orchestrator.py +135 -135
- tapps_agents/simple_mode/orchestrators/build_orchestrator.py +2700 -2667
- tapps_agents/simple_mode/orchestrators/deliverable_checklist.py +349 -349
- tapps_agents/simple_mode/orchestrators/enhance_orchestrator.py +53 -53
- tapps_agents/simple_mode/orchestrators/epic_orchestrator.py +122 -122
- tapps_agents/simple_mode/orchestrators/explore_orchestrator.py +184 -184
- tapps_agents/simple_mode/orchestrators/fix_orchestrator.py +723 -723
- tapps_agents/simple_mode/orchestrators/plan_analysis_orchestrator.py +206 -206
- tapps_agents/simple_mode/orchestrators/pr_orchestrator.py +237 -237
- tapps_agents/simple_mode/orchestrators/refactor_orchestrator.py +222 -222
- tapps_agents/simple_mode/orchestrators/requirements_tracer.py +262 -262
- tapps_agents/simple_mode/orchestrators/resume_orchestrator.py +210 -210
- tapps_agents/simple_mode/orchestrators/review_orchestrator.py +161 -161
- tapps_agents/simple_mode/orchestrators/test_orchestrator.py +82 -82
- tapps_agents/simple_mode/output_aggregator.py +340 -340
- tapps_agents/simple_mode/result_formatters.py +598 -598
- tapps_agents/simple_mode/step_dependencies.py +382 -382
- tapps_agents/simple_mode/step_results.py +276 -276
- tapps_agents/simple_mode/streaming.py +388 -388
- tapps_agents/simple_mode/variations.py +129 -129
- tapps_agents/simple_mode/visual_feedback.py +238 -238
- tapps_agents/simple_mode/zero_config.py +274 -274
- tapps_agents/suggestions/__init__.py +8 -8
- tapps_agents/suggestions/inline_suggester.py +52 -52
- tapps_agents/templates/__init__.py +8 -8
- tapps_agents/templates/microservice_generator.py +274 -274
- tapps_agents/utils/env_validator.py +291 -291
- tapps_agents/workflow/__init__.py +171 -171
- tapps_agents/workflow/acceptance_verifier.py +132 -132
- tapps_agents/workflow/agent_handlers/__init__.py +41 -41
- tapps_agents/workflow/agent_handlers/analyst_handler.py +75 -75
- tapps_agents/workflow/agent_handlers/architect_handler.py +107 -107
- tapps_agents/workflow/agent_handlers/base.py +84 -84
- tapps_agents/workflow/agent_handlers/debugger_handler.py +100 -100
- tapps_agents/workflow/agent_handlers/designer_handler.py +110 -110
- tapps_agents/workflow/agent_handlers/documenter_handler.py +94 -94
- tapps_agents/workflow/agent_handlers/implementer_handler.py +235 -235
- tapps_agents/workflow/agent_handlers/ops_handler.py +62 -62
- tapps_agents/workflow/agent_handlers/orchestrator_handler.py +43 -43
- tapps_agents/workflow/agent_handlers/planner_handler.py +98 -98
- tapps_agents/workflow/agent_handlers/registry.py +119 -119
- tapps_agents/workflow/agent_handlers/reviewer_handler.py +119 -119
- tapps_agents/workflow/agent_handlers/tester_handler.py +69 -69
- tapps_agents/workflow/analytics_accessor.py +337 -337
- tapps_agents/workflow/analytics_alerts.py +416 -416
- tapps_agents/workflow/analytics_dashboard_cursor.py +281 -281
- tapps_agents/workflow/analytics_dual_write.py +103 -103
- tapps_agents/workflow/analytics_integration.py +119 -119
- tapps_agents/workflow/analytics_query_parser.py +278 -278
- tapps_agents/workflow/analytics_visualizer.py +259 -259
- tapps_agents/workflow/artifact_helper.py +204 -204
- tapps_agents/workflow/audit_logger.py +263 -263
- tapps_agents/workflow/auto_execution_config.py +340 -340
- tapps_agents/workflow/auto_progression.py +586 -586
- tapps_agents/workflow/branch_cleanup.py +349 -349
- tapps_agents/workflow/checkpoint.py +256 -256
- tapps_agents/workflow/checkpoint_manager.py +178 -178
- tapps_agents/workflow/code_artifact.py +179 -179
- tapps_agents/workflow/common_enums.py +96 -96
- tapps_agents/workflow/confirmation_handler.py +130 -130
- tapps_agents/workflow/context_analyzer.py +222 -222
- tapps_agents/workflow/context_artifact.py +230 -230
- tapps_agents/workflow/cursor_chat.py +94 -94
- tapps_agents/workflow/cursor_executor.py +2337 -2196
- tapps_agents/workflow/cursor_skill_helper.py +516 -516
- tapps_agents/workflow/dependency_resolver.py +244 -244
- tapps_agents/workflow/design_artifact.py +156 -156
- tapps_agents/workflow/detector.py +751 -751
- tapps_agents/workflow/direct_execution_fallback.py +301 -301
- tapps_agents/workflow/docs_artifact.py +168 -168
- tapps_agents/workflow/enforcer.py +389 -389
- tapps_agents/workflow/enhancement_artifact.py +142 -142
- tapps_agents/workflow/error_recovery.py +806 -806
- tapps_agents/workflow/event_bus.py +183 -183
- tapps_agents/workflow/event_log.py +612 -612
- tapps_agents/workflow/events.py +63 -63
- tapps_agents/workflow/exceptions.py +43 -43
- tapps_agents/workflow/execution_graph.py +498 -498
- tapps_agents/workflow/execution_plan.py +126 -126
- tapps_agents/workflow/file_utils.py +186 -186
- tapps_agents/workflow/gate_evaluator.py +182 -182
- tapps_agents/workflow/gate_integration.py +200 -200
- tapps_agents/workflow/graph_visualizer.py +130 -130
- tapps_agents/workflow/health_checker.py +206 -206
- tapps_agents/workflow/logging_helper.py +243 -243
- tapps_agents/workflow/manifest.py +582 -582
- tapps_agents/workflow/marker_writer.py +250 -250
- tapps_agents/workflow/message_formatter.py +188 -188
- tapps_agents/workflow/messaging.py +325 -325
- tapps_agents/workflow/metadata_models.py +91 -91
- tapps_agents/workflow/metrics_integration.py +226 -226
- tapps_agents/workflow/migration_utils.py +116 -116
- tapps_agents/workflow/models.py +148 -111
- tapps_agents/workflow/nlp_config.py +198 -198
- tapps_agents/workflow/nlp_error_handler.py +207 -207
- tapps_agents/workflow/nlp_executor.py +163 -163
- tapps_agents/workflow/nlp_parser.py +528 -528
- tapps_agents/workflow/observability_dashboard.py +451 -451
- tapps_agents/workflow/observer.py +170 -170
- tapps_agents/workflow/ops_artifact.py +257 -257
- tapps_agents/workflow/output_passing.py +214 -214
- tapps_agents/workflow/parallel_executor.py +463 -463
- tapps_agents/workflow/planning_artifact.py +179 -179
- tapps_agents/workflow/preset_loader.py +285 -285
- tapps_agents/workflow/preset_recommender.py +270 -270
- tapps_agents/workflow/progress_logger.py +145 -145
- tapps_agents/workflow/progress_manager.py +303 -303
- tapps_agents/workflow/progress_monitor.py +186 -186
- tapps_agents/workflow/progress_updates.py +423 -423
- tapps_agents/workflow/quality_artifact.py +158 -158
- tapps_agents/workflow/quality_loopback.py +101 -101
- tapps_agents/workflow/recommender.py +387 -387
- tapps_agents/workflow/remediation_loop.py +166 -166
- tapps_agents/workflow/result_aggregator.py +300 -300
- tapps_agents/workflow/review_artifact.py +185 -185
- tapps_agents/workflow/schema_validator.py +522 -522
- tapps_agents/workflow/session_handoff.py +178 -178
- tapps_agents/workflow/skill_invoker.py +648 -648
- tapps_agents/workflow/state_manager.py +756 -756
- tapps_agents/workflow/state_persistence_config.py +331 -331
- tapps_agents/workflow/status_monitor.py +449 -449
- tapps_agents/workflow/step_checkpoint.py +314 -314
- tapps_agents/workflow/step_details.py +201 -201
- tapps_agents/workflow/story_models.py +147 -147
- tapps_agents/workflow/streaming.py +416 -416
- tapps_agents/workflow/suggestion_engine.py +552 -552
- tapps_agents/workflow/testing_artifact.py +186 -186
- tapps_agents/workflow/timeline.py +158 -158
- tapps_agents/workflow/token_integration.py +209 -209
- tapps_agents/workflow/validation.py +217 -217
- tapps_agents/workflow/visual_feedback.py +391 -391
- tapps_agents/workflow/workflow_chain.py +95 -95
- tapps_agents/workflow/workflow_summary.py +219 -219
- tapps_agents/workflow/worktree_manager.py +724 -724
- {tapps_agents-3.5.40.dist-info → tapps_agents-3.6.0.dist-info}/METADATA +672 -672
- tapps_agents-3.6.0.dist-info/RECORD +758 -0
- {tapps_agents-3.5.40.dist-info → tapps_agents-3.6.0.dist-info}/licenses/LICENSE +22 -22
- tapps_agents/health/checks/outcomes.backup_20260204_064058.py +0 -324
- tapps_agents/health/checks/outcomes.backup_20260204_064256.py +0 -324
- tapps_agents/health/checks/outcomes.backup_20260204_064600.py +0 -324
- tapps_agents-3.5.40.dist-info/RECORD +0 -760
- {tapps_agents-3.5.40.dist-info → tapps_agents-3.6.0.dist-info}/WHEEL +0 -0
- {tapps_agents-3.5.40.dist-info → tapps_agents-3.6.0.dist-info}/entry_points.txt +0 -0
- {tapps_agents-3.5.40.dist-info → tapps_agents-3.6.0.dist-info}/top_level.txt +0 -0
```diff
@@ -1,665 +1,665 @@
-"""
-Health command handlers.
-"""
-
-from __future__ import annotations
-
-import json
-import logging
-import sys
-from collections import defaultdict
-from datetime import UTC, datetime, timedelta
-from pathlib import Path
-
-from ...health.checks.automation import AutomationHealthCheck
-from ...health.checks.environment import EnvironmentHealthCheck
-from ...health.checks.execution import ExecutionHealthCheck
-from ...health.checks.context7_cache import Context7CacheHealthCheck
-from ...health.checks.knowledge_base import KnowledgeBaseHealthCheck
-from ...health.checks.outcomes import OutcomeHealthCheck
-from ...health.collector import HealthMetricsCollector
-from ...health.dashboard import HealthDashboard
-from ...health.orchestrator import HealthOrchestrator
-from ...health.registry import HealthCheckRegistry
-from ..feedback import get_feedback, ProgressTracker
-from .common import format_json_output
-
-
-def _usage_data_from_execution_metrics(project_root: Path) -> dict | None:
-    """
-    Build usage-like data from execution metrics when analytics is empty.
-
-    Aggregates .tapps-agents/metrics/executions_*.jsonl by today (steps/workflows),
-    by skill (agents), and by workflow_id (workflows). Returns same shape as
-    AnalyticsDashboard.get_dashboard_data() for system/agents/workflows.
-    """
-    try:
-        from ...workflow.execution_metrics import ExecutionMetricsCollector
-
-        collector = ExecutionMetricsCollector(project_root=project_root)
-        metrics = collector.get_metrics(limit=5000)
-        if not metrics:
-            return None
-
-        now = datetime.now(UTC)
-        today_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
-        thirty_days_ago = now - timedelta(days=30)
-
-        # Filter to last 30 days
-        def parse_ts(ts: str) -> datetime:
-            return datetime.fromisoformat(ts.replace("Z", "+00:00"))
-
-        recent = [m for m in metrics if parse_ts(m.started_at) >= thirty_days_ago]
-        if not recent:
-            return None
-
-        today_metrics = [m for m in recent if parse_ts(m.started_at) >= today_start]
-        workflow_ids_today_success = {
-            m.workflow_id for m in today_metrics if m.status == "success"
-        }
-        workflow_ids_today_failed = {
-            m.workflow_id for m in today_metrics if m.status != "success"
-        }
-        completed_today = len(workflow_ids_today_success)
-        failed_today = len(workflow_ids_today_failed)
-        avg_duration = (
-            sum(m.duration_ms for m in recent) / len(recent) / 1000.0
-            if recent
-            else 0.0
-        )
-
-        # Agents: by skill or command
-        agent_counts: dict[str, list] = defaultdict(list)
-        for m in recent:
-            key = m.skill or m.command or "unknown"
-            agent_counts[key].append(m)
-
-        agents_list = []
-        for name, ms in agent_counts.items():
-            total = len(ms)
-            success = sum(1 for m in ms if m.status == "success")
-            agents_list.append(
-                {
-                    "agent_id": name,
-                    "agent_name": name,
-                    "total_executions": total,
-                    "successful_executions": success,
-                    "failed_executions": total - success,
-                    "success_rate": success / total if total else 0.0,
-                    "average_duration": sum(m.duration_ms for m in ms) / total / 1000.0
-                    if total
-                    else 0.0,
-                }
-            )
-
-        # Workflows: by workflow_id
-        wf_counts: dict[str, list] = defaultdict(list)
-        for m in recent:
-            wf_counts[m.workflow_id].append(m)
-
-        workflows_list = []
-        for wf_id, ms in wf_counts.items():
-            total = len(ms)
-            success = sum(1 for m in ms if m.status == "success")
-            workflows_list.append(
-                {
-                    "workflow_id": wf_id,
-                    "workflow_name": wf_id,
-                    "total_executions": total,
-                    "successful_executions": success,
-                    "failed_executions": total - success,
-                    "success_rate": success / total if total else 0.0,
-                    "average_duration": sum(m.duration_ms for m in ms) / total / 1000.0
-                    if total
-                    else 0.0,
-                }
-            )
-
-        # System: try cpu/mem/disk from ResourceMonitor
-        cpu_usage = memory_usage = disk_usage = 0.0
-        try:
-            from ...core.resource_monitor import ResourceMonitor
-
-            mon = ResourceMonitor()
-            res = mon.get_current_metrics()
-            cpu_usage = getattr(res, "cpu_percent", 0.0) or 0.0
-            memory_usage = getattr(res, "memory_percent", 0.0) or 0.0
-            disk_usage = getattr(res, "disk_percent", 0.0) or 0.0
-        except Exception:
-            pass
-
-        return {
-            "timestamp": now.isoformat(),
-            "system": {
-                "timestamp": now.isoformat(),
-                "total_agents": len(agents_list),
-                "active_workflows": 0,
-                "completed_workflows_today": completed_today,
-                "failed_workflows_today": failed_today,
-                "average_workflow_duration": avg_duration,
-                "cpu_usage": cpu_usage,
-                "memory_usage": memory_usage,
-                "disk_usage": disk_usage,
-            },
-            "agents": agents_list,
-            "workflows": workflows_list,
-        }
-    except Exception:
-        return None
-
-
-def handle_health_check_command(
-    check_name: str | None = None,
-    output_format: str = "text",
-    save: bool = True,
-    project_root: Path | None = None,
-) -> None:
-    """
-    Handle health check command.
-
-    Args:
-        check_name: Optional specific check to run
-        output_format: Output format (json or text)
-        save: Whether to save results to metrics storage
-        project_root: Project root directory
-    """
-    project_root = project_root or Path.cwd()
-
-    # Initialize registry and register all checks
-    registry = HealthCheckRegistry()
-    registry.register(EnvironmentHealthCheck(project_root=project_root))
-    registry.register(AutomationHealthCheck(project_root=project_root))
-    registry.register(ExecutionHealthCheck(project_root=project_root))
-    registry.register(Context7CacheHealthCheck(project_root=project_root))
-    registry.register(KnowledgeBaseHealthCheck(project_root=project_root))
-    registry.register(OutcomeHealthCheck(project_root=project_root))
-
-    # Initialize orchestrator
-    metrics_collector = HealthMetricsCollector(project_root=project_root)
-    orchestrator = HealthOrchestrator(
-        registry=registry, metrics_collector=metrics_collector, project_root=project_root
-    )
-
-    # Run checks
-    feedback = get_feedback()
-    feedback.format_type = output_format
-    operation_desc = f"Running health check: {check_name}" if check_name else "Running all health checks"
-    feedback.start_operation("Health Check", operation_desc)
-
-    if check_name:
-        check_names = [check_name]
-        feedback.running(f"Initializing check: {check_name}...", step=1, total_steps=3)
-    else:
-        check_names = None
-        feedback.running("Discovering health checks...", step=1, total_steps=3)
-
-    feedback.running("Executing health checks...", step=2, total_steps=3)
-    results = orchestrator.run_all_checks(check_names=check_names, save_metrics=save)
-    feedback.running("Collecting results...", step=3, total_steps=3)
-    feedback.clear_progress()
-
-    # Build summary
-    summary = {}
-    if results:
-        healthy_count = sum(1 for r in results.values() if r and r.status == "healthy")
-        total_count = len([r for r in results.values() if r])
-        summary["checks_run"] = total_count
-        summary["healthy"] = healthy_count
-        summary["degraded"] = sum(1 for r in results.values() if r and r.status == "degraded")
-        summary["unhealthy"] = sum(1 for r in results.values() if r and r.status == "unhealthy")
-
-    # Format output
-    if output_format == "json":
-        output = {
-            "checks": {
-                name: {
-                    "status": result.status,
-                    "score": result.score,
-                    "message": result.message,
-                    "details": result.details,
-                    "remediation": (
-                        result.remediation
-                        if isinstance(result.remediation, list)
-                        else [result.remediation]
-                        if result.remediation
-                        else None
-                    ),
-                }
-                for name, result in results.items()
-                if result
-            }
-        }
-        # Merge summary into output
-        if summary:
-            output = {**output, "summary": summary}
-        feedback.output_result(output, message="Health checks completed")
-    else:
-        # Text output
-        feedback.success("Health checks completed")
-        warnings = []
-        for name, result in sorted(results.items()):
-            if not result:
-                continue
-
-            status_symbol = {
-                "healthy": "[OK]",
-                "degraded": "[WARN]",
-                "unhealthy": "[FAIL]",
-            }.get(result.status, "[?]")
-
-            print(f"\n[{status_symbol}] {name.upper()}: {result.status} ({result.score:.1f}/100)")
-            print(f"  {result.message}")
-
-            if result.status != "healthy":
-                warnings.append(f"{name}: {result.message}")
-
-            if result.details:
-                # Show key metrics
-                key_metrics = []
-                for key in [
-                    "total_executions",
-                    "success_rate",
-                    "hit_rate",
-                    "total_files",
-                    "average_score",
-                ]:
-                    if key in result.details:
-                        value = result.details[key]
-                        if isinstance(value, float):
-                            if key == "success_rate" or key == "hit_rate":
-                                key_metrics.append(f"{key}: {value:.1f}%")
-                            else:
-                                key_metrics.append(f"{key}: {value:.1f}")
-                        else:
-                            key_metrics.append(f"{key}: {value}")
-
-                if key_metrics:
-                    print(f"  Metrics: {' | '.join(key_metrics)}")
-
-            if result.remediation:
-                if isinstance(result.remediation, list):
-                    if len(result.remediation) > 0:
-                        print(f"  Remediation: {result.remediation[0]}")
-                elif isinstance(result.remediation, str):
-                    print(f"  Remediation: {result.remediation}")
-
-        if warnings:
-            for warning_msg in warnings:
-                feedback.warning(warning_msg)
-
-
-def handle_health_dashboard_command(
-    output_format: str = "text", project_root: Path | None = None
-) -> None:
-    """
-    Handle health dashboard command.
-
-    Args:
-        output_format: Output format (json or text)
-        project_root: Project root directory
-    """
-    project_root = project_root or Path.cwd()
-
-    # Initialize registry and register all checks
-    registry = HealthCheckRegistry()
-    registry.register(EnvironmentHealthCheck(project_root=project_root))
-    registry.register(AutomationHealthCheck(project_root=project_root))
-    registry.register(ExecutionHealthCheck(project_root=project_root))
-    registry.register(Context7CacheHealthCheck(project_root=project_root))
-    registry.register(KnowledgeBaseHealthCheck(project_root=project_root))
-    registry.register(OutcomeHealthCheck(project_root=project_root))
-
-    # Initialize dashboard
-    metrics_collector = HealthMetricsCollector(project_root=project_root)
-    orchestrator = HealthOrchestrator(
-        registry=registry, metrics_collector=metrics_collector, project_root=project_root
-    )
-    dashboard = HealthDashboard(orchestrator=orchestrator)
-
-    # Render dashboard
-    feedback = get_feedback()
-    feedback.format_type = output_format
-    feedback.start_operation("Health Dashboard", "Generating health dashboard visualization")
-    feedback.running("Collecting health metrics...", step=1, total_steps=3)
-    feedback.running("Generating dashboard...", step=2, total_steps=3)
-    feedback.running("Rendering dashboard output...", step=3, total_steps=3)
-
-    if output_format == "json":
-        output = dashboard.render_json()
-        feedback.clear_progress()
-        feedback.output_result(output, message="Health dashboard generated")
-    else:
-        output = dashboard.render_text()
-        feedback.clear_progress()
-        feedback.success("Health dashboard generated")
-        print(output)
-
-
-def handle_health_metrics_command(
-    check_name: str | None = None,
-    status: str | None = None,
-    days: int = 30,
-    output_format: str = "text",
-    project_root: Path | None = None,
-) -> None:
-    """
-    Handle health metrics command.
-
-    Args:
-        check_name: Optional check name to filter
-        status: Optional status to filter
-        days: Number of days to look back
-        output_format: Output format (json or text)
-        project_root: Project root directory
-    """
-    project_root = project_root or Path.cwd()
-    collector = HealthMetricsCollector(project_root=project_root)
-
-    # Get metrics
-    feedback = get_feedback()
-    feedback.format_type = output_format
-    operation_desc = f"Collecting metrics{f' for {check_name}' if check_name else ''}"
-    feedback.start_operation("Health Metrics", operation_desc)
-    feedback.running("Querying metrics database...", step=1, total_steps=3)
-
-    metrics = collector.get_metrics(check_name=check_name, status=status, days=days, limit=1000)
-    feedback.running("Calculating summary statistics...", step=2, total_steps=3)
-    summary = collector.get_summary(days=days)
-    feedback.running("Formatting results...", step=3, total_steps=3)
-    feedback.clear_progress()
-
-    if output_format == "json":
-        output = {
-            "summary": summary,
-            "metrics": [m.to_dict() for m in metrics],
-        }
-        feedback.output_result(output, message="Health metrics retrieved")
-    else:
-        # Text output
-        feedback.success("Health metrics retrieved")
-        print(f"\nHealth Metrics Summary (last {days} days)")
-        print("=" * 70)
-        print(f"Total checks: {summary['total_checks']}")
-        print(f"Average score: {summary['average_score']:.1f}/100")
-        print(f"\nBy status:")
-        for status_name, count in summary["by_status"].items():
-            print(f"  {status_name}: {count}")
-
-        if summary["by_check"]:
-            print(f"\nBy check:")
-            for check_name, check_data in summary["by_check"].items():
-                print(f"  {check_name}:")
-                print(f"    Count: {check_data['count']}")
-                print(f"    Average score: {check_data['average_score']:.1f}/100")
-                print(f"    Latest status: {check_data['latest_status']}")
-                print(f"    Latest score: {check_data['latest_score']:.1f}/100")
-
-        if metrics:
-            print(f"\nRecent metrics (showing up to 10):")
-            for metric in metrics[:10]:
-                print(f"  {metric.check_name}: {metric.status} ({metric.score:.1f}/100) - {metric.timestamp}")
-
-
-def handle_health_trends_command(
-    check_name: str,
-    days: int = 7,
-    output_format: str = "text",
-    project_root: Path | None = None,
-) -> None:
-    """
-    Handle health trends command.
-
-    Args:
-        check_name: Check name to analyze trends for
-        days: Number of days to analyze
-        output_format: Output format (json or text)
-        project_root: Project root directory
-    """
-    project_root = project_root or Path.cwd()
-    collector = HealthMetricsCollector(project_root=project_root)
-
-    # Get trends
-    feedback = get_feedback()
-    feedback.format_type = output_format
-    feedback.start_operation("Health Trends", f"Analyzing health trends for {check_name}")
-    feedback.running("Loading historical data...", step=1, total_steps=3)
-
-    trends = collector.get_trends(check_name=check_name, days=days)
-    feedback.running("Calculating trends...", step=2, total_steps=3)
-    feedback.running("Generating trend report...", step=3, total_steps=3)
-    feedback.clear_progress()
-
-    if output_format == "json":
-        output = {
-            "check_name": check_name,
-            "days": days,
-            "trends": trends,
-        }
-        feedback.output_result(output, message="Health trends analyzed")
-    else:
-        # Text output
-        feedback.success("Health trends analyzed")
-        print(f"\nHealth Trends for '{check_name}' (last {days} days)")
-        print("=" * 70)
-        print(f"Direction: {trends['direction']}")
-        print(f"Score change: {trends['score_change']:+.1f} points")
-
-        if trends["status_changes"]:
-            print(f"\nStatus changes:")
-            for status, change in trends["status_changes"].items():
-                if change != 0:
-                    print(f"  {status}: {change:+d}")
-
-
-def handle_health_usage_command(args: object) -> None:
```
|
|
455
|
-
"""
|
|
456
|
-
Handle health usage subcommand (formerly analytics).
|
|
457
|
-
Dispatches to dashboard, agents, workflows, trends, or system using AnalyticsDashboard.
|
|
458
|
-
"""
|
|
459
|
-
from ...core.analytics_dashboard import AnalyticsDashboard
|
|
460
|
-
|
|
461
|
-
dashboard = AnalyticsDashboard()
|
|
462
|
-
sub = getattr(args, "usage_subcommand", "dashboard")
|
|
463
|
-
if sub == "show":
|
|
464
|
-
sub = "dashboard"
|
|
465
|
-
fmt = getattr(args, "format", "text")
|
|
466
|
-
|
|
467
|
-
if sub == "dashboard":
|
|
468
|
-
data = dashboard.get_dashboard_data()
|
|
469
|
-
if fmt == "json":
|
|
470
|
-
format_json_output(data)
|
|
471
|
-
else:
|
|
472
|
-
print("\n" + "=" * 60)
|
|
473
|
-
print("Usage / Analytics Dashboard")
|
|
474
|
-
print("=" * 60)
|
|
475
|
-
print(f"\nSystem Status (as of {data['timestamp']}):")
|
|
476
|
-
sys_data = data["system"]
|
|
477
|
-
print(f" Total Agents: {sys_data['total_agents']}")
|
|
478
|
-
print(f" Active Workflows: {sys_data['active_workflows']}")
|
|
479
|
-
print(f" Completed Today: {sys_data['completed_workflows_today']}")
|
|
480
|
-
print(f" Failed Today: {sys_data['failed_workflows_today']}")
|
|
481
|
-
print(f" Avg Workflow Duration: {sys_data['average_workflow_duration']:.2f}s")
|
|
482
|
-
print(f" CPU Usage: {sys_data['cpu_usage']:.1f}%")
|
|
483
|
-
print(f" Memory Usage: {sys_data['memory_usage']:.1f}%")
|
|
484
|
-
print(f" Disk Usage: {sys_data['disk_usage']:.1f}%")
|
|
485
|
-
print("\nAgent Performance (Top 10):")
|
|
486
|
-
for agent in sorted(data["agents"], key=lambda x: x["total_executions"], reverse=True)[:10]:
|
|
487
|
-
print(f" {agent['agent_name']}: {agent['total_executions']} executions, "
|
|
488
|
-
f"{agent['success_rate']*100:.1f}% success, {agent['average_duration']:.2f}s avg")
|
|
489
|
-
print("\nWorkflow Performance:")
|
|
490
|
-
for wf in sorted(data["workflows"], key=lambda x: x["total_executions"], reverse=True)[:10]:
|
|
491
|
-
print(f" {wf['workflow_name']}: {wf['total_executions']} executions, "
|
|
492
|
-
f"{wf['success_rate']*100:.1f}% success")
|
|
493
|
-
elif sub == "agents":
|
|
494
|
-
metrics = dashboard.get_agent_performance(agent_id=getattr(args, "agent_id", None))
|
|
495
|
-
if fmt == "json":
|
|
496
|
-
format_json_output(metrics)
|
|
497
|
-
else:
|
|
498
|
-
for agent in metrics:
|
|
499
|
-
print(f"{agent['agent_name']}: {agent['total_executions']} executions, "
|
|
500
|
-
f"{agent['success_rate']*100:.1f}% success")
|
|
501
|
-
elif sub == "workflows":
|
|
502
|
-
metrics = dashboard.get_workflow_performance(workflow_id=getattr(args, "workflow_id", None))
|
|
503
|
-
if fmt == "json":
|
|
504
|
-
format_json_output(metrics)
|
|
505
|
-
else:
|
|
506
|
-
for wf in metrics:
|
|
507
|
-
print(f"{wf['workflow_name']}: {wf['total_executions']} executions, "
|
|
508
|
-
f"{wf['success_rate']*100:.1f}% success")
|
|
509
|
-
elif sub == "trends":
|
|
510
|
-
metric_type = getattr(args, "metric_type", "agent_duration")
|
|
511
|
-
days = getattr(args, "days", 30)
|
|
512
|
-
trends = dashboard.get_trends(metric_type, days=days)
|
|
513
|
-
if fmt == "json":
|
|
514
|
-
format_json_output(trends)
|
|
515
|
-
else:
|
|
516
|
-
for t in trends:
|
|
517
|
-
print(f"{t['metric_name']}: {len(t['values'])} data points")
|
|
518
|
-
elif sub == "system":
|
|
519
|
-
status = dashboard.get_system_status()
|
|
520
|
-
if fmt == "json":
|
|
521
|
-
format_json_output(status)
|
|
522
|
-
else:
|
|
523
|
-
print(f"System Status (as of {status['timestamp']}):")
|
|
524
|
-
print(f" Total Agents: {status['total_agents']}")
|
|
525
|
-
print(f" Active Workflows: {status['active_workflows']}")
|
|
526
|
-
print(f" Completed Today: {status['completed_workflows_today']}")
|
|
527
|
-
print(f" Failed Today: {status['failed_workflows_today']}")
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
def handle_health_overview_command(
|
|
531
|
-
output_format: str = "text",
|
|
532
|
-
project_root: Path | None = None,
|
|
533
|
-
) -> None:
|
|
534
|
-
"""
|
|
535
|
-
Single 1000-foot view: health checks + usage rolled up for all subsystems.
|
|
536
|
-
|
|
537
|
-
Renders one easy-to-read report: overall health, each health check one line,
|
|
538
|
-
then usage at a glance (system, top agents, top workflows).
|
|
539
|
-
"""
|
|
540
|
-
from ...core.analytics_dashboard import AnalyticsDashboard
|
|
541
|
-
|
|
542
|
-
project_root = project_root or Path.cwd()
|
|
543
|
-
|
|
544
|
-
# 1. Health checks
|
|
545
|
-
registry = HealthCheckRegistry()
|
|
546
|
-
registry.register(EnvironmentHealthCheck(project_root=project_root))
|
|
547
|
-
registry.register(AutomationHealthCheck(project_root=project_root))
|
|
548
|
-
registry.register(ExecutionHealthCheck(project_root=project_root))
|
|
549
|
-
registry.register(Context7CacheHealthCheck(project_root=project_root))
|
|
550
|
-
registry.register(KnowledgeBaseHealthCheck(project_root=project_root))
|
|
551
|
-
registry.register(OutcomeHealthCheck(project_root=project_root))
|
|
552
|
-
metrics_collector = HealthMetricsCollector(project_root=project_root)
|
|
553
|
-
orchestrator = HealthOrchestrator(
|
|
554
|
-
registry=registry,
|
|
555
|
-
metrics_collector=metrics_collector,
|
|
556
|
-
project_root=project_root,
|
|
557
|
-
)
|
|
558
|
-
health_results = orchestrator.run_all_checks(save_metrics=True)
|
|
559
|
-
overall = orchestrator.get_overall_health(health_results)
|
|
560
|
-
|
|
561
|
-
# 2. Usage (best-effort; prefer analytics, fallback to execution metrics — HM-001-S1)
|
|
562
|
-
_log = logging.getLogger(__name__)
|
|
563
|
-
usage_data = None
|
|
564
|
-
try:
|
|
565
|
-
usage_dashboard = AnalyticsDashboard()
|
|
566
|
-
usage_data = usage_dashboard.get_dashboard_data()
|
|
567
|
-
except Exception:
|
|
568
|
-
pass
|
|
569
|
-
# If analytics has no agent/workflow data, derive from execution metrics
|
|
570
|
-
fallback_used = False
|
|
571
|
-
if usage_data:
|
|
572
|
-
agents = usage_data.get("agents") or []
|
|
573
|
-
workflows = usage_data.get("workflows") or []
|
|
574
|
-
total_runs = sum(a.get("total_executions", 0) for a in agents) + sum(
|
|
575
|
-
w.get("total_executions", 0) for w in workflows
|
|
576
|
-
)
|
|
577
|
-
if total_runs == 0:
|
|
578
|
-
fallback = _usage_data_from_execution_metrics(project_root)
|
|
579
|
-
if fallback:
|
|
580
|
-
fallback_used = True
|
|
581
|
-
usage_data = fallback
|
|
582
|
-
else:
|
|
583
|
-
fallback = _usage_data_from_execution_metrics(project_root)
|
|
584
|
-
if fallback:
|
|
585
|
-
fallback_used = True
|
|
586
|
-
usage_data = fallback
|
|
587
|
-
if fallback_used and usage_data:
|
|
588
|
-
n_agents = len(usage_data.get("agents") or [])
|
|
589
|
-
n_workflows = len(usage_data.get("workflows") or [])
|
|
590
|
-
_log.info(
|
|
591
|
-
"Health overview: using execution metrics fallback (%s agents, %s workflows)",
|
|
592
|
-
n_agents, n_workflows,
|
|
593
|
-
)
|
|
594
|
-
|
|
595
|
-
# 3. Build output
|
|
596
|
-
feedback = get_feedback()
|
|
597
|
-
feedback.format_type = output_format
|
|
598
|
-
|
|
599
|
-
if output_format == "json":
|
|
600
|
-
out = {
|
|
601
|
-
"overview": {
|
|
602
|
-
"overall_health": overall,
|
|
603
|
-
"health_checks": {
|
|
604
|
-
name: {
|
|
605
|
-
"status": r.status,
|
|
606
|
-
"score": r.score,
|
|
607
|
-
"message": r.message,
|
|
608
|
-
}
|
|
609
|
-
for name, r in health_results.items()
|
|
610
|
-
if r
|
|
611
|
-
},
|
|
612
|
-
},
|
|
613
|
-
"usage": usage_data,
|
|
614
|
-
}
|
|
615
|
-
format_json_output(out)
|
|
616
|
-
return
|
|
617
|
-
|
|
618
|
-
# Text: 1000-foot, great-looking, easy to read
|
|
619
|
-
width = 72
|
|
620
|
-
lines = []
|
|
621
|
-
lines.append("")
|
|
622
|
-
lines.append("=" * width)
|
|
623
|
-
lines.append(" TAPPS-AGENTS | HEALTH + USAGE | 1000-FOOT VIEW")
|
|
624
|
-
lines.append("=" * width)
|
|
625
|
-
lines.append("")
|
|
626
|
-
|
|
627
|
-
# Overall health
|
|
628
|
-
status_sym = {"healthy": "[OK] ", "degraded": "[WARN]", "unhealthy": "[FAIL]", "unknown": "[?] "}
|
|
629
|
-
sym = status_sym.get(overall["status"], "[?] ")
|
|
630
|
-
lines.append(f" {sym} Overall: {overall['status'].upper()} ({overall['score']:.1f}/100)")
|
|
631
|
-
lines.append("")
|
|
632
|
-
|
|
633
|
-
# Subsystems (health checks) - one line each
|
|
634
|
-
lines.append(" SUBSYSTEMS (health)")
|
|
635
|
-
lines.append(" " + "-" * (width - 2))
|
|
636
|
-
for name, result in sorted(health_results.items()):
|
|
637
|
-
if not result:
|
|
638
|
-
continue
|
|
639
|
-
s = status_sym.get(result.status, "[?] ")
|
|
640
|
-
label = name.replace("_", " ").upper()
|
|
641
|
-
lines.append(f" {s} {label}: {result.score:.1f}/100 | {result.message[:50]}{'...' if len(result.message) > 50 else ''}")
|
|
642
|
-
lines.append("")
|
|
643
|
-
|
|
644
|
-
# Usage at a glance
|
|
645
|
-
lines.append(" USAGE (agents & workflows)")
|
|
646
|
-
lines.append(" " + "-" * (width - 2))
|
|
647
|
-
if usage_data:
|
|
648
|
-
sys_data = usage_data.get("system", {})
|
|
649
|
-
lines.append(f" Today: completed {sys_data.get('completed_workflows_today', 0)} workflows, failed {sys_data.get('failed_workflows_today', 0)} | active: {sys_data.get('active_workflows', 0)}")
|
|
650
|
-
lines.append(f" Avg workflow duration: {sys_data.get('average_workflow_duration', 0):.1f}s | CPU: {sys_data.get('cpu_usage', 0):.0f}% Mem: {sys_data.get('memory_usage', 0):.0f}% Disk: {sys_data.get('disk_usage', 0):.0f}%")
|
|
651
|
-
agents = sorted(usage_data.get("agents", []), key=lambda x: x.get("total_executions", 0), reverse=True)[:5]
|
|
652
|
-
if agents:
|
|
653
|
-
lines.append(" Top agents (30d): " + " | ".join(f"{a.get('agent_name', '')}: {a.get('total_executions', 0)} runs ({a.get('success_rate', 0)*100:.0f}% ok)" for a in agents))
|
|
654
|
-
workflows = sorted(usage_data.get("workflows", []), key=lambda x: x.get("total_executions", 0), reverse=True)[:5]
|
|
655
|
-
if workflows:
|
|
656
|
-
lines.append(" Top workflows (30d): " + " | ".join(f"{w.get('workflow_name', '')}: {w.get('total_executions', 0)} ({w.get('success_rate', 0)*100:.0f}% ok)" for w in workflows))
|
|
657
|
-
else:
|
|
658
|
-
lines.append(" (No usage data yet. Run agents/workflows to populate.)")
|
|
659
|
-
lines.append("")
|
|
660
|
-
lines.append("=" * width)
|
|
661
|
-
lines.append("")
|
|
662
|
-
|
|
663
|
-
feedback.clear_progress()
|
|
664
|
-
print("\n".join(lines))
|
|
665
|
-
|
|
1
|
+
"""
|
|
2
|
+
Health command handlers.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
import logging
|
|
9
|
+
import sys
|
|
10
|
+
from collections import defaultdict
|
|
11
|
+
from datetime import UTC, datetime, timedelta
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
|
|
14
|
+
from ...health.checks.automation import AutomationHealthCheck
|
|
15
|
+
from ...health.checks.environment import EnvironmentHealthCheck
|
|
16
|
+
from ...health.checks.execution import ExecutionHealthCheck
|
|
17
|
+
from ...health.checks.context7_cache import Context7CacheHealthCheck
|
|
18
|
+
from ...health.checks.knowledge_base import KnowledgeBaseHealthCheck
|
|
19
|
+
from ...health.checks.outcomes import OutcomeHealthCheck
|
|
20
|
+
from ...health.collector import HealthMetricsCollector
|
|
21
|
+
from ...health.dashboard import HealthDashboard
|
|
22
|
+
from ...health.orchestrator import HealthOrchestrator
|
|
23
|
+
from ...health.registry import HealthCheckRegistry
|
|
24
|
+
from ..feedback import get_feedback, ProgressTracker
|
|
25
|
+
from .common import format_json_output
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def _usage_data_from_execution_metrics(project_root: Path) -> dict | None:
|
|
29
|
+
"""
|
|
30
|
+
Build usage-like data from execution metrics when analytics is empty.
|
|
31
|
+
|
|
32
|
+
Aggregates .tapps-agents/metrics/executions_*.jsonl by today (steps/workflows),
|
|
33
|
+
by skill (agents), and by workflow_id (workflows). Returns same shape as
|
|
34
|
+
AnalyticsDashboard.get_dashboard_data() for system/agents/workflows.
|
|
35
|
+
"""
|
|
36
|
+
try:
|
|
37
|
+
from ...workflow.execution_metrics import ExecutionMetricsCollector
|
|
38
|
+
|
|
39
|
+
collector = ExecutionMetricsCollector(project_root=project_root)
|
|
40
|
+
metrics = collector.get_metrics(limit=5000)
|
|
41
|
+
if not metrics:
|
|
42
|
+
return None
|
|
43
|
+
|
|
44
|
+
now = datetime.now(UTC)
|
|
45
|
+
today_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
|
|
46
|
+
thirty_days_ago = now - timedelta(days=30)
|
|
47
|
+
|
|
48
|
+
# Filter to last 30 days
|
|
49
|
+
def parse_ts(ts: str) -> datetime:
|
|
50
|
+
return datetime.fromisoformat(ts.replace("Z", "+00:00"))
|
|
51
|
+
|
|
52
|
+
recent = [m for m in metrics if parse_ts(m.started_at) >= thirty_days_ago]
|
|
53
|
+
if not recent:
|
|
54
|
+
return None
|
|
55
|
+
|
|
56
|
+
today_metrics = [m for m in recent if parse_ts(m.started_at) >= today_start]
|
|
57
|
+
workflow_ids_today_success = {
|
|
58
|
+
m.workflow_id for m in today_metrics if m.status == "success"
|
|
59
|
+
}
|
|
60
|
+
workflow_ids_today_failed = {
|
|
61
|
+
m.workflow_id for m in today_metrics if m.status != "success"
|
|
62
|
+
}
|
|
63
|
+
completed_today = len(workflow_ids_today_success)
|
|
64
|
+
failed_today = len(workflow_ids_today_failed)
|
|
65
|
+
avg_duration = (
|
|
66
|
+
sum(m.duration_ms for m in recent) / len(recent) / 1000.0
|
|
67
|
+
if recent
|
|
68
|
+
else 0.0
|
|
69
|
+
)
|
|
70
|
+
|
|
71
|
+
# Agents: by skill or command
|
|
72
|
+
agent_counts: dict[str, list] = defaultdict(list)
|
|
73
|
+
for m in recent:
|
|
74
|
+
key = m.skill or m.command or "unknown"
|
|
75
|
+
agent_counts[key].append(m)
|
|
76
|
+
|
|
77
|
+
agents_list = []
|
|
78
|
+
for name, ms in agent_counts.items():
|
|
79
|
+
total = len(ms)
|
|
80
|
+
success = sum(1 for m in ms if m.status == "success")
|
|
81
|
+
agents_list.append(
|
|
82
|
+
{
|
|
83
|
+
"agent_id": name,
|
|
84
|
+
"agent_name": name,
|
|
85
|
+
"total_executions": total,
|
|
86
|
+
"successful_executions": success,
|
|
87
|
+
"failed_executions": total - success,
|
|
88
|
+
"success_rate": success / total if total else 0.0,
|
|
89
|
+
"average_duration": sum(m.duration_ms for m in ms) / total / 1000.0
|
|
90
|
+
if total
|
|
91
|
+
else 0.0,
|
|
92
|
+
}
|
|
93
|
+
)
|
|
94
|
+
|
|
95
|
+
# Workflows: by workflow_id
|
|
96
|
+
wf_counts: dict[str, list] = defaultdict(list)
|
|
97
|
+
for m in recent:
|
|
98
|
+
wf_counts[m.workflow_id].append(m)
|
|
99
|
+
|
|
100
|
+
workflows_list = []
|
|
101
|
+
for wf_id, ms in wf_counts.items():
|
|
102
|
+
total = len(ms)
|
|
103
|
+
success = sum(1 for m in ms if m.status == "success")
|
|
104
|
+
workflows_list.append(
|
|
105
|
+
{
|
|
106
|
+
"workflow_id": wf_id,
|
|
107
|
+
"workflow_name": wf_id,
|
|
108
|
+
"total_executions": total,
|
|
109
|
+
"successful_executions": success,
|
|
110
|
+
"failed_executions": total - success,
|
|
111
|
+
"success_rate": success / total if total else 0.0,
|
|
112
|
+
"average_duration": sum(m.duration_ms for m in ms) / total / 1000.0
|
|
113
|
+
if total
|
|
114
|
+
else 0.0,
|
|
115
|
+
}
|
|
116
|
+
)
|
|
117
|
+
|
|
118
|
+
# System: try cpu/mem/disk from ResourceMonitor
|
|
119
|
+
cpu_usage = memory_usage = disk_usage = 0.0
|
|
120
|
+
try:
|
|
121
|
+
from ...core.resource_monitor import ResourceMonitor
|
|
122
|
+
|
|
123
|
+
mon = ResourceMonitor()
|
|
124
|
+
res = mon.get_current_metrics()
|
|
125
|
+
cpu_usage = getattr(res, "cpu_percent", 0.0) or 0.0
|
|
126
|
+
memory_usage = getattr(res, "memory_percent", 0.0) or 0.0
|
|
127
|
+
disk_usage = getattr(res, "disk_percent", 0.0) or 0.0
|
|
128
|
+
except Exception:
|
|
129
|
+
pass
|
|
130
|
+
|
|
131
|
+
return {
|
|
132
|
+
"timestamp": now.isoformat(),
|
|
133
|
+
"system": {
|
|
134
|
+
"timestamp": now.isoformat(),
|
|
135
|
+
"total_agents": len(agents_list),
|
|
136
|
+
"active_workflows": 0,
|
|
137
|
+
"completed_workflows_today": completed_today,
|
|
138
|
+
"failed_workflows_today": failed_today,
|
|
139
|
+
"average_workflow_duration": avg_duration,
|
|
140
|
+
"cpu_usage": cpu_usage,
|
|
141
|
+
"memory_usage": memory_usage,
|
|
142
|
+
"disk_usage": disk_usage,
|
|
143
|
+
},
|
|
144
|
+
"agents": agents_list,
|
|
145
|
+
"workflows": workflows_list,
|
|
146
|
+
}
|
|
147
|
+
except Exception:
|
|
148
|
+
return None
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def handle_health_check_command(
|
|
152
|
+
check_name: str | None = None,
|
|
153
|
+
output_format: str = "text",
|
|
154
|
+
save: bool = True,
|
|
155
|
+
project_root: Path | None = None,
|
|
156
|
+
) -> None:
|
|
157
|
+
"""
|
|
158
|
+
Handle health check command.
|
|
159
|
+
|
|
160
|
+
Args:
|
|
161
|
+
check_name: Optional specific check to run
|
|
162
|
+
output_format: Output format (json or text)
|
|
163
|
+
save: Whether to save results to metrics storage
|
|
164
|
+
project_root: Project root directory
|
|
165
|
+
"""
|
|
166
|
+
project_root = project_root or Path.cwd()
|
|
167
|
+
|
|
168
|
+
# Initialize registry and register all checks
|
|
169
|
+
registry = HealthCheckRegistry()
|
|
170
|
+
registry.register(EnvironmentHealthCheck(project_root=project_root))
|
|
171
|
+
registry.register(AutomationHealthCheck(project_root=project_root))
|
|
172
|
+
registry.register(ExecutionHealthCheck(project_root=project_root))
|
|
173
|
+
registry.register(Context7CacheHealthCheck(project_root=project_root))
|
|
174
|
+
registry.register(KnowledgeBaseHealthCheck(project_root=project_root))
|
|
175
|
+
registry.register(OutcomeHealthCheck(project_root=project_root))
|
|
176
|
+
|
|
177
|
+
# Initialize orchestrator
|
|
178
|
+
metrics_collector = HealthMetricsCollector(project_root=project_root)
|
|
179
|
+
orchestrator = HealthOrchestrator(
|
|
180
|
+
registry=registry, metrics_collector=metrics_collector, project_root=project_root
|
|
181
|
+
)
|
|
182
|
+
|
|
183
|
+
# Run checks
|
|
184
|
+
feedback = get_feedback()
|
|
185
|
+
feedback.format_type = output_format
|
|
186
|
+
operation_desc = f"Running health check: {check_name}" if check_name else "Running all health checks"
|
|
187
|
+
feedback.start_operation("Health Check", operation_desc)
|
|
188
|
+
|
|
189
|
+
if check_name:
|
|
190
|
+
check_names = [check_name]
|
|
191
|
+
feedback.running(f"Initializing check: {check_name}...", step=1, total_steps=3)
|
|
192
|
+
else:
|
|
193
|
+
check_names = None
|
|
194
|
+
feedback.running("Discovering health checks...", step=1, total_steps=3)
|
|
195
|
+
|
|
196
|
+
feedback.running("Executing health checks...", step=2, total_steps=3)
|
|
197
|
+
results = orchestrator.run_all_checks(check_names=check_names, save_metrics=save)
|
|
198
|
+
feedback.running("Collecting results...", step=3, total_steps=3)
|
|
199
|
+
feedback.clear_progress()
|
|
200
|
+
|
|
201
|
+
# Build summary
|
|
202
|
+
summary = {}
|
|
203
|
+
if results:
|
|
204
|
+
healthy_count = sum(1 for r in results.values() if r and r.status == "healthy")
|
|
205
|
+
total_count = len([r for r in results.values() if r])
|
|
206
|
+
summary["checks_run"] = total_count
|
|
207
|
+
summary["healthy"] = healthy_count
|
|
208
|
+
summary["degraded"] = sum(1 for r in results.values() if r and r.status == "degraded")
|
|
209
|
+
summary["unhealthy"] = sum(1 for r in results.values() if r and r.status == "unhealthy")
|
|
210
|
+
|
|
211
|
+
# Format output
|
|
212
|
+
if output_format == "json":
|
|
213
|
+
output = {
|
|
214
|
+
"checks": {
|
|
215
|
+
name: {
|
|
216
|
+
"status": result.status,
|
|
217
|
+
"score": result.score,
|
|
218
|
+
"message": result.message,
|
|
219
|
+
"details": result.details,
|
|
220
|
+
"remediation": (
|
|
221
|
+
result.remediation
|
|
222
|
+
if isinstance(result.remediation, list)
|
|
223
|
+
else [result.remediation]
|
|
224
|
+
if result.remediation
|
|
225
|
+
else None
|
|
226
|
+
),
|
|
227
|
+
}
|
|
228
|
+
for name, result in results.items()
|
|
229
|
+
if result
|
|
230
|
+
}
|
|
231
|
+
}
|
|
232
|
+
# Merge summary into output
|
|
233
|
+
if summary:
|
|
234
|
+
output = {**output, "summary": summary}
|
|
235
|
+
feedback.output_result(output, message="Health checks completed")
|
|
236
|
+
else:
|
|
237
|
+
# Text output
|
|
238
|
+
feedback.success("Health checks completed")
|
|
239
|
+
warnings = []
|
|
240
|
+
for name, result in sorted(results.items()):
|
|
241
|
+
if not result:
|
|
242
|
+
continue
|
|
243
|
+
|
|
244
|
+
status_symbol = {
|
|
245
|
+
"healthy": "[OK]",
|
|
246
|
+
"degraded": "[WARN]",
|
|
247
|
+
"unhealthy": "[FAIL]",
|
|
248
|
+
}.get(result.status, "[?]")
|
|
249
|
+
|
|
250
|
+
print(f"\n[{status_symbol}] {name.upper()}: {result.status} ({result.score:.1f}/100)")
|
|
251
|
+
print(f" {result.message}")
|
|
252
|
+
|
|
253
|
+
if result.status != "healthy":
|
|
254
|
+
warnings.append(f"{name}: {result.message}")
|
|
255
|
+
|
|
256
|
+
if result.details:
|
|
257
|
+
# Show key metrics
|
|
258
|
+
key_metrics = []
|
|
259
|
+
for key in [
|
|
260
|
+
"total_executions",
|
|
261
|
+
"success_rate",
|
|
262
|
+
"hit_rate",
|
|
263
|
+
"total_files",
|
|
264
|
+
"average_score",
|
|
265
|
+
]:
|
|
266
|
+
if key in result.details:
|
|
267
|
+
value = result.details[key]
|
|
268
|
+
if isinstance(value, float):
|
|
269
|
+
if key == "success_rate" or key == "hit_rate":
|
|
270
|
+
key_metrics.append(f"{key}: {value:.1f}%")
|
|
271
|
+
else:
|
|
272
|
+
key_metrics.append(f"{key}: {value:.1f}")
|
|
273
|
+
else:
|
|
274
|
+
key_metrics.append(f"{key}: {value}")
|
|
275
|
+
|
|
276
|
+
if key_metrics:
|
|
277
|
+
print(f" Metrics: {' | '.join(key_metrics)}")
|
|
278
|
+
|
|
279
|
+
if result.remediation:
|
|
280
|
+
if isinstance(result.remediation, list):
|
|
281
|
+
if len(result.remediation) > 0:
|
|
282
|
+
print(f" Remediation: {result.remediation[0]}")
|
|
283
|
+
elif isinstance(result.remediation, str):
|
|
284
|
+
print(f" Remediation: {result.remediation}")
|
|
285
|
+
|
|
286
|
+
if warnings:
|
|
287
|
+
for warning_msg in warnings:
|
|
288
|
+
feedback.warning(warning_msg)
|
|
289
|
+
|
|
290
|
+
|
|
291
|
+
def handle_health_dashboard_command(
|
|
292
|
+
output_format: str = "text", project_root: Path | None = None
|
|
293
|
+
) -> None:
|
|
294
|
+
"""
|
|
295
|
+
Handle health dashboard command.
|
|
296
|
+
|
|
297
|
+
Args:
|
|
298
|
+
output_format: Output format (json or text)
|
|
299
|
+
project_root: Project root directory
|
|
300
|
+
"""
|
|
301
|
+
project_root = project_root or Path.cwd()
|
|
302
|
+
|
|
303
|
+
# Initialize registry and register all checks
|
|
304
|
+
registry = HealthCheckRegistry()
|
|
305
|
+
registry.register(EnvironmentHealthCheck(project_root=project_root))
|
|
306
|
+
registry.register(AutomationHealthCheck(project_root=project_root))
|
|
307
|
+
registry.register(ExecutionHealthCheck(project_root=project_root))
|
|
308
|
+
registry.register(Context7CacheHealthCheck(project_root=project_root))
|
|
309
|
+
registry.register(KnowledgeBaseHealthCheck(project_root=project_root))
|
|
310
|
+
registry.register(OutcomeHealthCheck(project_root=project_root))
|
|
311
|
+
|
|
312
|
+
# Initialize dashboard
|
|
313
|
+
metrics_collector = HealthMetricsCollector(project_root=project_root)
|
|
314
|
+
orchestrator = HealthOrchestrator(
|
|
315
|
+
registry=registry, metrics_collector=metrics_collector, project_root=project_root
|
|
316
|
+
)
|
|
317
|
+
dashboard = HealthDashboard(orchestrator=orchestrator)
|
|
318
|
+
|
|
319
|
+
# Render dashboard
|
|
320
|
+
feedback = get_feedback()
|
|
321
|
+
feedback.format_type = output_format
|
|
322
|
+
feedback.start_operation("Health Dashboard", "Generating health dashboard visualization")
|
|
323
|
+
feedback.running("Collecting health metrics...", step=1, total_steps=3)
|
|
324
|
+
feedback.running("Generating dashboard...", step=2, total_steps=3)
|
|
325
|
+
feedback.running("Rendering dashboard output...", step=3, total_steps=3)
|
|
326
|
+
|
|
327
|
+
if output_format == "json":
|
|
328
|
+
output = dashboard.render_json()
|
|
329
|
+
feedback.clear_progress()
|
|
330
|
+
feedback.output_result(output, message="Health dashboard generated")
|
|
331
|
+
else:
|
|
332
|
+
output = dashboard.render_text()
|
|
333
|
+
feedback.clear_progress()
|
|
334
|
+
feedback.success("Health dashboard generated")
|
|
335
|
+
print(output)
|
|
336
|
+
|
|
337
|
+
|
|
338
|
+
def handle_health_metrics_command(
|
|
339
|
+
check_name: str | None = None,
|
|
340
|
+
status: str | None = None,
|
|
341
|
+
days: int = 30,
|
|
342
|
+
output_format: str = "text",
|
|
343
|
+
project_root: Path | None = None,
|
|
344
|
+
) -> None:
|
|
345
|
+
"""
|
|
346
|
+
Handle health metrics command.
|
|
347
|
+
|
|
348
|
+
Args:
|
|
349
|
+
check_name: Optional check name to filter
|
|
350
|
+
status: Optional status to filter
|
|
351
|
+
days: Number of days to look back
|
|
352
|
+
output_format: Output format (json or text)
|
|
353
|
+
project_root: Project root directory
|
|
354
|
+
"""
|
|
355
|
+
project_root = project_root or Path.cwd()
|
|
356
|
+
collector = HealthMetricsCollector(project_root=project_root)
|
|
357
|
+
|
|
358
|
+
# Get metrics
|
|
359
|
+
feedback = get_feedback()
|
|
360
|
+
feedback.format_type = output_format
|
|
361
|
+
operation_desc = f"Collecting metrics{f' for {check_name}' if check_name else ''}"
|
|
362
|
+
feedback.start_operation("Health Metrics", operation_desc)
|
|
363
|
+
feedback.running("Querying metrics database...", step=1, total_steps=3)
|
|
364
|
+
|
|
365
|
+
metrics = collector.get_metrics(check_name=check_name, status=status, days=days, limit=1000)
|
|
366
|
+
feedback.running("Calculating summary statistics...", step=2, total_steps=3)
|
|
367
|
+
summary = collector.get_summary(days=days)
|
|
368
|
+
feedback.running("Formatting results...", step=3, total_steps=3)
|
|
369
|
+
feedback.clear_progress()
|
|
370
|
+
|
|
371
|
+
if output_format == "json":
|
|
372
|
+
output = {
|
|
373
|
+
"summary": summary,
|
|
374
|
+
"metrics": [m.to_dict() for m in metrics],
|
|
375
|
+
}
|
|
376
|
+
feedback.output_result(output, message="Health metrics retrieved")
|
|
377
|
+
else:
|
|
378
|
+
# Text output
|
|
379
|
+
feedback.success("Health metrics retrieved")
|
|
380
|
+
print(f"\nHealth Metrics Summary (last {days} days)")
|
|
381
|
+
print("=" * 70)
|
|
382
|
+
print(f"Total checks: {summary['total_checks']}")
|
|
383
|
+
print(f"Average score: {summary['average_score']:.1f}/100")
|
|
384
|
+
print(f"\nBy status:")
|
|
385
|
+
for status_name, count in summary["by_status"].items():
|
|
386
|
+
print(f" {status_name}: {count}")
|
|
387
|
+
|
|
388
|
+
if summary["by_check"]:
|
|
389
|
+
print(f"\nBy check:")
|
|
390
|
+
for check_name, check_data in summary["by_check"].items():
|
|
391
|
+
print(f" {check_name}:")
|
|
392
|
+
print(f" Count: {check_data['count']}")
|
|
393
|
+
print(f" Average score: {check_data['average_score']:.1f}/100")
|
|
394
|
+
print(f" Latest status: {check_data['latest_status']}")
|
|
395
|
+
print(f" Latest score: {check_data['latest_score']:.1f}/100")
|
|
396
|
+
|
|
397
|
+
if metrics:
|
|
398
|
+
print(f"\nRecent metrics (showing up to 10):")
|
|
399
|
+
for metric in metrics[:10]:
|
|
400
|
+
print(f" {metric.check_name}: {metric.status} ({metric.score:.1f}/100) - {metric.timestamp}")
|
|
401
|
+
|
|
402
|
+
|
|
403
|
+
def handle_health_trends_command(
|
|
404
|
+
check_name: str,
|
|
405
|
+
days: int = 7,
|
|
406
|
+
output_format: str = "text",
|
|
407
|
+
project_root: Path | None = None,
|
|
408
|
+
) -> None:
|
|
409
|
+
"""
|
|
410
|
+
Handle health trends command.
|
|
411
|
+
|
|
412
|
+
Args:
|
|
413
|
+
check_name: Check name to analyze trends for
|
|
414
|
+
days: Number of days to analyze
|
|
415
|
+
output_format: Output format (json or text)
|
|
416
|
+
project_root: Project root directory
|
|
417
|
+
"""
|
|
418
|
+
project_root = project_root or Path.cwd()
|
|
419
|
+
collector = HealthMetricsCollector(project_root=project_root)
|
|
420
|
+
|
|
421
|
+
# Get trends
|
|
422
|
+
feedback = get_feedback()
|
|
423
|
+
feedback.format_type = output_format
|
|
424
|
+
feedback.start_operation("Health Trends", f"Analyzing health trends for {check_name}")
|
|
425
|
+
feedback.running("Loading historical data...", step=1, total_steps=3)
|
|
426
|
+
|
|
427
|
+
trends = collector.get_trends(check_name=check_name, days=days)
|
|
428
|
+
feedback.running("Calculating trends...", step=2, total_steps=3)
|
|
429
|
+
feedback.running("Generating trend report...", step=3, total_steps=3)
|
|
430
|
+
feedback.clear_progress()
|
|
431
|
+
|
|
432
|
+
if output_format == "json":
|
|
433
|
+
output = {
|
|
434
|
+
"check_name": check_name,
|
|
435
|
+
"days": days,
|
|
436
|
+
"trends": trends,
|
|
437
|
+
}
|
|
438
|
+
feedback.output_result(output, message="Health trends analyzed")
|
|
439
|
+
else:
|
|
440
|
+
# Text output
|
|
441
|
+
feedback.success("Health trends analyzed")
|
|
442
|
+
print(f"\nHealth Trends for '{check_name}' (last {days} days)")
|
|
443
|
+
print("=" * 70)
|
|
444
|
+
print(f"Direction: {trends['direction']}")
|
|
445
|
+
print(f"Score change: {trends['score_change']:+.1f} points")
|
|
446
|
+
|
|
447
|
+
if trends["status_changes"]:
|
|
448
|
+
print(f"\nStatus changes:")
|
|
449
|
+
for status, change in trends["status_changes"].items():
|
|
450
|
+
if change != 0:
|
|
451
|
+
print(f" {status}: {change:+d}")
|
|
452
|
+
|
|
453
|
+
|
|
454
|
+
def handle_health_usage_command(args: object) -> None:
|
|
455
|
+
"""
|
|
456
|
+
Handle health usage subcommand (formerly analytics).
|
|
457
|
+
Dispatches to dashboard, agents, workflows, trends, or system using AnalyticsDashboard.
|
|
458
|
+
"""
|
|
459
|
+
from ...core.analytics_dashboard import AnalyticsDashboard
|
|
460
|
+
|
|
461
|
+
dashboard = AnalyticsDashboard()
|
|
462
|
+
sub = getattr(args, "usage_subcommand", "dashboard")
|
|
463
|
+
if sub == "show":
|
|
464
|
+
sub = "dashboard"
|
|
465
|
+
fmt = getattr(args, "format", "text")
|
|
466
|
+
|
|
467
|
+
if sub == "dashboard":
|
|
468
|
+
data = dashboard.get_dashboard_data()
|
|
469
|
+
if fmt == "json":
|
|
470
|
+
format_json_output(data)
|
|
471
|
+
else:
|
|
472
|
+
print("\n" + "=" * 60)
|
|
473
|
+
print("Usage / Analytics Dashboard")
|
|
474
|
+
print("=" * 60)
|
|
475
|
+
print(f"\nSystem Status (as of {data['timestamp']}):")
|
|
476
|
+
sys_data = data["system"]
|
|
477
|
+
print(f" Total Agents: {sys_data['total_agents']}")
|
|
478
|
+
print(f" Active Workflows: {sys_data['active_workflows']}")
|
|
479
|
+
print(f" Completed Today: {sys_data['completed_workflows_today']}")
|
|
480
|
+
print(f" Failed Today: {sys_data['failed_workflows_today']}")
|
|
481
|
+
print(f" Avg Workflow Duration: {sys_data['average_workflow_duration']:.2f}s")
|
|
482
|
+
print(f" CPU Usage: {sys_data['cpu_usage']:.1f}%")
|
|
483
|
+
print(f" Memory Usage: {sys_data['memory_usage']:.1f}%")
|
|
484
|
+
print(f" Disk Usage: {sys_data['disk_usage']:.1f}%")
|
|
485
|
+
print("\nAgent Performance (Top 10):")
|
|
486
|
+
for agent in sorted(data["agents"], key=lambda x: x["total_executions"], reverse=True)[:10]:
|
|
487
|
+
print(f" {agent['agent_name']}: {agent['total_executions']} executions, "
|
|
488
|
+
f"{agent['success_rate']*100:.1f}% success, {agent['average_duration']:.2f}s avg")
|
|
489
|
+
print("\nWorkflow Performance:")
|
|
490
|
+
for wf in sorted(data["workflows"], key=lambda x: x["total_executions"], reverse=True)[:10]:
|
|
491
|
+
print(f" {wf['workflow_name']}: {wf['total_executions']} executions, "
|
|
492
|
+
f"{wf['success_rate']*100:.1f}% success")
|
|
493
|
+
elif sub == "agents":
|
|
494
|
+
metrics = dashboard.get_agent_performance(agent_id=getattr(args, "agent_id", None))
|
|
495
|
+
if fmt == "json":
|
|
496
|
+
format_json_output(metrics)
|
|
497
|
+
else:
|
|
498
|
+
for agent in metrics:
|
|
499
|
+
print(f"{agent['agent_name']}: {agent['total_executions']} executions, "
|
|
500
|
+
f"{agent['success_rate']*100:.1f}% success")
|
|
501
|
+
elif sub == "workflows":
|
|
502
|
+
metrics = dashboard.get_workflow_performance(workflow_id=getattr(args, "workflow_id", None))
|
|
503
|
+
if fmt == "json":
|
|
504
|
+
format_json_output(metrics)
|
|
505
|
+
else:
|
|
506
|
+
for wf in metrics:
|
|
507
|
+
print(f"{wf['workflow_name']}: {wf['total_executions']} executions, "
|
|
508
|
+
f"{wf['success_rate']*100:.1f}% success")
|
|
509
|
+
elif sub == "trends":
|
|
510
|
+
metric_type = getattr(args, "metric_type", "agent_duration")
|
|
511
|
+
days = getattr(args, "days", 30)
|
|
512
|
+
trends = dashboard.get_trends(metric_type, days=days)
|
|
513
|
+
if fmt == "json":
|
|
514
|
+
format_json_output(trends)
|
|
515
|
+
else:
|
|
516
|
+
for t in trends:
|
|
517
|
+
print(f"{t['metric_name']}: {len(t['values'])} data points")
|
|
518
|
+
elif sub == "system":
|
|
519
|
+
status = dashboard.get_system_status()
|
|
520
|
+
if fmt == "json":
|
|
521
|
+
format_json_output(status)
|
|
522
|
+
else:
|
|
523
|
+
print(f"System Status (as of {status['timestamp']}):")
|
|
524
|
+
print(f" Total Agents: {status['total_agents']}")
|
|
525
|
+
print(f" Active Workflows: {status['active_workflows']}")
|
|
526
|
+
print(f" Completed Today: {status['completed_workflows_today']}")
|
|
527
|
+
print(f" Failed Today: {status['failed_workflows_today']}")
|
|
528
|
+
|
|
529
|
+
|
|
530
|
+
def handle_health_overview_command(
|
|
531
|
+
output_format: str = "text",
|
|
532
|
+
project_root: Path | None = None,
|
|
533
|
+
) -> None:
|
|
534
|
+
"""
|
|
535
|
+
Single 1000-foot view: health checks + usage rolled up for all subsystems.
|
|
536
|
+
|
|
537
|
+
Renders one easy-to-read report: overall health, each health check one line,
|
|
538
|
+
then usage at a glance (system, top agents, top workflows).
|
|
539
|
+
"""
|
|
540
|
+
from ...core.analytics_dashboard import AnalyticsDashboard
|
|
541
|
+
|
|
542
|
+
project_root = project_root or Path.cwd()
|
|
543
|
+
|
|
544
|
+
# 1. Health checks
|
|
545
|
+
registry = HealthCheckRegistry()
|
|
546
|
+
registry.register(EnvironmentHealthCheck(project_root=project_root))
|
|
547
|
+
registry.register(AutomationHealthCheck(project_root=project_root))
|
|
548
|
+
registry.register(ExecutionHealthCheck(project_root=project_root))
|
|
549
|
+
registry.register(Context7CacheHealthCheck(project_root=project_root))
|
|
550
|
+
registry.register(KnowledgeBaseHealthCheck(project_root=project_root))
|
|
551
|
+
registry.register(OutcomeHealthCheck(project_root=project_root))
|
|
552
|
+
metrics_collector = HealthMetricsCollector(project_root=project_root)
|
|
553
|
+
orchestrator = HealthOrchestrator(
|
|
554
|
+
registry=registry,
|
|
555
|
+
metrics_collector=metrics_collector,
|
|
556
|
+
project_root=project_root,
|
|
557
|
+
)
|
|
558
|
+
health_results = orchestrator.run_all_checks(save_metrics=True)
|
|
559
|
+
overall = orchestrator.get_overall_health(health_results)
|
|
560
|
+
|
|
561
|
+
# 2. Usage (best-effort; prefer analytics, fallback to execution metrics — HM-001-S1)
|
|
562
|
+
_log = logging.getLogger(__name__)
|
|
563
|
+
usage_data = None
|
|
564
|
+
try:
|
|
565
|
+
usage_dashboard = AnalyticsDashboard()
|
|
566
|
+
usage_data = usage_dashboard.get_dashboard_data()
|
|
567
|
+
except Exception:
|
|
568
|
+
pass
|
|
569
|
+
# If analytics has no agent/workflow data, derive from execution metrics
|
|
570
|
+
fallback_used = False
|
|
571
|
+
if usage_data:
|
|
572
|
+
agents = usage_data.get("agents") or []
|
|
573
|
+
workflows = usage_data.get("workflows") or []
|
|
574
|
+
total_runs = sum(a.get("total_executions", 0) for a in agents) + sum(
|
|
575
|
+
w.get("total_executions", 0) for w in workflows
|
|
576
|
+
)
|
|
577
|
+
if total_runs == 0:
|
|
578
|
+
fallback = _usage_data_from_execution_metrics(project_root)
|
|
579
|
+
if fallback:
|
|
580
|
+
fallback_used = True
|
|
581
|
+
usage_data = fallback
|
|
582
|
+
else:
|
|
583
|
+
fallback = _usage_data_from_execution_metrics(project_root)
|
|
584
|
+
if fallback:
|
|
585
|
+
fallback_used = True
|
|
586
|
+
usage_data = fallback
|
|
587
|
+
if fallback_used and usage_data:
|
|
588
|
+
n_agents = len(usage_data.get("agents") or [])
|
|
589
|
+
n_workflows = len(usage_data.get("workflows") or [])
|
|
590
|
+
_log.info(
|
|
591
|
+
"Health overview: using execution metrics fallback (%s agents, %s workflows)",
|
|
592
|
+
n_agents, n_workflows,
|
|
593
|
+
)
|
|
594
|
+
|
|
595
|
+
# 3. Build output
|
|
596
|
+
feedback = get_feedback()
|
|
597
|
+
feedback.format_type = output_format
|
|
598
|
+
|
|
599
|
+
if output_format == "json":
|
|
600
|
+
out = {
|
|
601
|
+
"overview": {
|
|
602
|
+
"overall_health": overall,
|
|
603
|
+
"health_checks": {
|
|
604
|
+
name: {
|
|
605
|
+
"status": r.status,
|
|
606
|
+
"score": r.score,
|
|
607
|
+
"message": r.message,
|
|
608
|
+
}
|
|
609
|
+
for name, r in health_results.items()
|
|
610
|
+
if r
|
|
611
|
+
},
|
|
612
|
+
},
|
|
613
|
+
"usage": usage_data,
|
|
614
|
+
}
|
|
615
|
+
format_json_output(out)
|
|
616
|
+
return
|
|
617
|
+
|
|
618
|
+
# Text: 1000-foot, great-looking, easy to read
|
|
619
|
+
width = 72
|
|
620
|
+
lines = []
|
|
621
|
+
lines.append("")
|
|
622
|
+
lines.append("=" * width)
|
|
623
|
+
lines.append(" TAPPS-AGENTS | HEALTH + USAGE | 1000-FOOT VIEW")
|
|
624
|
+
lines.append("=" * width)
|
|
625
|
+
lines.append("")
|
|
626
|
+
|
|
627
|
+
# Overall health
|
|
628
|
+
status_sym = {"healthy": "[OK] ", "degraded": "[WARN]", "unhealthy": "[FAIL]", "unknown": "[?] "}
|
|
629
|
+
sym = status_sym.get(overall["status"], "[?] ")
|
|
630
|
+
lines.append(f" {sym} Overall: {overall['status'].upper()} ({overall['score']:.1f}/100)")
|
|
631
|
+
lines.append("")
|
|
632
|
+
|
|
633
|
+
# Subsystems (health checks) - one line each
|
|
634
|
+
lines.append(" SUBSYSTEMS (health)")
|
|
635
|
+
lines.append(" " + "-" * (width - 2))
|
|
636
|
+
for name, result in sorted(health_results.items()):
|
|
637
|
+
if not result:
|
|
638
|
+
continue
|
|
639
|
+
s = status_sym.get(result.status, "[?] ")
|
|
640
|
+
label = name.replace("_", " ").upper()
|
|
641
|
+
lines.append(f" {s} {label}: {result.score:.1f}/100 | {result.message[:50]}{'...' if len(result.message) > 50 else ''}")
|
|
642
|
+
lines.append("")
|
|
643
|
+
|
|
644
|
+
# Usage at a glance
|
|
645
|
+
lines.append(" USAGE (agents & workflows)")
|
|
646
|
+
lines.append(" " + "-" * (width - 2))
|
|
647
|
+
if usage_data:
|
|
648
|
+
sys_data = usage_data.get("system", {})
|
|
649
|
+
lines.append(f" Today: completed {sys_data.get('completed_workflows_today', 0)} workflows, failed {sys_data.get('failed_workflows_today', 0)} | active: {sys_data.get('active_workflows', 0)}")
|
|
650
|
+
lines.append(f" Avg workflow duration: {sys_data.get('average_workflow_duration', 0):.1f}s | CPU: {sys_data.get('cpu_usage', 0):.0f}% Mem: {sys_data.get('memory_usage', 0):.0f}% Disk: {sys_data.get('disk_usage', 0):.0f}%")
|
|
651
|
+
agents = sorted(usage_data.get("agents", []), key=lambda x: x.get("total_executions", 0), reverse=True)[:5]
|
|
652
|
+
if agents:
|
|
653
|
+
lines.append(" Top agents (30d): " + " | ".join(f"{a.get('agent_name', '')}: {a.get('total_executions', 0)} runs ({a.get('success_rate', 0)*100:.0f}% ok)" for a in agents))
|
|
654
|
+
workflows = sorted(usage_data.get("workflows", []), key=lambda x: x.get("total_executions", 0), reverse=True)[:5]
|
|
655
|
+
if workflows:
|
|
656
|
+
lines.append(" Top workflows (30d): " + " | ".join(f"{w.get('workflow_name', '')}: {w.get('total_executions', 0)} ({w.get('success_rate', 0)*100:.0f}% ok)" for w in workflows))
|
|
657
|
+
else:
|
|
658
|
+
lines.append(" (No usage data yet. Run agents/workflows to populate.)")
|
|
659
|
+
lines.append("")
|
|
660
|
+
lines.append("=" * width)
|
|
661
|
+
lines.append("")
|
|
662
|
+
|
|
663
|
+
feedback.clear_progress()
|
|
664
|
+
print("\n".join(lines))
|
|
665
|
+
|