attune-ai 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
|
@@ -0,0 +1,1313 @@
|
|
|
1
|
+
"""Code Health Assistant Module
|
|
2
|
+
|
|
3
|
+
A comprehensive system for running health checks, tracking trends,
|
|
4
|
+
and auto-fixing common issues in codebases.
|
|
5
|
+
|
|
6
|
+
Copyright 2025 Smart AI Memory, LLC
|
|
7
|
+
Licensed under Fair Source 0.9
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import asyncio
|
|
11
|
+
import json
|
|
12
|
+
import logging
|
|
13
|
+
import shutil
|
|
14
|
+
import subprocess
|
|
15
|
+
from dataclasses import dataclass, field
|
|
16
|
+
from datetime import datetime, timedelta
|
|
17
|
+
from enum import Enum
|
|
18
|
+
from pathlib import Path
|
|
19
|
+
from typing import Any
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class HealthStatus(Enum):
    """Health check result status.

    Used both for a single check's outcome and for the aggregate report
    status derived from the weighted overall score.
    """

    PASS = "pass"  # check ran and met the "good" threshold
    WARN = "warn"  # check ran; score in the warning band
    FAIL = "fail"  # check ran; score below the warning threshold
    SKIP = "skip"  # check not run (e.g. required tool unavailable)
    ERROR = "error"  # check crashed or had no registered handler
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class CheckCategory(Enum):
    """Categories of health checks.

    Each category maps to a priority weight in ``CHECK_WEIGHTS``; most
    (not COVERAGE) also map to a runner method in ``HealthCheckRunner``.
    """

    LINT = "lint"  # style/lint findings (ruff by default)
    FORMAT = "format"  # code formatting (black by default)
    TYPES = "types"  # static type checking (pyright by default)
    TESTS = "tests"  # test suite results (pytest by default)
    COVERAGE = "coverage"  # weighted in scoring, but no default check entry -- see DEFAULT_CONFIG
    SECURITY = "security"  # security scanning (bandit by default)
    DEPS = "deps"  # dependency auditing (pip-audit by default)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
# Priority weights for health check categories.
# Higher weight means the category contributes more to the overall score
# computed in HealthReport._recalculate_score(); unknown categories there
# fall back to a weight of 50.
CHECK_WEIGHTS = {
    CheckCategory.SECURITY: 100,
    CheckCategory.TYPES: 90,
    CheckCategory.TESTS: 85,
    CheckCategory.LINT: 70,
    CheckCategory.FORMAT: 50,
    CheckCategory.COVERAGE: 40,
    CheckCategory.DEPS: 30,
}

# Default thresholds for the 0-100 overall score:
# >= "good" -> PASS, >= "warning" -> WARN, otherwise FAIL.
# NOTE(review): "critical" is not read anywhere in this module --
# presumably consumed by callers/reporting; confirm before removing.
DEFAULT_THRESHOLDS = {
    "good": 85,
    "warning": 70,
    "critical": 50,
}

# Default configuration.
# Per-check settings: "enabled" toggles the check, "tool" names the external
# binary invoked, "weight" mirrors CHECK_WEIGHTS. There is no "coverage"
# entry; the "tests" check carries a "coverage_target" instead.
DEFAULT_CONFIG = {
    "checks": {
        "lint": {"enabled": True, "tool": "ruff", "weight": 70},
        "format": {"enabled": True, "tool": "black", "weight": 50},
        "types": {"enabled": True, "tool": "pyright", "weight": 90},
        "tests": {"enabled": True, "tool": "pytest", "weight": 85, "coverage_target": 80},
        "security": {"enabled": True, "tool": "bandit", "weight": 100},
        "deps": {"enabled": True, "tool": "pip-audit", "weight": 30},
    },
    "thresholds": DEFAULT_THRESHOLDS,
    # Auto-fix settings are not read in this part of the module --
    # semantics assumed from key names; verify against the fixer code.
    "auto_fix": {
        "safe_fixes": True,
        "prompt_fixes": True,
        "categories": ["lint", "format"],
    },
}
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
@dataclass
class HealthIssue:
    """A single health check issue.

    One finding (lint error, type error, vulnerability, ...) attributed
    to a file and, when known, a line number.
    """

    category: CheckCategory  # which check produced this issue
    file_path: str  # path of the offending file
    line: int | None  # 1-based line number, or None when not applicable
    code: str  # tool-specific rule/error code
    message: str  # human-readable description from the tool
    severity: str = "warning"  # warning, error
    fixable: bool = False  # True when an automatic fix is available
    fix_command: str | None = None  # shell command that applies the fix, if any

    def to_dict(self) -> dict[str, Any]:
        """Convert to a JSON-serializable dictionary (enum flattened to its value)."""
        return {
            "category": self.category.value,
            "file_path": self.file_path,
            "line": self.line,
            "code": self.code,
            "message": self.message,
            "severity": self.severity,
            "fixable": self.fixable,
            "fix_command": self.fix_command,
        }
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
@dataclass
class CheckResult:
    """Result of a single health check.

    Bundles the status, a 0-100 score, and the individual issues found
    by one check category, along with timing and tool metadata.
    """

    category: CheckCategory
    status: HealthStatus
    score: int  # 0-100
    issues: list[HealthIssue] = field(default_factory=list)
    details: dict[str, Any] = field(default_factory=dict)
    duration_ms: int = 0
    tool_used: str = ""

    @property
    def issue_count(self) -> int:
        """Return total issue count."""
        return len(self.issues)

    @property
    def fixable_count(self) -> int:
        """Return fixable issue count."""
        # bools sum to ints, so this counts issues with fixable=True.
        return sum(issue.fixable for issue in self.issues)

    def to_dict(self) -> dict:
        """Convert to a JSON-serializable dictionary."""
        payload = {
            "category": self.category.value,
            "status": self.status.value,
            "score": self.score,
            "issue_count": self.issue_count,
            "fixable_count": self.fixable_count,
            "issues": [issue.to_dict() for issue in self.issues],
            "details": self.details,
            "duration_ms": self.duration_ms,
            "tool_used": self.tool_used,
        }
        return payload
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
@dataclass
class HealthReport:
    """Complete health report from all checks.

    Collects per-category CheckResults and keeps a weighted overall
    score plus a derived PASS/WARN/FAIL status in sync.
    """

    results: list[CheckResult] = field(default_factory=list)
    overall_score: int = 100
    status: HealthStatus = HealthStatus.PASS
    # NOTE(review): timestamp is naive local time -- confirm whether
    # consumers expect UTC before changing.
    generated_at: str = field(default_factory=lambda: datetime.now().isoformat())
    project_root: str = "."

    def add_result(self, result: CheckResult) -> None:
        """Append a check result and refresh the aggregate score/status."""
        self.results.append(result)
        self._recalculate_score()

    def _recalculate_score(self) -> None:
        """Recalculate the weighted overall score and derived status."""
        if not self.results:
            self.overall_score = 100
            self.status = HealthStatus.PASS
            return

        # Skipped checks do not participate in scoring.
        scored = [r for r in self.results if r.status != HealthStatus.SKIP]
        total_weight = sum(CHECK_WEIGHTS.get(r.category, 50) for r in scored)

        if total_weight > 0:
            weighted = sum(r.score * CHECK_WEIGHTS.get(r.category, 50) for r in scored)
            self.overall_score = int(weighted / total_weight)
        else:
            # Everything was skipped: treat as perfectly healthy.
            self.overall_score = 100

        # Map the numeric score onto a coarse status band.
        if self.overall_score >= DEFAULT_THRESHOLDS["good"]:
            self.status = HealthStatus.PASS
        elif self.overall_score >= DEFAULT_THRESHOLDS["warning"]:
            self.status = HealthStatus.WARN
        else:
            self.status = HealthStatus.FAIL

    @property
    def total_issues(self) -> int:
        """Return total issues across all checks."""
        return sum(result.issue_count for result in self.results)

    @property
    def total_fixable(self) -> int:
        """Return total fixable issues across all checks."""
        return sum(result.fixable_count for result in self.results)

    def get_result(self, category: CheckCategory) -> CheckResult | None:
        """Return the result for *category*, or None when absent."""
        return next((r for r in self.results if r.category == category), None)

    def to_dict(self) -> dict:
        """Convert to a JSON-serializable dictionary."""
        return {
            "overall_score": self.overall_score,
            "status": self.status.value,
            "total_issues": self.total_issues,
            "total_fixable": self.total_fixable,
            "generated_at": self.generated_at,
            "project_root": self.project_root,
            "results": [result.to_dict() for result in self.results],
        }
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
class HealthCheckRunner:
|
|
222
|
+
"""Run configurable health checks and aggregate results."""
|
|
223
|
+
|
|
224
|
+
    def __init__(
        self,
        project_root: str = ".",
        config: dict | None = None,
    ):
        """Initialize the health check runner.

        Args:
            project_root: Root directory of the project
            config: Configuration dictionary (uses defaults if not provided)

        Note:
            The merge with DEFAULT_CONFIG is shallow: a caller-supplied
            top-level key (e.g. "checks") replaces the default section
            wholesale instead of being merged key-by-key. Presumably
            intentional -- confirm before passing partial sections.
        """
        self.project_root = Path(project_root).resolve()
        # Shallow merge: user config wins per top-level key (see Note above).
        self.config = {**DEFAULT_CONFIG, **(config or {})}
        # Dispatch table mapping each category to its runner method.
        # COVERAGE has no handler; DEFAULT_CONFIG folds a coverage_target
        # into the "tests" check instead.
        self._check_handlers = {
            CheckCategory.LINT: self._run_lint_check,
            CheckCategory.FORMAT: self._run_format_check,
            CheckCategory.TYPES: self._run_type_check,
            CheckCategory.TESTS: self._run_test_check,
            CheckCategory.SECURITY: self._run_security_check,
            CheckCategory.DEPS: self._run_deps_check,
        }
|
|
246
|
+
|
|
247
|
+
def _is_tool_available(self, tool: str) -> bool:
|
|
248
|
+
"""Check if a tool is available on the system."""
|
|
249
|
+
return shutil.which(tool) is not None
|
|
250
|
+
|
|
251
|
+
async def run_all(self) -> HealthReport:
|
|
252
|
+
"""Run all enabled health checks."""
|
|
253
|
+
report = HealthReport(project_root=str(self.project_root))
|
|
254
|
+
|
|
255
|
+
tasks = []
|
|
256
|
+
for category in CheckCategory:
|
|
257
|
+
check_config = self.config["checks"].get(category.value, {})
|
|
258
|
+
if check_config.get("enabled", False):
|
|
259
|
+
handler = self._check_handlers.get(category)
|
|
260
|
+
if handler:
|
|
261
|
+
tasks.append(self._run_check_async(category, handler, check_config))
|
|
262
|
+
|
|
263
|
+
results = await asyncio.gather(*tasks, return_exceptions=True)
|
|
264
|
+
|
|
265
|
+
for result in results:
|
|
266
|
+
if isinstance(result, CheckResult):
|
|
267
|
+
report.add_result(result)
|
|
268
|
+
elif isinstance(result, Exception):
|
|
269
|
+
# Log error but continue with other checks
|
|
270
|
+
pass
|
|
271
|
+
|
|
272
|
+
return report
|
|
273
|
+
|
|
274
|
+
async def run_quick(self) -> HealthReport:
|
|
275
|
+
"""Run fast checks only (lint, format, types)."""
|
|
276
|
+
report = HealthReport(project_root=str(self.project_root))
|
|
277
|
+
|
|
278
|
+
quick_checks = [CheckCategory.LINT, CheckCategory.FORMAT, CheckCategory.TYPES]
|
|
279
|
+
tasks = []
|
|
280
|
+
|
|
281
|
+
for category in quick_checks:
|
|
282
|
+
check_config = self.config["checks"].get(category.value, {})
|
|
283
|
+
if check_config.get("enabled", False):
|
|
284
|
+
handler = self._check_handlers.get(category)
|
|
285
|
+
if handler:
|
|
286
|
+
tasks.append(self._run_check_async(category, handler, check_config))
|
|
287
|
+
|
|
288
|
+
results = await asyncio.gather(*tasks, return_exceptions=True)
|
|
289
|
+
|
|
290
|
+
for result in results:
|
|
291
|
+
if isinstance(result, CheckResult):
|
|
292
|
+
report.add_result(result)
|
|
293
|
+
|
|
294
|
+
return report
|
|
295
|
+
|
|
296
|
+
async def run_check(self, category: CheckCategory) -> CheckResult:
|
|
297
|
+
"""Run a specific health check."""
|
|
298
|
+
check_config = self.config["checks"].get(category.value, {})
|
|
299
|
+
handler = self._check_handlers.get(category)
|
|
300
|
+
|
|
301
|
+
if not handler:
|
|
302
|
+
return CheckResult(
|
|
303
|
+
category=category,
|
|
304
|
+
status=HealthStatus.ERROR,
|
|
305
|
+
score=0,
|
|
306
|
+
details={"error": f"No handler for {category.value}"},
|
|
307
|
+
)
|
|
308
|
+
|
|
309
|
+
return await self._run_check_async(category, handler, check_config)
|
|
310
|
+
|
|
311
|
+
async def _run_check_async(
|
|
312
|
+
self,
|
|
313
|
+
category: CheckCategory,
|
|
314
|
+
handler,
|
|
315
|
+
config: dict,
|
|
316
|
+
) -> CheckResult:
|
|
317
|
+
"""Run a check handler asynchronously.
|
|
318
|
+
|
|
319
|
+
This uses broad exception handling intentionally for graceful degradation.
|
|
320
|
+
Health checks are optional features - the system should continue even if some checks fail.
|
|
321
|
+
|
|
322
|
+
Note:
|
|
323
|
+
Full exception context is preserved via logger.exception() for debugging.
|
|
324
|
+
"""
|
|
325
|
+
start_time = datetime.now()
|
|
326
|
+
try:
|
|
327
|
+
result: CheckResult = await asyncio.to_thread(handler, config)
|
|
328
|
+
result.duration_ms = int((datetime.now() - start_time).total_seconds() * 1000)
|
|
329
|
+
return result
|
|
330
|
+
except Exception as e:
|
|
331
|
+
# INTENTIONAL: Broad exception handler for graceful degradation of optional checks
|
|
332
|
+
# Full traceback preserved for debugging
|
|
333
|
+
logger.exception(f"Health check failed for {category.value}: {e}")
|
|
334
|
+
return CheckResult(
|
|
335
|
+
category=category,
|
|
336
|
+
status=HealthStatus.ERROR,
|
|
337
|
+
score=0,
|
|
338
|
+
details={"error": str(e)},
|
|
339
|
+
duration_ms=int((datetime.now() - start_time).total_seconds() * 1000),
|
|
340
|
+
)
|
|
341
|
+
|
|
342
|
+
    def _run_lint_check(self, config: dict) -> CheckResult:
        """Run linting check using ruff or flake8.

        Invokes the configured linter on the project root, converts each
        reported diagnostic into a HealthIssue, and scores the result
        (100 minus 5 per issue, floored at 0). Returns SKIP/100 when the
        tool is not installed, and ERROR/0 when the tool fails to run or
        its output cannot be parsed.
        """
        tool = config.get("tool", "ruff")
        issues = []

        if not self._is_tool_available(tool):
            return CheckResult(
                category=CheckCategory.LINT,
                status=HealthStatus.SKIP,
                score=100,
                tool_used=tool,
                details={"skip_reason": f"{tool} not available"},
            )

        try:
            if tool == "ruff":
                result = subprocess.run(
                    ["ruff", "check", "--output-format=json", str(self.project_root)],
                    check=False,
                    capture_output=True,
                    text=True,
                    cwd=str(self.project_root),
                )
                if result.stdout:
                    # ruff emits a JSON array of diagnostics on stdout.
                    ruff_issues = json.loads(result.stdout)
                    for item in ruff_issues:
                        issues.append(
                            HealthIssue(
                                category=CheckCategory.LINT,
                                file_path=item.get("filename", ""),
                                line=item.get("location", {}).get("row"),
                                code=item.get("code", ""),
                                message=item.get("message", ""),
                                # W-prefixed rule codes are warnings; everything else errors.
                                severity=(
                                    "warning" if item.get("code", "").startswith("W") else "error"
                                ),
                                # A non-null "fix" entry means ruff can auto-fix it.
                                fixable=item.get("fix") is not None,
                                fix_command="ruff check --fix" if item.get("fix") else None,
                            ),
                        )
            else:
                # Fallback to flake8
                result = subprocess.run(
                    ["flake8", "--format=json", str(self.project_root)],
                    check=False,
                    capture_output=True,
                    text=True,
                    cwd=str(self.project_root),
                )
                # Parse flake8 output...
                # NOTE(review): flake8 output is never parsed here, so `issues`
                # stays empty and the flake8 path always scores 100/PASS.

            # Calculate score based on issues
            score = max(0, 100 - len(issues) * 5)  # -5 per issue
            # Lint findings never FAIL the check outright; worst case is WARN.
            status = HealthStatus.PASS if not issues else HealthStatus.WARN

            return CheckResult(
                category=CheckCategory.LINT,
                status=status,
                score=score,
                issues=issues,
                tool_used=tool,
                details={"total_files_checked": len({i.file_path for i in issues}) or "all"},
            )

        except json.JSONDecodeError as e:
            # Tool output not in expected JSON format
            logger.warning(f"Lint check JSON parse error ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.LINT,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": f"Failed to parse {tool} output: {e}"},
            )
        except subprocess.SubprocessError as e:
            # Tool execution failed
            logger.error(f"Lint check subprocess error ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.LINT,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": f"Failed to run {tool}: {e}"},
            )
        except Exception as e:
            # Unexpected errors - preserve full context for debugging
            # INTENTIONAL: Broad handler for graceful degradation of optional check
            logger.exception(f"Unexpected error in lint check ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.LINT,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": str(e)},
            )
|
|
437
|
+
|
|
438
|
+
    def _run_format_check(self, config: dict) -> CheckResult:
        """Run formatting check using black or prettier.

        Runs the formatter in check mode, records one HealthIssue per file
        that needs reformatting, and scores 100 minus 10 per file. Returns
        SKIP/100 when the tool is missing and ERROR/0 on execution failure.
        """
        tool = config.get("tool", "black")
        issues = []

        if not self._is_tool_available(tool):
            return CheckResult(
                category=CheckCategory.FORMAT,
                status=HealthStatus.SKIP,
                score=100,
                tool_used=tool,
                details={"skip_reason": f"{tool} not available"},
            )

        try:
            if tool == "black":
                # --check makes black exit non-zero when files need changes;
                # --diff prints the would-be changes to stdout.
                result = subprocess.run(
                    ["black", "--check", "--diff", str(self.project_root)],
                    check=False,
                    capture_output=True,
                    text=True,
                    cwd=str(self.project_root),
                )

                if result.returncode != 0:
                    # Parse diff output to find files needing formatting
                    # NOTE(review): this branch expects "would reformat" lines in
                    # stdout after a "--- " diff header; black typically prints
                    # "would reformat" messages to stderr (handled below) — TODO
                    # confirm this stdout branch is reachable.
                    lines = result.stdout.split("\n") if result.stdout else []
                    current_file = None
                    for line in lines:
                        if line.startswith("--- "):
                            current_file = line[4:].split("\t")[0]
                        elif current_file and line.startswith("would reformat"):
                            issues.append(
                                HealthIssue(
                                    category=CheckCategory.FORMAT,
                                    file_path=current_file,
                                    line=None,
                                    code="FORMAT",
                                    message="File needs reformatting",
                                    severity="warning",
                                    fixable=True,
                                    fix_command=f"black {current_file}",
                                ),
                            )

                # Also check stderr for files that would be reformatted
                if result.stderr:
                    for line in result.stderr.split("\n"):
                        if "would reformat" in line:
                            file_path = line.replace("would reformat ", "").strip()
                            # De-duplicate against issues found in stdout above.
                            if file_path and not any(i.file_path == file_path for i in issues):
                                issues.append(
                                    HealthIssue(
                                        category=CheckCategory.FORMAT,
                                        file_path=file_path,
                                        line=None,
                                        code="FORMAT",
                                        message="File needs reformatting",
                                        severity="warning",
                                        fixable=True,
                                        fix_command=f"black {file_path}",
                                    ),
                                )

            score = max(0, 100 - len(issues) * 10)  # -10 per file
            # Formatting problems are always auto-fixable, so worst case is WARN.
            status = HealthStatus.PASS if not issues else HealthStatus.WARN

            return CheckResult(
                category=CheckCategory.FORMAT,
                status=status,
                score=score,
                issues=issues,
                tool_used=tool,
                details={"files_need_formatting": len(issues)},
            )

        except subprocess.SubprocessError as e:
            # Tool execution failed
            logger.error(f"Format check subprocess error ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.FORMAT,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": f"Failed to run {tool}: {e}"},
            )
        except Exception as e:
            # Unexpected errors - preserve full context for debugging
            # INTENTIONAL: Broad handler for graceful degradation of optional check
            logger.exception(f"Unexpected error in format check ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.FORMAT,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": str(e)},
            )
|
|
535
|
+
|
|
536
|
+
    def _run_type_check(self, config: dict) -> CheckResult:
        """Run type checking using pyright or mypy.

        Converts each diagnostic into a HealthIssue and scores 100 minus 10
        per issue. Status is PASS with no issues, FAIL with more than five,
        WARN otherwise. Returns SKIP/100 when the tool is missing and
        ERROR/0 on execution or parse failure.
        """
        tool = config.get("tool", "pyright")
        issues = []

        if not self._is_tool_available(tool):
            return CheckResult(
                category=CheckCategory.TYPES,
                status=HealthStatus.SKIP,
                score=100,
                tool_used=tool,
                details={"skip_reason": f"{tool} not available"},
            )

        try:
            if tool == "pyright":
                result = subprocess.run(
                    ["pyright", "--outputjson"],
                    check=False,
                    capture_output=True,
                    text=True,
                    cwd=str(self.project_root),
                )

                if result.stdout:
                    try:
                        data = json.loads(result.stdout)
                        diagnostics = data.get("generalDiagnostics", [])
                        for diag in diagnostics:
                            issues.append(
                                HealthIssue(
                                    category=CheckCategory.TYPES,
                                    file_path=diag.get("file", ""),
                                    line=diag.get("range", {}).get("start", {}).get("line"),
                                    code=diag.get("rule", "TYPE"),
                                    message=diag.get("message", ""),
                                    # assumes pyright severity 1 == error — TODO confirm
                                    severity="error" if diag.get("severity") == 1 else "warning",
                                    fixable=False,
                                ),
                            )
                    except json.JSONDecodeError:
                        # Unparseable pyright output is ignored; check proceeds
                        # with zero issues rather than erroring out.
                        pass

            elif tool == "mypy":
                result = subprocess.run(
                    ["mypy", "--show-error-codes", "--no-error-summary", str(self.project_root)],
                    check=False,
                    capture_output=True,
                    text=True,
                    cwd=str(self.project_root),
                )

                if result.stdout:
                    # mypy lines look like "path:line: error: message"; split on
                    # the first three colons to separate file, line, and message.
                    for line in result.stdout.split("\n"):
                        if ": error:" in line or ": note:" in line:
                            parts = line.split(":", 3)
                            if len(parts) >= 4:
                                issues.append(
                                    HealthIssue(
                                        category=CheckCategory.TYPES,
                                        file_path=parts[0],
                                        line=int(parts[1]) if parts[1].isdigit() else None,
                                        code="TYPE",
                                        message=parts[3].strip() if len(parts) > 3 else "",
                                        severity="error",
                                        fixable=False,
                                    ),
                                )

            score = max(0, 100 - len(issues) * 10)  # -10 per type error
            status = (
                HealthStatus.PASS
                if not issues
                else (HealthStatus.FAIL if len(issues) > 5 else HealthStatus.WARN)
            )

            return CheckResult(
                category=CheckCategory.TYPES,
                status=status,
                score=score,
                issues=issues,
                tool_used=tool,
                details={"type_errors": len(issues)},
            )

        except json.JSONDecodeError as e:
            # Tool output not in expected JSON format (pyright specific)
            logger.warning(f"Type check JSON parse error ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.TYPES,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": f"Failed to parse {tool} output: {e}"},
            )
        except subprocess.SubprocessError as e:
            # Tool execution failed
            logger.error(f"Type check subprocess error ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.TYPES,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": f"Failed to run {tool}: {e}"},
            )
        except Exception as e:
            # Unexpected errors - preserve full context for debugging
            # INTENTIONAL: Broad handler for graceful degradation of optional check
            logger.exception(f"Unexpected error in type check ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.TYPES,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": str(e)},
            )
|
|
652
|
+
|
|
653
|
+
    def _run_test_check(self, config: dict) -> CheckResult:
        """Run test suite using pytest.

        Collects tests, runs the suite with a 5-minute timeout, parses the
        summary line for pass/fail counts, and scores the percentage of
        passing tests (100 when no tests ran). Any failure makes the status
        FAIL; timeouts and execution errors yield ERROR/0.
        """
        tool = config.get("tool", "pytest")

        if not self._is_tool_available(tool):
            return CheckResult(
                category=CheckCategory.TESTS,
                status=HealthStatus.SKIP,
                score=100,
                tool_used=tool,
                details={"skip_reason": f"{tool} not available"},
            )

        try:
            result = subprocess.run(
                ["pytest", "--tb=no", "-q", "--co", "-q"],  # Collect only, quiet
                check=False,
                capture_output=True,
                text=True,
                cwd=str(self.project_root),
            )

            # Count collected tests
            # NOTE(review): test_count is computed here but never used below;
            # the score is derived solely from the passed/failed counts.
            test_count = 0
            for line in result.stdout.split("\n"):
                if "test" in line.lower() and "::" in line:
                    test_count += 1

            # Run actual tests
            result = subprocess.run(
                ["pytest", "--tb=short", "-q"],
                check=False,
                capture_output=True,
                text=True,
                cwd=str(self.project_root),
                timeout=300,  # 5 minute timeout
            )

            passed = 0
            failed = 0
            issues = []

            # Parse pytest output
            # The summary line looks like "3 passed, 1 failed in 0.12s"; the
            # token before "passed"/"failed" is the count.
            for line in result.stdout.split("\n"):
                if " passed" in line:
                    parts = line.split()
                    for i, part in enumerate(parts):
                        if part == "passed" and i > 0:
                            try:
                                passed = int(parts[i - 1])
                            except ValueError:
                                pass
                if " failed" in line:
                    parts = line.split()
                    for i, part in enumerate(parts):
                        if part == "failed" and i > 0:
                            try:
                                failed = int(parts[i - 1])
                            except ValueError:
                                pass

            # Create issues for failed tests
            if result.returncode != 0 and result.stdout:
                lines = result.stdout.split("\n")
                for line in lines:
                    if "FAILED" in line:
                        # Extract test name and file
                        if "::" in line:
                            test_path = (
                                line.split("FAILED")[1].strip() if "FAILED" in line else line
                            )
                            # Strip trailing " - <reason>" suffix pytest appends.
                            test_path = test_path.split(" -")[0].strip()
                            file_part = test_path.split("::")[0] if "::" in test_path else test_path
                            issues.append(
                                HealthIssue(
                                    category=CheckCategory.TESTS,
                                    file_path=file_part,
                                    line=None,
                                    code="TEST_FAIL",
                                    message=f"Test failed: {test_path}",
                                    severity="error",
                                    fixable=False,
                                ),
                            )

            total = passed + failed
            # Score is pass-rate percentage; an empty suite counts as healthy.
            score = int((passed / total) * 100) if total > 0 else 100
            status = HealthStatus.PASS if failed == 0 else HealthStatus.FAIL

            return CheckResult(
                category=CheckCategory.TESTS,
                status=status,
                score=score,
                issues=issues,
                tool_used=tool,
                details={
                    "passed": passed,
                    "failed": failed,
                    "total": total,
                },
            )

        except subprocess.TimeoutExpired:
            # Tests took too long - specific timeout error
            logger.error(f"Test check timeout ({tool}): Tests took longer than 5 minutes")
            return CheckResult(
                category=CheckCategory.TESTS,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": "Test suite timed out after 5 minutes"},
            )
        except subprocess.SubprocessError as e:
            # Tool execution failed
            logger.error(f"Test check subprocess error ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.TESTS,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": f"Failed to run {tool}: {e}"},
            )
        except Exception as e:
            # Unexpected errors - preserve full context for debugging
            # INTENTIONAL: Broad handler for graceful degradation of optional check
            logger.exception(f"Unexpected error in test check ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.TESTS,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": str(e)},
            )
|
|
786
|
+
|
|
787
|
+
    def _run_security_check(self, config: dict) -> CheckResult:
        """Run security check using bandit.

        Runs bandit recursively over the project, maps each finding to a
        HealthIssue (HIGH/MEDIUM severity become "error", the rest
        "warning"), and scores 100 minus 20 per error and 5 per warning.
        Any error-severity finding makes the status FAIL.
        """
        tool = config.get("tool", "bandit")
        issues = []

        if not self._is_tool_available(tool):
            return CheckResult(
                category=CheckCategory.SECURITY,
                status=HealthStatus.SKIP,
                score=100,
                tool_used=tool,
                details={"skip_reason": f"{tool} not available"},
            )

        try:
            result = subprocess.run(
                ["bandit", "-r", "-f", "json", str(self.project_root)],
                check=False,
                capture_output=True,
                text=True,
                cwd=str(self.project_root),
            )

            if result.stdout:
                try:
                    data = json.loads(result.stdout)
                    for item in data.get("results", []):
                        severity = item.get("issue_severity", "LOW")
                        issues.append(
                            HealthIssue(
                                category=CheckCategory.SECURITY,
                                file_path=item.get("filename", ""),
                                line=item.get("line_number"),
                                code=item.get("test_id", ""),
                                message=item.get("issue_text", ""),
                                # Collapse bandit's three levels into two:
                                # HIGH/MEDIUM -> error, LOW -> warning.
                                severity="error" if severity in ["HIGH", "MEDIUM"] else "warning",
                                fixable=False,
                            ),
                        )
                except json.JSONDecodeError:
                    # Unparseable bandit output is ignored; check proceeds
                    # with zero issues rather than erroring out.
                    pass

            # Weight by severity
            high_count = sum(1 for i in issues if i.severity == "error")
            low_count = len(issues) - high_count
            score = max(0, 100 - high_count * 20 - low_count * 5)
            status = (
                HealthStatus.PASS
                if not issues
                else (HealthStatus.FAIL if high_count > 0 else HealthStatus.WARN)
            )

            return CheckResult(
                category=CheckCategory.SECURITY,
                status=status,
                score=score,
                issues=issues,
                tool_used=tool,
                details={
                    "high_severity": high_count,
                    "low_severity": low_count,
                },
            )

        except json.JSONDecodeError as e:
            # Tool output not in expected JSON format
            logger.warning(f"Security check JSON parse error ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.SECURITY,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": f"Failed to parse {tool} output: {e}"},
            )
        except subprocess.SubprocessError as e:
            # Tool execution failed
            logger.error(f"Security check subprocess error ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.SECURITY,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": f"Failed to run {tool}: {e}"},
            )
        except Exception as e:
            # Unexpected errors - preserve full context for debugging
            # INTENTIONAL: Broad handler for graceful degradation of optional check
            logger.exception(f"Unexpected error in security check ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.SECURITY,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": str(e)},
            )
|
|
882
|
+
|
|
883
|
+
    def _run_deps_check(self, config: dict) -> CheckResult:
        """Run dependency check using pip-audit.

        Maps each reported vulnerability to a HealthIssue and scores 100
        minus 15 per vulnerable package. Vulnerabilities with known fixed
        versions get a ready-to-run pip upgrade command attached.
        """
        tool = config.get("tool", "pip-audit")
        issues = []

        if not self._is_tool_available(tool):
            return CheckResult(
                category=CheckCategory.DEPS,
                status=HealthStatus.SKIP,
                score=100,
                tool_used=tool,
                details={"skip_reason": f"{tool} not available"},
            )

        try:
            result = subprocess.run(
                ["pip-audit", "--format=json"],
                check=False,
                capture_output=True,
                text=True,
                cwd=str(self.project_root),
            )

            if result.stdout:
                try:
                    data = json.loads(result.stdout)
                    # assumes pip-audit emits a top-level JSON array of
                    # vulnerability records; newer versions may wrap results in
                    # an object — TODO confirm against the installed version.
                    for item in data:
                        issues.append(
                            HealthIssue(
                                category=CheckCategory.DEPS,
                                file_path="requirements.txt",
                                line=None,
                                code=item.get("id", "VULN"),
                                message=f"{item.get('name')}: {item.get('description', 'Vuln')}",
                                severity="error" if item.get("fix_versions") else "warning",
                                fixable=bool(item.get("fix_versions")),
                                fix_command=self._get_fix_cmd(item),
                            ),
                        )
                except json.JSONDecodeError:
                    # Unparseable pip-audit output is ignored; check proceeds
                    # with zero issues rather than erroring out.
                    pass

            score = max(0, 100 - len(issues) * 15)
            status = HealthStatus.PASS if not issues else HealthStatus.WARN

            return CheckResult(
                category=CheckCategory.DEPS,
                status=status,
                score=score,
                issues=issues,
                tool_used=tool,
                details={"vulnerable_packages": len(issues)},
            )

        except json.JSONDecodeError as e:
            # Tool output not in expected JSON format
            logger.warning(f"Dependency check JSON parse error ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.DEPS,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": f"Failed to parse {tool} output: {e}"},
            )
        except subprocess.SubprocessError as e:
            # Tool execution failed
            logger.error(f"Dependency check subprocess error ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.DEPS,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": f"Failed to run {tool}: {e}"},
            )
        except Exception as e:
            # Unexpected errors - preserve full context for debugging
            # INTENTIONAL: Broad handler for graceful degradation of optional check
            logger.exception(f"Unexpected error in dependency check ({tool}): {e}")
            return CheckResult(
                category=CheckCategory.DEPS,
                status=HealthStatus.ERROR,
                score=0,
                tool_used=tool,
                details={"error": str(e)},
            )
|
|
968
|
+
|
|
969
|
+
def _get_fix_cmd(self, item: dict) -> str | None:
|
|
970
|
+
"""Get pip install command to fix a vulnerable package."""
|
|
971
|
+
fix_versions = item.get("fix_versions")
|
|
972
|
+
if fix_versions:
|
|
973
|
+
return f"pip install {item.get('name')}=={fix_versions[0]}"
|
|
974
|
+
return None
|
|
975
|
+
|
|
976
|
+
|
|
977
|
+
class AutoFixer:
    """Apply automatic fixes to code health issues.

    Known-safe fixes (formatting, whitespace, import ordering) can be
    applied without confirmation; everything else is skipped unless a
    future interactive prompt approves it.
    """

    def __init__(self, config: dict[str, Any] | None = None):
        """Initialize the auto-fixer.

        Args:
            config: Auto-fix settings; falls back to DEFAULT_CONFIG["auto_fix"].
        """
        self.config: dict[str, Any] = config or DEFAULT_CONFIG.get("auto_fix", {})  # type: ignore[assignment]
        # Whether known-safe fixes may be applied automatically.
        self.safe_fixes = self.config.get("safe_fixes", True)
        # Whether to prompt before applying non-safe fixes (interactive mode).
        self.prompt_fixes = self.config.get("prompt_fixes", True)

    def preview_fixes(self, report: HealthReport) -> list[dict]:
        """Show what would be fixed without applying.

        Returns:
            One dict per fixable issue with its category, file, message,
            fix command, and whether it is considered safe to auto-apply.
        """
        fixes = []
        for result in report.results:
            for issue in result.issues:
                if issue.fixable and issue.fix_command:
                    fixes.append(
                        {
                            "category": issue.category.value,
                            "file": issue.file_path,
                            "issue": issue.message,
                            "fix_command": issue.fix_command,
                            "safe": self._is_safe_fix(issue),
                        },
                    )
        return fixes

    def _is_safe_fix(self, issue: HealthIssue) -> bool:
        """Determine if a fix is safe to apply automatically.

        Whitespace/import lint codes and any formatting-category issue are
        considered safe; everything else requires review.
        """
        safe_codes = ["FORMAT", "W291", "W292", "W293", "I001"]  # Whitespace, imports
        return issue.code in safe_codes or issue.category == CheckCategory.FORMAT

    async def fix_all(self, report: HealthReport, interactive: bool = False) -> dict[str, Any]:
        """Apply all safe fixes, optionally prompt for others.

        Args:
            report: Health report whose fixable issues should be processed.
            interactive: Reserved for prompting on non-safe fixes; currently
                non-safe fixes are skipped either way.

        Returns:
            Dict with "fixed", "skipped", and "failed" lists of issue dicts.
        """
        results: dict[str, Any] = {
            "fixed": [],
            "skipped": [],
            "failed": [],
        }

        for result in report.results:
            for issue in result.issues:
                if not (issue.fixable and issue.fix_command):
                    continue
                if self._is_safe_fix(issue):
                    success = await self._apply_fix(issue)
                    results["fixed" if success else "failed"].append(issue.to_dict())
                else:
                    # Non-safe fixes are never auto-applied; in interactive
                    # mode a confirmation prompt would go here.
                    results["skipped"].append(issue.to_dict())

        return results

    async def fix_category(self, report: HealthReport, category: CheckCategory) -> dict[str, Any]:
        """Fix issues in a specific category.

        Unlike fix_all, this applies every fixable issue in the category
        regardless of whether it is classified as safe.
        """
        results: dict[str, Any] = {
            "fixed": [],
            "skipped": [],
            "failed": [],
        }

        result = report.get_result(category)
        if not result:
            return results

        for issue in result.issues:
            if issue.fixable and issue.fix_command:
                success = await self._apply_fix(issue)
                results["fixed" if success else "failed"].append(issue.to_dict())

        return results

    async def _apply_fix(self, issue: HealthIssue) -> bool:
        """Apply a single fix.

        This uses broad exception handling intentionally for graceful degradation.
        Auto-fixes are optional - the system should continue even if some fixes fail.

        Note:
            Full exception context is preserved via logger.exception() for debugging.

        Returns:
            True when the fix command exited with status 0.
        """
        if not issue.fix_command:
            return False

        import shlex  # local import: only needed when a fix is actually applied

        try:
            # FIX: shlex.split honors shell quoting, so fix commands with
            # quoted arguments (e.g. paths containing spaces) are tokenized
            # correctly; the previous str.split() broke such commands apart.
            result = await asyncio.to_thread(
                subprocess.run,
                shlex.split(issue.fix_command),
                capture_output=True,
                text=True,
            )
            return result.returncode == 0
        except ValueError as e:
            # shlex.split raises ValueError on unbalanced quoting
            logger.error(f"Auto-fix subprocess error for {issue.file_path}: {e}")
            return False
        except subprocess.SubprocessError as e:
            # Fix command execution failed
            logger.error(f"Auto-fix subprocess error for {issue.file_path}: {e}")
            return False
        except Exception as e:
            # Unexpected errors - preserve full context for debugging
            # INTENTIONAL: Broad handler for graceful degradation of optional auto-fix
            logger.exception(f"Unexpected error applying fix to {issue.file_path}: {e}")
            return False
|
|
1084
|
+
|
|
1085
|
+
|
|
1086
|
+
class HealthTrendTracker:
|
|
1087
|
+
"""Track code health trends and identify patterns."""
|
|
1088
|
+
|
|
1089
|
+
def __init__(self, project_root: str = "."):
|
|
1090
|
+
"""Initialize the trend tracker."""
|
|
1091
|
+
self.project_root = Path(project_root)
|
|
1092
|
+
self.history_dir = self.project_root / ".empathy" / "health_history"
|
|
1093
|
+
self.history_dir.mkdir(parents=True, exist_ok=True)
|
|
1094
|
+
|
|
1095
|
+
def record_check(self, report: HealthReport) -> None:
|
|
1096
|
+
"""Save health check to history."""
|
|
1097
|
+
today = datetime.now().strftime("%Y-%m-%d")
|
|
1098
|
+
filepath = self.history_dir / f"{today}.json"
|
|
1099
|
+
|
|
1100
|
+
# Load existing or create new
|
|
1101
|
+
history = []
|
|
1102
|
+
if filepath.exists():
|
|
1103
|
+
try:
|
|
1104
|
+
history = json.loads(filepath.read_text())
|
|
1105
|
+
except json.JSONDecodeError:
|
|
1106
|
+
history = []
|
|
1107
|
+
|
|
1108
|
+
# Add new entry
|
|
1109
|
+
history.append(
|
|
1110
|
+
{
|
|
1111
|
+
"timestamp": datetime.now().isoformat(),
|
|
1112
|
+
"overall_score": report.overall_score,
|
|
1113
|
+
"status": report.status.value,
|
|
1114
|
+
"total_issues": report.total_issues,
|
|
1115
|
+
"results": {r.category.value: r.score for r in report.results},
|
|
1116
|
+
},
|
|
1117
|
+
)
|
|
1118
|
+
|
|
1119
|
+
filepath.write_text(json.dumps(history, indent=2))
|
|
1120
|
+
|
|
1121
|
+
def get_trends(self, days: int = 30) -> dict[str, Any]:
|
|
1122
|
+
"""Analyze health trends over time."""
|
|
1123
|
+
trends: dict[str, Any] = {
|
|
1124
|
+
"period_days": days,
|
|
1125
|
+
"data_points": [],
|
|
1126
|
+
"average_score": 0,
|
|
1127
|
+
"trend_direction": "stable",
|
|
1128
|
+
"score_change": 0,
|
|
1129
|
+
}
|
|
1130
|
+
|
|
1131
|
+
scores = []
|
|
1132
|
+
for i in range(days):
|
|
1133
|
+
date = (datetime.now() - timedelta(days=i)).strftime("%Y-%m-%d")
|
|
1134
|
+
filepath = self.history_dir / f"{date}.json"
|
|
1135
|
+
|
|
1136
|
+
if filepath.exists():
|
|
1137
|
+
try:
|
|
1138
|
+
data = json.loads(filepath.read_text())
|
|
1139
|
+
if data:
|
|
1140
|
+
# Get last entry of the day
|
|
1141
|
+
entry = data[-1]
|
|
1142
|
+
scores.append(entry.get("overall_score", 0))
|
|
1143
|
+
trends["data_points"].append(
|
|
1144
|
+
{
|
|
1145
|
+
"date": date,
|
|
1146
|
+
"score": entry.get("overall_score", 0),
|
|
1147
|
+
},
|
|
1148
|
+
)
|
|
1149
|
+
except json.JSONDecodeError:
|
|
1150
|
+
pass
|
|
1151
|
+
|
|
1152
|
+
if scores:
|
|
1153
|
+
trends["average_score"] = int(sum(scores) / len(scores))
|
|
1154
|
+
|
|
1155
|
+
if len(scores) >= 2:
|
|
1156
|
+
recent = scores[:7] if len(scores) >= 7 else scores[: len(scores) // 2]
|
|
1157
|
+
older = scores[7:] if len(scores) >= 7 else scores[len(scores) // 2 :]
|
|
1158
|
+
|
|
1159
|
+
recent_avg = sum(recent) / len(recent) if recent else 0
|
|
1160
|
+
older_avg = sum(older) / len(older) if older else 0
|
|
1161
|
+
|
|
1162
|
+
trends["score_change"] = int(recent_avg - older_avg)
|
|
1163
|
+
|
|
1164
|
+
if trends["score_change"] > 5:
|
|
1165
|
+
trends["trend_direction"] = "improving"
|
|
1166
|
+
elif trends["score_change"] < -5:
|
|
1167
|
+
trends["trend_direction"] = "declining"
|
|
1168
|
+
else:
|
|
1169
|
+
trends["trend_direction"] = "stable"
|
|
1170
|
+
|
|
1171
|
+
return trends
|
|
1172
|
+
|
|
1173
|
+
def identify_hotspots(self) -> list[dict]:
    """Find files that consistently have issues.

    Walks every daily history file in ``self.history_dir`` and tallies
    how often each source file appears in recorded issues.

    Returns:
        Up to 10 dicts of the form ``{"file": path, "issue_count": n}``,
        ordered by descending issue count.
    """
    # Counter replaces the hand-rolled dict.get(k, 0) + 1 tally and
    # most_common() replaces the manual sort-and-slice.
    from collections import Counter

    issue_counts: Counter[str] = Counter()

    for history_file in self.history_dir.glob("*.json"):
        try:
            entries = json.loads(history_file.read_text())
            for entry in entries:
                for result in entry.get("results", {}).values():
                    if not isinstance(result, dict):
                        continue
                    for issue in result.get("issues", []):
                        file_path = issue.get("file_path", "")
                        if file_path:
                            issue_counts[file_path] += 1
        # AttributeError/TypeError added: a history file whose top-level
        # entries are not dicts previously escaped the best-effort skip
        # and crashed the whole scan.
        except (json.JSONDecodeError, KeyError, AttributeError, TypeError):
            pass

    return [
        {"file": path, "issue_count": count}
        for path, count in issue_counts.most_common(10)
    ]
|
|
1194
|
+
|
|
1195
|
+
|
|
1196
|
+
def format_health_output(
    report: HealthReport,
    level: int = 1,
    thresholds: dict | None = None,
) -> str:
    """Format health report for display.

    Args:
        report: The health report to format
        level: Detail level (1=summary, 2=details, 3=full)
        thresholds: Score thresholds for status icons

    Returns:
        The rendered report as a newline-joined string.
    """
    limits = thresholds or DEFAULT_THRESHOLDS

    def pick_icon(value: int) -> str:
        # Traffic-light icon derived from the configured score limits.
        if value >= limits["good"]:
            return "🟢"
        if value >= limits["warning"]:
            return "🟡"
        return "🔴"

    out: list[str] = []

    # Headline: overall score with a status word matching the icon.
    score = report.overall_score
    if score >= limits["good"]:
        status_word = "Good"
    elif score >= limits["warning"]:
        status_word = "Warning"
    else:
        status_word = "Critical"
    out.append(f"{pick_icon(score)} Code Health: {status_word} ({score}/100)")
    out.append("")

    # Level 1: one summary line per executed check (skipped checks omitted).
    for result in report.results:
        if result.status == HealthStatus.SKIP:
            continue

        icon = pick_icon(result.score)
        category_name = result.category.value.capitalize()
        category = result.category

        if category == CheckCategory.TESTS:
            details = result.details
            out.append(
                f"{icon} {category_name}: {details.get('passed', 0)}P/{details.get('failed', 0)}F",
            )
        elif category == CheckCategory.LINT:
            out.append(f"{icon} {category_name}: {result.issue_count} warnings")
        elif category == CheckCategory.TYPES:
            out.append(f"{icon} {category_name}: {result.issue_count} errors")
        elif category == CheckCategory.SECURITY:
            details = result.details
            high = details.get("high_severity", 0)
            low = details.get("low_severity", 0)
            if high or low:
                out.append(f"{icon} {category_name}: {high} high, {low} low severity")
            else:
                out.append(f"{icon} {category_name}: No vulnerabilities")
        elif category == CheckCategory.FORMAT:
            if result.issue_count:
                out.append(f"{icon} {category_name}: {result.issue_count} files need formatting")
            else:
                out.append(f"{icon} {category_name}: All files formatted")
        elif category == CheckCategory.DEPS:
            if result.issue_count:
                out.append(f"{icon} {category_name}: {result.issue_count} vulnerable packages")
            else:
                out.append(f"{icon} {category_name}: All dependencies secure")

    # Level 2: first few concrete issues per category.
    if level >= 2:
        out.extend(["", "━" * 40, "Details:", ""])

        for result in report.results:
            if not result.issues:
                continue
            out.append(f" {result.category.value.upper()} ({len(result.issues)} issues)")
            for issue in result.issues[:5]:  # Show first 5
                loc = f":{issue.line}" if issue.line else ""
                out.append(f" {issue.file_path}{loc}")
                out.append(f" {issue.code}: {issue.message}")
            if len(result.issues) > 5:
                out.append(f" ... and {len(result.issues) - 5} more")
            out.append("")

    # Level 3: full per-check metadata.
    if level >= 3:
        out.extend(["", "━" * 40, "Full Report"])
        out.append(f"Generated: {report.generated_at}")
        out.append(f"Project: {report.project_root}")
        out.append("")

        for result in report.results:
            out.append(f"## {result.category.value.capitalize()} (Score: {result.score}/100)")
            out.append(f" Tool: {result.tool_used}")
            out.append(f" Duration: {result.duration_ms}ms")
            if result.details:
                for key, value in result.details.items():
                    out.append(f" {key}: {value}")
            out.append("")

    # Interactive action prompt, only shown when auto-fixes exist.
    if report.total_fixable > 0:
        out.extend(["", "━" * 40])
        out.append(
            f"[1] Fix {report.total_fixable} auto-fixable issues [2] See details [3] Full report",
        )

    return "\n".join(out)
|