attune-ai 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
|
@@ -0,0 +1,1084 @@
|
|
|
1
|
+
"""Bug Prediction Workflow
|
|
2
|
+
|
|
3
|
+
Analyzes code against learned bug patterns to predict likely issues
|
|
4
|
+
before they manifest in production.
|
|
5
|
+
|
|
6
|
+
Stages:
|
|
7
|
+
1. scan (CHEAP) - Scan codebase for code patterns and structures
|
|
8
|
+
2. correlate (CAPABLE) - Match against historical bug patterns
|
|
9
|
+
3. predict (CAPABLE) - Identify high-risk areas based on correlation
|
|
10
|
+
4. recommend (PREMIUM) - Generate actionable fix recommendations
|
|
11
|
+
|
|
12
|
+
Copyright 2025 Smart-AI-Memory
|
|
13
|
+
Licensed under Fair Source License 0.9
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import fnmatch
|
|
17
|
+
import json
|
|
18
|
+
import logging
|
|
19
|
+
import re
|
|
20
|
+
from pathlib import Path
|
|
21
|
+
from typing import Any
|
|
22
|
+
|
|
23
|
+
import yaml
|
|
24
|
+
|
|
25
|
+
from .base import BaseWorkflow, ModelTier
|
|
26
|
+
from .step_config import WorkflowStepConfig
|
|
27
|
+
|
|
28
|
+
logger = logging.getLogger(__name__)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _load_bug_predict_config() -> dict:
|
|
32
|
+
"""Load bug_predict configuration from attune.config.yml.
|
|
33
|
+
|
|
34
|
+
Returns:
|
|
35
|
+
Dict with bug_predict settings, or defaults if not found.
|
|
36
|
+
|
|
37
|
+
"""
|
|
38
|
+
defaults = {
|
|
39
|
+
"risk_threshold": 0.7,
|
|
40
|
+
"exclude_files": [],
|
|
41
|
+
"acceptable_exception_contexts": ["version", "config", "cleanup", "optional"],
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
config_paths = [
|
|
45
|
+
Path("attune.config.yml"),
|
|
46
|
+
Path("attune.config.yaml"),
|
|
47
|
+
Path(".empathy.yml"),
|
|
48
|
+
Path(".empathy.yaml"),
|
|
49
|
+
]
|
|
50
|
+
|
|
51
|
+
for config_path in config_paths:
|
|
52
|
+
if config_path.exists():
|
|
53
|
+
try:
|
|
54
|
+
with open(config_path) as f:
|
|
55
|
+
config = yaml.safe_load(f)
|
|
56
|
+
if config and "bug_predict" in config:
|
|
57
|
+
bug_config = config["bug_predict"]
|
|
58
|
+
return {
|
|
59
|
+
"risk_threshold": bug_config.get(
|
|
60
|
+
"risk_threshold",
|
|
61
|
+
defaults["risk_threshold"],
|
|
62
|
+
),
|
|
63
|
+
"exclude_files": bug_config.get(
|
|
64
|
+
"exclude_files",
|
|
65
|
+
defaults["exclude_files"],
|
|
66
|
+
),
|
|
67
|
+
"acceptable_exception_contexts": bug_config.get(
|
|
68
|
+
"acceptable_exception_contexts",
|
|
69
|
+
defaults["acceptable_exception_contexts"],
|
|
70
|
+
),
|
|
71
|
+
}
|
|
72
|
+
except (yaml.YAMLError, OSError):
|
|
73
|
+
pass
|
|
74
|
+
|
|
75
|
+
return defaults
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def _should_exclude_file(file_path: str, exclude_patterns: list[str]) -> bool:
|
|
79
|
+
"""Check if a file should be excluded based on glob patterns.
|
|
80
|
+
|
|
81
|
+
Args:
|
|
82
|
+
file_path: Path to the file
|
|
83
|
+
exclude_patterns: List of glob patterns (e.g., "**/test_*.py")
|
|
84
|
+
|
|
85
|
+
Returns:
|
|
86
|
+
True if the file matches any exclusion pattern.
|
|
87
|
+
|
|
88
|
+
"""
|
|
89
|
+
for pattern in exclude_patterns:
|
|
90
|
+
# Handle ** patterns for recursive matching
|
|
91
|
+
if "**" in pattern:
|
|
92
|
+
# Convert ** glob to fnmatch-compatible pattern
|
|
93
|
+
parts = pattern.split("**")
|
|
94
|
+
if len(parts) == 2:
|
|
95
|
+
prefix, suffix = parts
|
|
96
|
+
# Check if file path contains the pattern structure
|
|
97
|
+
if prefix and not file_path.startswith(prefix.rstrip("/")):
|
|
98
|
+
continue
|
|
99
|
+
if suffix and fnmatch.fnmatch(file_path, f"*{suffix}"):
|
|
100
|
+
return True
|
|
101
|
+
if not suffix and fnmatch.fnmatch(file_path, f"*{prefix}*"):
|
|
102
|
+
return True
|
|
103
|
+
elif fnmatch.fnmatch(file_path, pattern) or fnmatch.fnmatch(
|
|
104
|
+
Path(file_path).name,
|
|
105
|
+
pattern,
|
|
106
|
+
):
|
|
107
|
+
return True
|
|
108
|
+
|
|
109
|
+
return False
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
def _is_acceptable_broad_exception(
|
|
113
|
+
line: str,
|
|
114
|
+
context_before: list[str],
|
|
115
|
+
context_after: list[str],
|
|
116
|
+
acceptable_contexts: list[str] | None = None,
|
|
117
|
+
) -> bool:
|
|
118
|
+
"""Check if a broad exception handler is acceptable based on context.
|
|
119
|
+
|
|
120
|
+
Acceptable patterns (configurable via acceptable_contexts):
|
|
121
|
+
- version: Version/metadata detection with fallback
|
|
122
|
+
- config: Config loading with default fallback
|
|
123
|
+
- optional: Optional feature detection (imports, hasattr)
|
|
124
|
+
- cleanup: Cleanup/teardown code
|
|
125
|
+
- logging: Logging-only handlers that re-raise
|
|
126
|
+
|
|
127
|
+
Args:
|
|
128
|
+
line: The line containing the except clause
|
|
129
|
+
context_before: Lines before the except
|
|
130
|
+
context_after: Lines after the except (the handler body)
|
|
131
|
+
acceptable_contexts: List of context types to accept (from config)
|
|
132
|
+
|
|
133
|
+
Returns:
|
|
134
|
+
True if the exception handler is acceptable, False if problematic.
|
|
135
|
+
|
|
136
|
+
"""
|
|
137
|
+
# Default acceptable contexts if not provided
|
|
138
|
+
if acceptable_contexts is None:
|
|
139
|
+
acceptable_contexts = ["version", "config", "cleanup", "optional"]
|
|
140
|
+
|
|
141
|
+
# Join context for pattern matching
|
|
142
|
+
before_text = "\n".join(context_before[-5:]).lower()
|
|
143
|
+
after_text = "\n".join(context_after[:5]).lower()
|
|
144
|
+
|
|
145
|
+
# Acceptable: Version/metadata detection
|
|
146
|
+
if "version" in acceptable_contexts:
|
|
147
|
+
if any(kw in before_text for kw in ["get_version", "version", "metadata", "__version__"]):
|
|
148
|
+
if any(kw in after_text for kw in ["return", "dev", "unknown", "0.0.0"]):
|
|
149
|
+
return True
|
|
150
|
+
|
|
151
|
+
# Acceptable: Config loading with fallback to defaults
|
|
152
|
+
if "config" in acceptable_contexts:
|
|
153
|
+
if any(kw in before_text for kw in ["config", "settings", "yaml", "json", "load"]):
|
|
154
|
+
if "pass" in after_text or "default" in after_text or "fallback" in after_text:
|
|
155
|
+
return True
|
|
156
|
+
|
|
157
|
+
# Acceptable: Optional import/feature detection
|
|
158
|
+
if "optional" in acceptable_contexts:
|
|
159
|
+
if "import" in before_text or "hasattr" in before_text:
|
|
160
|
+
if "pass" in after_text or "none" in after_text or "false" in after_text:
|
|
161
|
+
return True
|
|
162
|
+
|
|
163
|
+
# Acceptable: Cleanup with pass (often in __del__ or context managers)
|
|
164
|
+
if "cleanup" in acceptable_contexts:
|
|
165
|
+
if any(kw in before_text for kw in ["__del__", "__exit__", "cleanup", "close", "teardown"]):
|
|
166
|
+
return True
|
|
167
|
+
|
|
168
|
+
# Acceptable: Explicit logging then re-raise or return error
|
|
169
|
+
if "logging" in acceptable_contexts:
|
|
170
|
+
if "log" in after_text and ("raise" in after_text or "return" in after_text):
|
|
171
|
+
return True
|
|
172
|
+
|
|
173
|
+
# Always accept: Comment explains the broad catch is intentional
|
|
174
|
+
if "# " in after_text and any(
|
|
175
|
+
kw in after_text
|
|
176
|
+
for kw in ["fallback", "ignore", "optional", "best effort", "graceful", "intentional"]
|
|
177
|
+
):
|
|
178
|
+
return True
|
|
179
|
+
|
|
180
|
+
return False
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
def _has_problematic_exception_handlers(
    content: str,
    file_path: str,
    acceptable_contexts: list[str] | None = None,
) -> bool:
    """Check if file has problematic broad exception handlers.

    Filters out acceptable uses like version detection, config fallbacks,
    and optional feature detection.

    Args:
        content: File content to check
        file_path: Path to the file (currently unused; kept for signature
            compatibility with sibling check helpers)
        acceptable_contexts: List of acceptable context types from config

    Returns:
        True if problematic exception handlers found, False otherwise.

    """
    # Fast path: bail out before line-splitting when no broad handler can
    # be present.  BUGFIX: check "except Exception" without a trailing
    # colon — the old "except Exception:" substring test returned False
    # for files whose only broad handlers were "except Exception as e:",
    # so the per-line scan below never ran on them.
    if "except:" not in content and "except Exception" not in content:
        return False

    lines = content.splitlines()

    for i, line in enumerate(lines):
        stripped = line.strip()

        # Broad patterns: bare "except:" or any "except Exception..." form.
        if stripped.startswith("except:") or stripped.startswith("except Exception"):
            context_before = lines[max(0, i - 5) : i]
            context_after = lines[i + 1 : min(len(lines), i + 6)]

            if not _is_acceptable_broad_exception(
                stripped,
                context_before,
                context_after,
                acceptable_contexts,
            ):
                # One problematic handler is enough to flag the file.
                return True

    return False
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
def _is_dangerous_eval_usage(content: str, file_path: str) -> bool:
    """Check if file contains dangerous eval/exec usage, filtering false positives.

    Excludes:
    - String literals used for detection (e.g., 'if "eval(" in content')
    - Comments mentioning eval/exec (e.g., '# SECURITY FIX: Use json.loads() instead of eval()')
    - JavaScript's safe regex.exec() method
    - Pattern definitions for security scanners
    - Test fixtures: code written via write_text() or similar for testing
    - Scanner test files that deliberately contain example bad patterns
    - Docstrings documenting security policies (e.g., "No eval() or exec() usage")
    - Security policy documentation in comments

    Args:
        content: Raw file text to analyze.
        file_path: Path of the file; used for test-file and JS/TS detection.

    Returns:
        True if dangerous eval/exec usage is found, False otherwise.

    """
    # Check if file even contains eval or exec (cheap substring pre-filter).
    if "eval(" not in content and "exec(" not in content:
        return False

    # Exclude scanner test files (they deliberately contain example bad patterns)
    scanner_test_patterns = [
        "test_bug_predict",
        "test_scanner",
        "test_security_scan",
    ]
    file_name = file_path.lower()
    if any(pattern in file_name for pattern in scanner_test_patterns):
        return False

    # Check for test fixture patterns - eval/exec inside write_text() or heredoc strings
    # These are test data being written to temp files, not actual dangerous code
    fixture_patterns = [
        r'write_text\s*\(\s*["\'][\s\S]*?(?:eval|exec)\s*\(',  # write_text("...eval(...")
        r'write_text\s*\(\s*"""[\s\S]*?(?:eval|exec)\s*\(',  # write_text("""...eval(...""")
        r"write_text\s*\(\s*'''[\s\S]*?(?:eval|exec)\s*\(",  # write_text('''...eval(...''')
    ]
    for pattern in fixture_patterns:
        if re.search(pattern, content, re.MULTILINE):
            # All eval/exec occurrences might be in fixtures - do deeper check
            # Remove fixture content and see if any eval/exec remains
            # NOTE(review): [^)]* stops at the first ')' even inside the string
            # argument, so this is a best-effort strip — the two triple-quote
            # substitutions below catch the multi-line fixture bodies.
            content_without_fixtures = re.sub(
                r"write_text\s*\([^)]*\)",
                "",
                content,
                flags=re.DOTALL,
            )
            content_without_fixtures = re.sub(
                r'write_text\s*\("""[\s\S]*?"""\)',
                "",
                content_without_fixtures,
            )
            content_without_fixtures = re.sub(
                r"write_text\s*\('''[\s\S]*?'''\)",
                "",
                content_without_fixtures,
            )
            if "eval(" not in content_without_fixtures and "exec(" not in content_without_fixtures:
                return False

    # For JavaScript/TypeScript files, check for regex.exec() which is safe
    if file_path.endswith((".js", ".ts", ".tsx", ".jsx")):
        # Remove all regex.exec() calls (these are safe)
        content_without_regex_exec = re.sub(r"\.\s*exec\s*\(", ".SAFE_EXEC(", content)
        # If no eval/exec remains, it was all regex.exec()
        if "eval(" not in content_without_regex_exec and "exec(" not in content_without_regex_exec:
            return False

    # Remove docstrings before line-by-line analysis
    # This prevents false positives from documentation that mentions eval/exec
    content_without_docstrings = _remove_docstrings(content)

    # Check each line for real dangerous usage
    lines = content_without_docstrings.splitlines()
    for line in lines:
        # Skip comment lines (Python '#', C-style '//', and block-comment '*')
        stripped = line.strip()
        if stripped.startswith("#") or stripped.startswith("//") or stripped.startswith("*"):
            continue

        # Skip security policy documentation (e.g., "- No eval() or exec()")
        if _is_security_policy_line(stripped):
            continue

        # Check for eval( or exec( in this line
        if "eval(" not in line and "exec(" not in line:
            continue

        # Skip if it's inside a string literal for detection purposes
        # e.g., 'if "eval(" in content' or "pattern = r'eval\('"
        detection_patterns = [
            r'["\'].*eval\(.*["\']',  # "eval(" or 'eval(' in a string
            r'["\'].*exec\(.*["\']',  # "exec(" or 'exec(' in a string
            r"in\s+\w+",  # Pattern like 'in content'
            r'r["\'].*eval',  # Raw string regex pattern
            r'r["\'].*exec',  # Raw string regex pattern
        ]

        is_detection_code = False
        for pattern in detection_patterns:
            if re.search(pattern, line):
                # Check if it's really detection code
                if " in " in line and (
                    "content" in line or "text" in line or "code" in line or "source" in line
                ):
                    is_detection_code = True
                    break
                # Check if it's a string literal being defined (eval or exec)
                if re.search(r'["\'][^"\']*eval\([^"\']*["\']', line):
                    is_detection_code = True
                    break
                if re.search(r'["\'][^"\']*exec\([^"\']*["\']', line):
                    is_detection_code = True
                    break
                # Check for raw string regex patterns containing eval/exec
                if re.search(r"r['\"][^'\"]*(?:eval|exec)[^'\"]*['\"]", line):
                    is_detection_code = True
                    break

        if is_detection_code:
            continue

        # Skip JavaScript regex.exec() - pattern.exec(text)
        if re.search(r"\w+\.exec\s*\(", line):
            continue

        # This looks like real dangerous usage
        return True

    return False
|
|
359
|
+
|
|
360
|
+
|
|
361
|
+
def _remove_docstrings(content: str) -> str:
|
|
362
|
+
"""Remove docstrings from Python content to avoid false positives.
|
|
363
|
+
|
|
364
|
+
Docstrings often document security policies (e.g., "No eval() usage")
|
|
365
|
+
which should not trigger the scanner.
|
|
366
|
+
|
|
367
|
+
Args:
|
|
368
|
+
content: Python source code
|
|
369
|
+
|
|
370
|
+
Returns:
|
|
371
|
+
Content with docstrings replaced by placeholder comments.
|
|
372
|
+
"""
|
|
373
|
+
# Remove triple-quoted strings (docstrings)
|
|
374
|
+
# Match """ ... """ and ''' ... ''' including multiline
|
|
375
|
+
content = re.sub(r'"""[\s\S]*?"""', "# [docstring removed]", content)
|
|
376
|
+
content = re.sub(r"'''[\s\S]*?'''", "# [docstring removed]", content)
|
|
377
|
+
return content
|
|
378
|
+
|
|
379
|
+
|
|
380
|
+
def _is_security_policy_line(line: str) -> bool:
|
|
381
|
+
"""Check if a line is documenting security policy rather than using eval/exec.
|
|
382
|
+
|
|
383
|
+
Args:
|
|
384
|
+
line: Stripped line of code
|
|
385
|
+
|
|
386
|
+
Returns:
|
|
387
|
+
True if this appears to be security documentation.
|
|
388
|
+
"""
|
|
389
|
+
line_lower = line.lower()
|
|
390
|
+
|
|
391
|
+
# Patterns indicating security policy documentation
|
|
392
|
+
policy_patterns = [
|
|
393
|
+
r"no\s+eval", # "No eval" or "no eval()"
|
|
394
|
+
r"no\s+exec", # "No exec" or "no exec()"
|
|
395
|
+
r"never\s+use\s+eval",
|
|
396
|
+
r"never\s+use\s+exec",
|
|
397
|
+
r"avoid\s+eval",
|
|
398
|
+
r"avoid\s+exec",
|
|
399
|
+
r"don'?t\s+use\s+eval",
|
|
400
|
+
r"don'?t\s+use\s+exec",
|
|
401
|
+
r"prohibited.*eval",
|
|
402
|
+
r"prohibited.*exec",
|
|
403
|
+
r"security.*eval",
|
|
404
|
+
r"security.*exec",
|
|
405
|
+
]
|
|
406
|
+
|
|
407
|
+
for pattern in policy_patterns:
|
|
408
|
+
if re.search(pattern, line_lower):
|
|
409
|
+
return True
|
|
410
|
+
|
|
411
|
+
# Check for list item documentation (e.g., "- No eval() or exec() usage")
|
|
412
|
+
if line.startswith("-") and ("eval" in line_lower or "exec" in line_lower):
|
|
413
|
+
# If it contains "no", "never", "avoid", it's policy documentation
|
|
414
|
+
if any(word in line_lower for word in ["no ", "never", "avoid", "don't", "prohibited"]):
|
|
415
|
+
return True
|
|
416
|
+
|
|
417
|
+
return False
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
# Define step configurations for executor-based execution
# Only the "recommend" stage goes through the executor; the earlier stages
# (scan/correlate/predict) are computed locally without an LLM call.
BUG_PREDICT_STEPS = {
    "recommend": WorkflowStepConfig(
        name="recommend",
        task_type="final_review",  # Premium tier task
        tier_hint="premium",
        description="Generate bug prevention recommendations",
        max_tokens=2000,  # matches the legacy _call_llm fallback limit
    ),
}
|
|
430
|
+
|
|
431
|
+
|
|
432
|
+
class BugPredictionWorkflow(BaseWorkflow):
    """Predict bugs by correlating current code with learned patterns.

    Uses pattern library integration to identify code that matches
    historical bug patterns and generates preventive recommendations.

    Stage pipeline (cheap -> premium):
        scan       - walk the target tree and collect bug-prone patterns
        correlate  - match findings against the learned pattern library
        predict    - compute per-file and overall risk scores
        recommend  - ask an LLM for preventive recommendations
    """

    # Class-level metadata consumed by the BaseWorkflow stage driver.
    name = "bug-predict"
    description = "Predict bugs by analyzing code against learned patterns"
    stages = ["scan", "correlate", "predict", "recommend"]
    # Default tier per stage; __init__ shadows this with an instance copy so
    # should_skip_stage() can downgrade "recommend" without mutating the class.
    tier_map = {
        "scan": ModelTier.CHEAP,
        "correlate": ModelTier.CAPABLE,
        "predict": ModelTier.CAPABLE,
        "recommend": ModelTier.PREMIUM,
    }

    def __init__(
        self,
        risk_threshold: float | None = None,
        patterns_dir: str = "./patterns",
        enable_auth_strategy: bool = True,
        **kwargs: Any,
    ):
        """Initialize bug prediction workflow.

        Args:
            risk_threshold: Minimum risk score to trigger premium recommendations
                (defaults to config value or 0.7)
            patterns_dir: Directory containing learned patterns
            enable_auth_strategy: If True, use intelligent subscription vs API routing
                based on codebase size (default True)
            **kwargs: Additional arguments passed to BaseWorkflow

        """
        super().__init__(**kwargs)

        # Create instance-level tier_map to prevent class-level mutation
        self.tier_map = {
            "scan": ModelTier.CHEAP,
            "correlate": ModelTier.CAPABLE,
            "predict": ModelTier.CAPABLE,
            "recommend": ModelTier.PREMIUM,
        }

        # Load bug_predict config from attune.config.yml
        self._bug_predict_config = _load_bug_predict_config()

        # Use provided risk_threshold or fall back to config
        self.risk_threshold = (
            risk_threshold
            if risk_threshold is not None
            else self._bug_predict_config["risk_threshold"]
        )
        self.patterns_dir = patterns_dir
        self.enable_auth_strategy = enable_auth_strategy
        self._risk_score: float = 0.0  # set by _predict, read by should_skip_stage
        self._bug_patterns: list[dict] = []  # learned patterns; filled by _load_patterns
        self._auth_mode_used: str | None = None  # Track which auth was recommended
        self._load_patterns()

    def _load_patterns(self) -> None:
        """Load bug patterns from the pattern library.

        Reads <patterns_dir>/debugging.json; on a missing file, unreadable
        file, or invalid JSON the pattern list is left/reset empty so the
        workflow still runs (correlate simply finds no matches).
        """
        debugging_file = Path(self.patterns_dir) / "debugging.json"
        if debugging_file.exists():
            try:
                with open(debugging_file) as f:
                    data = json.load(f)
                    self._bug_patterns = data.get("patterns", [])
            except (json.JSONDecodeError, OSError):
                self._bug_patterns = []

    def should_skip_stage(self, stage_name: str, input_data: Any) -> tuple[bool, str | None]:
        """Conditionally downgrade recommend stage based on risk score.

        Never actually skips a stage; when the accumulated risk score is
        below the threshold, the "recommend" tier is lowered from PREMIUM
        to CAPABLE on the instance-level tier_map instead.

        Args:
            stage_name: Name of the stage to check
            input_data: Current workflow data

        Returns:
            Tuple of (should_skip, reason)

        """
        if stage_name == "recommend":
            if self._risk_score < self.risk_threshold:
                # Downgrade to CAPABLE instead of skipping
                self.tier_map["recommend"] = ModelTier.CAPABLE
                return False, None
        return False, None

    async def run_stage(
        self,
        stage_name: str,
        tier: ModelTier,
        input_data: Any,
    ) -> tuple[Any, int, int]:
        """Route to specific stage implementation.

        Args:
            stage_name: Name of the stage to run
            tier: Model tier to use
            input_data: Input data for the stage

        Returns:
            Tuple of (output_data, input_tokens, output_tokens)

        Raises:
            ValueError: If stage_name is not one of the four known stages.

        """
        if stage_name == "scan":
            return await self._scan(input_data, tier)
        if stage_name == "correlate":
            return await self._correlate(input_data, tier)
        if stage_name == "predict":
            return await self._predict(input_data, tier)
        if stage_name == "recommend":
            return await self._recommend(input_data, tier)
        raise ValueError(f"Unknown stage: {stage_name}")

    async def _scan(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
        """Scan codebase for code patterns and structures.

        In production, this would analyze source files for patterns
        that historically correlate with bugs.

        No LLM call happens here; token counts are estimated as
        len(str(...)) // 4 for cost reporting.
        """
        target_path = input_data.get("path", ".")
        file_types = input_data.get("file_types", [".py", ".ts", ".tsx", ".js"])

        # Simulate scanning for code patterns
        scanned_files: list[dict] = []
        patterns_found: list[dict] = []

        # Directories to exclude from scanning (dependencies, build artifacts, etc.)
        exclude_dirs = [
            ".git",
            "node_modules",
            ".venv",
            "venv",
            "env",
            "__pycache__",
            "site-packages",
            "dist",
            "build",
            ".tox",
            ".nox",
            ".eggs",
            "*.egg-info",
        ]

        # Get config options
        config_exclude_patterns = self._bug_predict_config.get("exclude_files", [])
        acceptable_contexts = self._bug_predict_config.get("acceptable_exception_contexts", None)

        # === AUTH STRATEGY INTEGRATION ===
        # Detect codebase size and recommend auth mode (first stage only)
        if self.enable_auth_strategy:
            try:
                # Imported lazily so the workflow still runs when the auth
                # strategy module is absent (handled by ImportError below).
                from attune.models import (
                    count_lines_of_code,
                    get_auth_strategy,
                    get_module_size_category,
                )

                # Calculate codebase size
                codebase_lines = 0
                target = Path(target_path)
                if target.exists():
                    codebase_lines = count_lines_of_code(str(target))

                # Get auth strategy and recommendation
                strategy = get_auth_strategy()
                if strategy:
                    # Get recommended auth mode
                    recommended_mode = strategy.get_recommended_mode(codebase_lines)
                    self._auth_mode_used = recommended_mode.value

                    # Get size category
                    size_category = get_module_size_category(codebase_lines)

                    # Log recommendation
                    logger.info(
                        f"Auth Strategy: {size_category.value} codebase ({codebase_lines} lines) "
                        f"-> {recommended_mode.value}",
                    )
            except ImportError:
                # Auth strategy module not available - continue without it
                logger.debug("Auth strategy module not available")
            except Exception as e:
                # Don't fail the workflow if auth strategy detection fails
                logger.warning(f"Auth strategy detection failed: {e}")
        # === END AUTH STRATEGY ===

        # Walk directory and collect file info
        target = Path(target_path)
        if target.exists():
            for ext in file_types:
                for file_path in target.rglob(f"*{ext}"):
                    # Skip excluded directories
                    path_str = str(file_path)
                    if any(excl in path_str for excl in exclude_dirs):
                        continue

                    # Skip files matching config exclude patterns
                    if _should_exclude_file(path_str, config_exclude_patterns):
                        continue

                    try:
                        content = file_path.read_text(errors="ignore")
                        scanned_files.append(
                            {
                                "path": str(file_path),
                                "lines": len(content.splitlines()),
                                "size": len(content),
                            },
                        )

                        # Look for common bug-prone patterns
                        # Use smart detection with configurable acceptable contexts
                        if _has_problematic_exception_handlers(
                            content,
                            str(file_path),
                            acceptable_contexts,
                        ):
                            patterns_found.append(
                                {
                                    "file": str(file_path),
                                    "pattern": "broad_exception",
                                    "severity": "medium",
                                },
                            )
                        if "# TODO" in content or "# FIXME" in content:
                            patterns_found.append(
                                {
                                    "file": str(file_path),
                                    "pattern": "incomplete_code",
                                    "severity": "low",
                                },
                            )
                        # Use smart detection to filter false positives
                        if _is_dangerous_eval_usage(content, str(file_path)):
                            patterns_found.append(
                                {
                                    "file": str(file_path),
                                    "pattern": "dangerous_eval",
                                    "severity": "high",
                                },
                            )
                    except OSError:
                        # Unreadable file: best-effort scan, move on.
                        continue

        input_tokens = len(str(input_data)) // 4
        output_tokens = len(str(scanned_files)) // 4 + len(str(patterns_found)) // 4

        return (
            {
                "scanned_files": scanned_files[:100],  # Limit for efficiency
                "patterns_found": patterns_found,
                "file_count": len(scanned_files),
                "pattern_count": len(patterns_found),
                **input_data,
            },
            input_tokens,
            output_tokens,
        )

    async def _correlate(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
        """Match current code patterns against historical bug patterns.

        Correlates findings from scan stage with patterns stored in
        the debugging.json pattern library.

        Matched findings get a fixed 0.75 confidence; unmatched ones
        still produce a correlation entry at 0.3 with no historical bug.
        """
        patterns_found = input_data.get("patterns_found", [])
        correlations: list[dict] = []

        # Match against known bug patterns
        for pattern in patterns_found:
            pattern_type = pattern.get("pattern", "")

            # Check against historical patterns
            for bug_pattern in self._bug_patterns:
                bug_type = bug_pattern.get("bug_type", "")
                if self._patterns_correlate(pattern_type, bug_type):
                    correlations.append(
                        {
                            "current_pattern": pattern,
                            "historical_bug": {
                                "type": bug_type,
                                "root_cause": bug_pattern.get("root_cause", ""),
                                "fix": bug_pattern.get("fix", ""),
                            },
                            "confidence": 0.75,
                        },
                    )

        # Add correlations for patterns without direct matches
        for pattern in patterns_found:
            if not any(c["current_pattern"] == pattern for c in correlations):
                correlations.append(
                    {
                        "current_pattern": pattern,
                        "historical_bug": None,
                        "confidence": 0.3,
                    },
                )

        input_tokens = len(str(input_data)) // 4
        output_tokens = len(str(correlations)) // 4

        return (
            {
                "correlations": correlations,
                "correlation_count": len(correlations),
                "high_confidence_count": sum(1 for c in correlations if c["confidence"] > 0.6),
                **input_data,
            },
            input_tokens,
            output_tokens,
        )

    def _patterns_correlate(self, current: str, historical: str) -> bool:
        """Check if current pattern correlates with historical bug type."""
        # Static mapping: which historical bug types each scanned pattern
        # is considered evidence for.
        correlation_map = {
            "broad_exception": ["null_reference", "type_mismatch", "unknown"],
            "incomplete_code": ["async_timing", "null_reference"],
            "dangerous_eval": ["import_error", "type_mismatch"],
        }
        return historical in correlation_map.get(current, [])

    async def _predict(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
        """Identify high-risk areas based on correlation scores.

        Calculates risk scores for each file and identifies
        the most likely locations for bugs to occur.

        Side effect: stores the overall score in self._risk_score, which
        should_skip_stage() later uses to downgrade the recommend tier.
        """
        correlations = input_data.get("correlations", [])
        patterns_found = input_data.get("patterns_found", [])

        # Calculate file risk scores: sum of confidence * severity weight.
        file_risks: dict[str, float] = {}
        for corr in correlations:
            file_path = corr["current_pattern"].get("file", "")
            confidence = corr.get("confidence", 0.3)
            severity_weight = {
                "high": 1.0,
                "medium": 0.6,
                "low": 0.3,
            }.get(corr["current_pattern"].get("severity", "low"), 0.3)

            risk = confidence * severity_weight
            file_risks[file_path] = file_risks.get(file_path, 0) + risk

        # Normalize and sort (descending by raw risk; max becomes 1.0)
        max_risk = max(file_risks.values()) if file_risks else 1.0
        predictions: list[dict] = [
            {
                "file": f,
                "risk_score": round(r / max_risk, 2),
                "patterns": [p for p in patterns_found if p.get("file") == f],
            }
            for f, r in sorted(file_risks.items(), key=lambda x: -x[1])
        ]

        # Calculate overall risk score: mean of the top five normalized
        # scores (or of all of them when fewer than five files exist).
        self._risk_score = (
            sum(float(p["risk_score"]) for p in predictions[:5]) / 5
            if len(predictions) >= 5
            else sum(float(p["risk_score"]) for p in predictions) / max(len(predictions), 1)
        )

        input_tokens = len(str(input_data)) // 4
        output_tokens = len(str(predictions)) // 4

        return (
            {
                "predictions": predictions[:20],  # Top 20 risky files
                "overall_risk_score": round(self._risk_score, 2),
                "high_risk_files": sum(1 for p in predictions if float(p["risk_score"]) > 0.7),
                **input_data,
            },
            input_tokens,
            output_tokens,
        )

    async def _recommend(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
        """Generate actionable fix recommendations using LLM.

        Uses premium tier (or capable if downgraded) to generate
        specific recommendations for addressing predicted bugs.

        Supports XML-enhanced prompts when enabled in workflow config.
        """
        predictions = input_data.get("predictions", [])
        target = input_data.get("target", "")

        # Build context for LLM: one summary line per pattern in the
        # top ten riskiest files.
        top_risks = predictions[:10]
        issues_summary = []
        for pred in top_risks:
            file_path = pred.get("file", "")
            patterns = pred.get("patterns", [])
            for p in patterns:
                issues_summary.append(
                    f"- {file_path}: {p.get('pattern')} (severity: {p.get('severity')})",
                )

        # Build input payload
        input_payload = f"""Target: {target or "codebase"}

Issues Found:
{chr(10).join(issues_summary) if issues_summary else "No specific issues identified"}

Historical Bug Patterns:
{json.dumps(self._bug_patterns[:5], indent=2) if self._bug_patterns else "None"}

Risk Score: {input_data.get("overall_risk_score", 0):.2f}"""

        # Check if XML prompts are enabled
        if self._is_xml_enabled():
            # Use XML-enhanced prompt
            user_message = self._render_xml_prompt(
                role="senior software engineer specializing in bug prevention",
                goal="Analyze bug-prone patterns and generate actionable recommendations",
                instructions=[
                    "Explain why each pattern is risky",
                    "Provide specific fixes with code examples",
                    "Suggest preventive measures",
                    "Reference historical patterns when relevant",
                    "Prioritize by severity and risk score",
                ],
                constraints=[
                    "Be specific and actionable",
                    "Include code examples where helpful",
                    "Group recommendations by priority",
                ],
                input_type="bug_patterns",
                input_payload=input_payload,
                extra={
                    "risk_score": input_data.get("overall_risk_score", 0),
                    "pattern_count": len(issues_summary),
                },
            )
            system = None  # XML prompt includes all context
        else:
            # Use legacy plain text prompts
            system = """You are a senior software engineer specializing in bug prevention.
Analyze the identified code patterns and generate actionable recommendations.

For each issue:
1. Explain why this pattern is risky
2. Provide a specific fix with code example if applicable
3. Suggest preventive measures

Be specific and actionable. Prioritize by severity."""

            user_message = f"""Analyze these bug-prone patterns and provide recommendations:

{input_payload}

Provide detailed recommendations for preventing bugs."""

        # Try executor-based execution first (Phase 3 pattern)
        # NOTE(review): _executor and _api_key presumably come from
        # BaseWorkflow — confirm against the base class.
        if self._executor is not None or self._api_key:
            try:
                step = BUG_PREDICT_STEPS["recommend"]
                # "cost" is unpacked but not used here; cost accounting
                # presumably happens inside the executor — confirm.
                response, input_tokens, output_tokens, cost = await self.run_step_with_executor(
                    step=step,
                    prompt=user_message,
                    system=system,
                )
            except Exception as e:
                # Graceful fallback to legacy _call_llm if executor fails
                logger.warning(f"Executor failed, falling back to legacy LLM call: {e}")
                response, input_tokens, output_tokens = await self._call_llm(
                    tier,
                    system or "",
                    user_message,
                    max_tokens=2000,
                )
        else:
            # Legacy path for backward compatibility
            response, input_tokens, output_tokens = await self._call_llm(
                tier,
                system or "",
                user_message,
                max_tokens=2000,
            )

        # Parse XML response if enforcement is enabled
        parsed_data = self._parse_xml_response(response)

        result = {
            "recommendations": response,
            "recommendation_count": len(top_risks),
            "model_tier_used": tier.value,
            "overall_risk_score": input_data.get("overall_risk_score", 0),
            "auth_mode_used": self._auth_mode_used,  # Track recommended auth mode
        }

        # Merge parsed XML data if available
        if parsed_data.get("xml_parsed"):
            result.update(
                {
                    "xml_parsed": True,
                    "summary": parsed_data.get("summary"),
                    "findings": parsed_data.get("findings", []),
                    "checklist": parsed_data.get("checklist", []),
                },
            )

        # Add formatted report for human readability
        result["formatted_report"] = format_bug_predict_report(result, input_data)

        return (result, input_tokens, output_tokens)
|
|
943
|
+
|
|
944
|
+
|
|
945
|
+
def format_bug_predict_report(result: dict, input_data: dict) -> str:
    """Render the bug prediction output as a human-readable text report.

    Args:
        result: The recommend stage result
        input_data: Input data from previous stages

    Returns:
        Formatted report string

    """
    heavy_rule = "=" * 60
    light_rule = "-" * 60
    out: list[str] = []

    # Header: pick the icon/label for the first threshold the score clears.
    risk_score = result.get("overall_risk_score", 0)
    risk_icon, risk_text = next(
        (icon, label)
        for cutoff, icon, label in (
            (0.8, "🔴", "HIGH RISK"),
            (0.5, "🟠", "MODERATE RISK"),
            (0.3, "🟡", "LOW RISK"),
            (float("-inf"), "🟢", "MINIMAL RISK"),
        )
        if risk_score >= cutoff
    )

    out.extend(
        [
            heavy_rule,
            "BUG PREDICTION REPORT",
            heavy_rule,
            "",
            f"Overall Risk: {risk_icon} {risk_text} ({risk_score:.0%})",
            "",
        ]
    )

    # Scan summary section.
    out.extend(
        [
            light_rule,
            "SCAN SUMMARY",
            light_rule,
            f"Files Scanned: {input_data.get('file_count', 0)}",
            f"Patterns Found: {input_data.get('pattern_count', 0)}",
            "",
        ]
    )

    # Severity breakdown, only when the scan produced any patterns.
    patterns = input_data.get("patterns_found", [])
    if patterns:
        counts = {
            sev: sum(1 for p in patterns if p.get("severity") == sev)
            for sev in ("high", "medium", "low")
        }
        out.extend(
            [
                "Pattern Breakdown:",
                f" 🔴 High: {counts['high']}",
                f" 🟡 Medium: {counts['medium']}",
                f" 🟢 Low: {counts['low']}",
                "",
            ]
        )

    # Files whose normalized risk exceeds 0.7 (top ten shown).
    predictions = input_data.get("predictions", [])
    high_risk = [p for p in predictions if float(p.get("risk_score", 0)) > 0.7]
    if high_risk:
        out.extend([light_rule, "HIGH RISK FILES", light_rule])
        for pred in high_risk[:10]:
            out.append(f" 🔴 {pred.get('file', 'unknown')} (risk: {pred.get('risk_score', 0):.0%})")
            for pat in pred.get("patterns", [])[:3]:
                out.append(
                    f" - {pat.get('pattern', 'unknown')}: {pat.get('severity', 'unknown')}",
                )
        out.append("")

    # Strong correlations (confidence > 0.6) that carry a historical bug.
    strong = [
        c
        for c in input_data.get("correlations", [])
        if c.get("confidence", 0) > 0.6 and c.get("historical_bug")
    ]
    if strong:
        out.extend([light_rule, "HISTORICAL BUG CORRELATIONS", light_rule])
        for corr in strong[:5]:
            current = corr.get("current_pattern", {})
            historical = corr.get("historical_bug", {})
            out.append(
                f" ⚠️ {current.get('pattern', 'unknown')} correlates with {historical.get('type', 'unknown')}",
            )
            out.append(f" Confidence: {corr.get('confidence', 0):.0%}")
            if historical.get("root_cause"):
                # Long root causes are truncated to keep the report scannable.
                out.append(f" Root cause: {historical.get('root_cause')[:80]}")
        out.append("")

    # LLM recommendations, verbatim.
    recommendations = result.get("recommendations", "")
    if recommendations:
        out.extend([light_rule, "RECOMMENDATIONS", light_rule, recommendations, ""])

    # Footer.
    out.extend(
        [
            heavy_rule,
            f"Analysis completed using {result.get('model_tier_used', 'unknown')} tier model",
            heavy_rule,
        ]
    )

    return "\n".join(out)
|
|
1058
|
+
|
|
1059
|
+
|
|
1060
|
+
def main():
    """CLI entry point for bug prediction workflow."""
    import asyncio

    async def _drive():
        # Run the full pipeline over the current directory, Python files only.
        outcome = await BugPredictionWorkflow().execute(path=".", file_types=[".py"])

        final = outcome.final_output
        report = outcome.cost_report

        print("\nBug Prediction Results")
        print("=" * 50)
        print(f"Provider: {outcome.provider}")
        print(f"Success: {outcome.success}")
        print(f"Risk Score: {final.get('overall_risk_score', 0)}")
        print(f"Recommendations: {final.get('recommendation_count', 0)}")
        print("\nCost Report:")
        print(f" Total Cost: ${report.total_cost:.4f}")
        print(f" Savings: ${report.savings:.4f} ({report.savings_percent:.1f}%)")

    asyncio.run(_drive())


if __name__ == "__main__":
    main()
|