attune-ai 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
|
@@ -0,0 +1,1329 @@
|
|
|
1
|
+
"""Security Audit Workflow
|
|
2
|
+
|
|
3
|
+
OWASP-focused security scan with intelligent vulnerability assessment.
|
|
4
|
+
Integrates with team security decisions to filter known false positives.
|
|
5
|
+
|
|
6
|
+
Stages:
|
|
7
|
+
1. triage (CHEAP) - Quick scan for common vulnerability patterns
|
|
8
|
+
2. analyze (CAPABLE) - Deep analysis of flagged areas
|
|
9
|
+
3. assess (CAPABLE) - Risk scoring and severity classification
|
|
10
|
+
4. remediate (PREMIUM) - Generate remediation plan (conditional)
|
|
11
|
+
|
|
12
|
+
Copyright 2025 Smart-AI-Memory
|
|
13
|
+
Licensed under Fair Source License 0.9
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import json
|
|
17
|
+
import logging
|
|
18
|
+
import re
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
from typing import Any
|
|
21
|
+
|
|
22
|
+
from .base import BaseWorkflow, ModelTier
|
|
23
|
+
from .step_config import WorkflowStepConfig
|
|
24
|
+
|
|
25
|
+
logger = logging.getLogger(__name__)
|
|
26
|
+
|
|
27
|
+
# Step configurations for executor-based execution.
# Only "remediate" is declared here; the other stages (triage/analyze/assess)
# are implemented directly by SecurityAuditWorkflow methods below.
SECURITY_STEPS = {
    "remediate": WorkflowStepConfig(
        name="remediate",
        task_type="final_review",  # Premium tier task
        tier_hint="premium",
        description="Generate remediation plan for security vulnerabilities",
        max_tokens=3000,
    ),
}
|
|
37
|
+
|
|
38
|
+
# Directories to skip during scanning (build artifacts, third-party code).
# NOTE: membership is tested by *substring* match against the file path
# (see the triage stage), so every entry here must be a literal path
# fragment — glob wildcards like "*" would never match a real path.
SKIP_DIRECTORIES = {
    ".git",
    "node_modules",
    "__pycache__",
    "venv",
    ".venv",
    "env",
    ".next",  # Next.js build output
    "dist",
    "build",
    ".tox",
    "site",  # MkDocs output
    "ebook-site",
    "website",  # Website build artifacts
    "anthropic-cookbook",  # Third-party examples
    ".eggs",
    ".egg-info",  # was "*.egg-info": literal "*" never appears in paths, so the entry was dead
    "htmlcov",  # Coverage report artifacts
    "htmlcov_logging",  # Coverage report artifacts
    ".coverage",  # Coverage data
    "vscode-extension",  # VSCode extension code (separate security review)
    "vscode-memory-panel",  # VSCode panel code
    "workflow-dashboard",  # Dashboard build
}
|
|
63
|
+
|
|
64
|
+
# Patterns that indicate a line is DETECTION code, not vulnerable code.
# These help avoid false positives when scanning security tools: a scanner
# that mentions "eval(" inside a string literal or a regex is looking FOR
# the vulnerability, not exhibiting it.
DETECTION_PATTERNS = [
    r'["\']eval\s*\(["\']',  # String literal like "eval(" (detection, not execution)
    r'["\']exec\s*\(["\']',  # String literal like "exec(" (detection, not execution)
    r"in\s+content",  # Pattern detection like "eval(" in content
    r"re\.compile",  # Regex compilation for detection
    r"\.finditer\(",  # Regex matching for detection
    r"\.search\(",  # Regex searching for detection
]
|
|
74
|
+
|
|
75
|
+
# Known fake/test credential patterns to ignore. A hardcoded-secret finding
# whose matched text also matches one of these is treated as a placeholder
# or fixture, not a leaked credential.
FAKE_CREDENTIAL_PATTERNS = [
    r"EXAMPLE",  # AWS example keys
    r"FAKE",
    r"TEST",
    r"your-.*-here",
    r'"your-key"',  # Placeholder key
    r"abc123xyz",
    r"\.\.\.",  # Placeholder with ellipsis
    r"test-key",
    r"mock",
    r'"hardcoded_secret"',  # Literal example text
    r'"secret"$',  # Generic "secret" as value
    r'"secret123"',  # Test password
    r'"password"$',  # Generic password as value
    r"_PATTERN",  # Pattern constants
    r"_EXAMPLE",  # Example constants
]
|
|
93
|
+
|
|
94
|
+
# Files/paths that contain security examples/tests (not vulnerabilities).
# Matched as substrings of the file path; findings in these locations are
# expected (pattern definitions, fixtures, educational material).
SECURITY_EXAMPLE_PATHS = [
    "owasp_patterns.py",
    "vulnerability_scanner.py",
    "test_security",
    "test_secrets",
    "test_owasp",
    "secrets_detector.py",  # Security tool with pattern definitions
    "pii_scrubber.py",  # Privacy tool
    "secure_memdocs",  # Secure storage module
    "/security/",  # Security modules
    "/benchmarks/",  # Benchmark files with test fixtures
    "benchmark_",  # Benchmark files (e.g., benchmark_caching.py)
    "phase_2_setup.py",  # Setup file with educational patterns
]
|
|
109
|
+
|
|
110
|
+
# Patterns indicating test fixture data (code written to temp files for
# testing). Lines matching these are deliberately "vulnerable" sample code,
# so findings on them should be suppressed or downgraded.
TEST_FIXTURE_PATTERNS = [
    r"SECURITY_TEST_FILES\s*=",  # Dict of test fixture code
    r"write_text\s*\(",  # Writing test data to temp files
    r"# UNSAFE - DO NOT USE",  # Educational comments showing bad patterns
    r"# SAFE -",  # Educational comments showing good patterns
    r"# INJECTION RISK",  # Educational markers
    r"pragma:\s*allowlist\s*secret",  # Explicit allowlist marker
]
|
|
119
|
+
|
|
120
|
+
# Test file patterns - findings here are informational, not critical.
# Matched against the file path to recognize test/demo/example code.
TEST_FILE_PATTERNS = [
    r"/tests/",
    r"/test_",
    r"_test\.py$",
    r"_demo\.py$",
    r"_example\.py$",
    r"/examples/",
    r"/demo",
    r"coach/vscode-extension",  # Example VSCode extension
]
|
|
131
|
+
|
|
132
|
+
# Common security vulnerability patterns (OWASP Top 10 inspired).
# Each entry maps a vulnerability class to:
#   "patterns" - regexes that flag a suspicious source line,
#   "severity" - critical/high/medium label used for risk scoring,
#   "owasp"    - the OWASP Top 10 (2021) category it falls under.
SECURITY_PATTERNS = {
    "sql_injection": {
        "patterns": [
            r'execute\s*\(\s*["\'].*%s',
            r'cursor\.execute\s*\(\s*f["\']',
            r"\.format\s*\(.*\).*execute",
        ],
        "severity": "critical",
        "owasp": "A03:2021 Injection",
    },
    "xss": {
        "patterns": [
            r"innerHTML\s*=",
            r"dangerouslySetInnerHTML",
            r"document\.write\s*\(",
        ],
        "severity": "high",
        "owasp": "A03:2021 Injection",
    },
    "hardcoded_secret": {
        "patterns": [
            r'password\s*=\s*["\'][^"\']+["\']',
            r'api_key\s*=\s*["\'][^"\']+["\']',
            r'secret\s*=\s*["\'][^"\']+["\']',
            r'token\s*=\s*["\'][A-Za-z0-9]{20,}["\']',
        ],
        "severity": "critical",
        "owasp": "A02:2021 Cryptographic Failures",
    },
    "insecure_random": {
        "patterns": [
            r"random\.\w+\s*\(",
            r"Math\.random\s*\(",
        ],
        "severity": "medium",
        "owasp": "A02:2021 Cryptographic Failures",
    },
    "path_traversal": {
        "patterns": [
            r"open\s*\([^)]*\+[^)]*\)",
            r"readFile\s*\([^)]*\+[^)]*\)",
        ],
        "severity": "high",
        "owasp": "A01:2021 Broken Access Control",
    },
    "command_injection": {
        "patterns": [
            r"subprocess\.\w+\s*\([^)]*shell\s*=\s*True",
            r"os\.system\s*\(",
            r"eval\s*\(",
            r"exec\s*\(",
        ],
        "severity": "critical",
        "owasp": "A03:2021 Injection",
    },
}
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
class SecurityAuditWorkflow(BaseWorkflow):
    """OWASP-focused security audit with team decision integration.

    Scans code for security vulnerabilities while respecting
    team decisions about false positives and accepted risks.
    """

    # Workflow identity and the ordered stage pipeline used by BaseWorkflow.
    name = "security-audit"
    description = "OWASP-focused security scan with vulnerability assessment"
    stages = ["triage", "analyze", "assess", "remediate"]
    # Model cost tier per stage: cheap regex triage first, capable models
    # for analysis/assessment, premium model only for the (conditional)
    # remediation plan.
    tier_map = {
        "triage": ModelTier.CHEAP,
        "analyze": ModelTier.CAPABLE,
        "assess": ModelTier.CAPABLE,
        "remediate": ModelTier.PREMIUM,
    }
|
|
207
|
+
|
|
208
|
+
    def __init__(
        self,
        patterns_dir: str = "./patterns",
        skip_remediate_if_clean: bool = True,
        use_crew_for_assessment: bool = True,
        use_crew_for_remediation: bool = False,
        crew_config: dict | None = None,
        enable_auth_strategy: bool = True,
        **kwargs: Any,
    ):
        """Initialize security audit workflow.

        Args:
            patterns_dir: Directory containing security decisions
            skip_remediate_if_clean: Skip remediation if no high/critical findings
            use_crew_for_assessment: Use SecurityAuditCrew for vulnerability assessment (default: True)
            use_crew_for_remediation: Use SecurityAuditCrew for enhanced remediation (default: False)
            crew_config: Configuration dict for SecurityAuditCrew
            enable_auth_strategy: If True, use intelligent subscription vs API routing
                based on codebase size (default: True)
            **kwargs: Additional arguments passed to BaseWorkflow

        """
        super().__init__(**kwargs)
        self.patterns_dir = patterns_dir
        self.skip_remediate_if_clean = skip_remediate_if_clean
        self.use_crew_for_assessment = use_crew_for_assessment
        self.use_crew_for_remediation = use_crew_for_remediation
        self.crew_config = crew_config or {}
        self.enable_auth_strategy = enable_auth_strategy
        # Mutable scan state, populated as stages run.
        self._has_critical: bool = False
        self._team_decisions: dict[str, dict] = {}
        # Lazily-initialized SecurityAuditCrew (see _initialize_crew).
        self._crew: Any = None
        self._crew_available = False
        self._auth_mode_used: str | None = None  # Track which auth was recommended
        self._load_team_decisions()
|
|
244
|
+
|
|
245
|
+
def _load_team_decisions(self) -> None:
|
|
246
|
+
"""Load team security decisions for false positive filtering."""
|
|
247
|
+
decisions_file = Path(self.patterns_dir) / "security" / "team_decisions.json"
|
|
248
|
+
if decisions_file.exists():
|
|
249
|
+
try:
|
|
250
|
+
with open(decisions_file) as f:
|
|
251
|
+
data = json.load(f)
|
|
252
|
+
for decision in data.get("decisions", []):
|
|
253
|
+
key = decision.get("finding_hash", "")
|
|
254
|
+
self._team_decisions[key] = decision
|
|
255
|
+
except (json.JSONDecodeError, OSError):
|
|
256
|
+
pass
|
|
257
|
+
|
|
258
|
+
async def _initialize_crew(self) -> None:
|
|
259
|
+
"""Initialize the SecurityAuditCrew."""
|
|
260
|
+
if self._crew is not None:
|
|
261
|
+
return
|
|
262
|
+
|
|
263
|
+
try:
|
|
264
|
+
from attune_llm.agent_factory.crews.security_audit import SecurityAuditCrew
|
|
265
|
+
|
|
266
|
+
self._crew = SecurityAuditCrew()
|
|
267
|
+
self._crew_available = True
|
|
268
|
+
logger.info("SecurityAuditCrew initialized successfully")
|
|
269
|
+
except ImportError as e:
|
|
270
|
+
logger.warning(f"SecurityAuditCrew not available: {e}")
|
|
271
|
+
self._crew_available = False
|
|
272
|
+
|
|
273
|
+
def should_skip_stage(self, stage_name: str, input_data: Any) -> tuple[bool, str | None]:
|
|
274
|
+
"""Skip remediation stage if no critical/high findings.
|
|
275
|
+
|
|
276
|
+
Args:
|
|
277
|
+
stage_name: Name of the stage to check
|
|
278
|
+
input_data: Current workflow data
|
|
279
|
+
|
|
280
|
+
Returns:
|
|
281
|
+
Tuple of (should_skip, reason)
|
|
282
|
+
|
|
283
|
+
"""
|
|
284
|
+
if stage_name == "remediate" and self.skip_remediate_if_clean:
|
|
285
|
+
if not self._has_critical:
|
|
286
|
+
return True, "No high/critical findings requiring remediation"
|
|
287
|
+
return False, None
|
|
288
|
+
|
|
289
|
+
async def run_stage(
    self,
    stage_name: str,
    tier: ModelTier,
    input_data: Any,
) -> tuple[Any, int, int]:
    """Route to specific stage implementation.

    Dispatches ``stage_name`` to its coroutine; raises ``ValueError``
    for names outside the known pipeline stages.
    """
    handlers = {
        "triage": self._triage,
        "analyze": self._analyze,
        "assess": self._assess,
        "remediate": self._remediate,
    }
    handler = handlers.get(stage_name)
    if handler is None:
        raise ValueError(f"Unknown stage: {stage_name}")
    return await handler(input_data, tier)
|
|
305
|
+
|
|
306
|
+
async def _triage(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
    """Quick scan for common vulnerability patterns.

    Uses regex patterns to identify potential security issues
    across the codebase for further analysis.

    Args:
        input_data: Workflow payload; reads ``path`` (file or directory,
            default ``"."``) and ``file_types`` (extensions to scan).
        tier: Model tier for this stage (unused here; triage is purely
            regex-based and makes no LLM calls).

    Returns:
        Tuple of (output dict, estimated input tokens, estimated output
        tokens). The output dict adds ``findings``, ``files_scanned``
        and ``finding_count`` on top of ``input_data``.
    """
    target_path = input_data.get("path", ".")
    file_types = input_data.get("file_types", [".py", ".ts", ".tsx", ".js", ".jsx"])

    findings: list[dict] = []
    files_scanned = 0

    target = Path(target_path)
    if target.exists():
        # Handle both file and directory targets
        files_to_scan: list[Path] = []
        if target.is_file():
            # Single file - check if it matches file_types
            if any(str(target).endswith(ext) for ext in file_types):
                files_to_scan = [target]
        else:
            # Directory - recursively find all matching files
            for ext in file_types:
                for file_path in target.rglob(f"*{ext}"):
                    # Skip excluded directories
                    if any(skip in str(file_path) for skip in SKIP_DIRECTORIES):
                        continue
                    files_to_scan.append(file_path)

        for file_path in files_to_scan:
            try:
                # errors="ignore" keeps the scan going on files with
                # unexpected encodings.
                content = file_path.read_text(errors="ignore")
                lines = content.split("\n")
                files_scanned += 1

                for vuln_type, vuln_info in SECURITY_PATTERNS.items():
                    for pattern in vuln_info["patterns"]:
                        matches = list(re.finditer(pattern, content, re.IGNORECASE))
                        for match in matches:
                            # Find line number and get the line content
                            line_num = content[: match.start()].count("\n") + 1
                            line_content = (
                                lines[line_num - 1] if line_num <= len(lines) else ""
                            )

                            # Skip if file is a security example/test file
                            file_name = str(file_path)
                            if any(exp in file_name for exp in SECURITY_EXAMPLE_PATHS):
                                continue

                            # Skip if this looks like detection/scanning code
                            if self._is_detection_code(line_content, match.group()):
                                continue

                            # Phase 2: Skip safe SQL parameterization patterns
                            if vuln_type == "sql_injection":
                                if self._is_safe_sql_parameterization(
                                    line_content,
                                    match.group(),
                                    content,
                                ):
                                    continue

                            # Skip fake/test credentials
                            if vuln_type == "hardcoded_secret":
                                if self._is_fake_credential(match.group()):
                                    continue

                            # Phase 2: Skip safe random usage (tests, demos, documented)
                            if vuln_type == "insecure_random":
                                if self._is_safe_random_usage(
                                    line_content,
                                    file_name,
                                    content,
                                ):
                                    continue

                            # Skip command_injection in documentation strings
                            if vuln_type == "command_injection":
                                if self._is_documentation_or_string(
                                    line_content,
                                    match.group(),
                                ):
                                    continue

                            # Check if this is a test file - downgrade to informational
                            is_test_file = any(
                                re.search(pat, file_name) for pat in TEST_FILE_PATTERNS
                            )

                            # Skip test file findings for hardcoded_secret (expected in tests)
                            if is_test_file and vuln_type == "hardcoded_secret":
                                continue

                            findings.append(
                                {
                                    "type": vuln_type,
                                    "file": str(file_path),
                                    "line": line_num,
                                    # Cap matched text to keep findings small.
                                    "match": match.group()[:100],
                                    "severity": (
                                        "low" if is_test_file else vuln_info["severity"]
                                    ),
                                    "owasp": vuln_info["owasp"],
                                    "is_test": is_test_file,
                                },
                            )
            except OSError:
                # Unreadable file (permissions, race with deletion):
                # skip it and continue scanning the rest.
                continue

    # Phase 3: Apply AST-based filtering for command injection
    try:
        from .security_audit_phase3 import apply_phase3_filtering

        # Separate command injection findings
        cmd_findings = [f for f in findings if f["type"] == "command_injection"]
        other_findings = [f for f in findings if f["type"] != "command_injection"]

        # Apply Phase 3 filtering to command injection
        filtered_cmd = apply_phase3_filtering(cmd_findings)

        # Combine back
        findings = other_findings + filtered_cmd

        logger.info(
            f"Phase 3: Filtered command_injection from {len(cmd_findings)} to {len(filtered_cmd)} "
            f"({len(cmd_findings) - len(filtered_cmd)} false positives removed)"
        )
    except ImportError:
        logger.debug("Phase 3 module not available, skipping AST-based filtering")
    except Exception as e:
        # Filtering is an enhancement: never fail triage because of it.
        logger.warning(f"Phase 3 filtering failed: {e}")

    # === AUTH STRATEGY INTEGRATION ===
    # Detect codebase size and recommend auth mode (first stage only)
    if self.enable_auth_strategy:
        try:
            from attune.models import (
                count_lines_of_code,
                get_auth_strategy,
                get_module_size_category,
            )

            # Calculate codebase size
            codebase_lines = 0
            if target.exists():
                if target.is_file():
                    codebase_lines = count_lines_of_code(target)
                elif target.is_dir():
                    # Sum lines across all Python files
                    for py_file in target.rglob("*.py"):
                        try:
                            codebase_lines += count_lines_of_code(py_file)
                        except Exception:
                            pass

            if codebase_lines > 0:
                # Get auth strategy (first-time setup if needed)
                strategy = get_auth_strategy()

                # Get recommended auth mode
                recommended_mode = strategy.get_recommended_mode(codebase_lines)
                self._auth_mode_used = recommended_mode.value

                # Get size category
                size_category = get_module_size_category(codebase_lines)

                # Log recommendation
                logger.info(
                    f"Codebase: {target} ({codebase_lines} LOC, {size_category})"
                )
                logger.info(f"Recommended auth mode: {recommended_mode.value}")

                # Get cost estimate
                cost_estimate = strategy.estimate_cost(codebase_lines, recommended_mode)

                if recommended_mode.value == "subscription":
                    logger.info(
                        f"Cost: {cost_estimate['quota_cost']} "
                        f"(fits in {cost_estimate['fits_in_context']} context)"
                    )
                else:  # API
                    logger.info(
                        f"Cost: ~${cost_estimate['monetary_cost']:.4f} "
                        f"(1M context window)"
                    )

        except Exception as e:
            # Don't fail workflow if auth strategy fails
            logger.warning(f"Auth strategy detection failed: {e}")

    # Rough token estimates (~4 characters per token heuristic).
    input_tokens = len(str(input_data)) // 4
    output_tokens = len(str(findings)) // 4

    return (
        {
            "findings": findings,
            "files_scanned": files_scanned,
            "finding_count": len(findings),
            **input_data,
        },
        input_tokens,
        output_tokens,
    )
|
|
510
|
+
|
|
511
|
+
async def _analyze(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
    """Deep analysis of flagged areas.

    Filters findings against team decisions and performs
    deeper analysis of genuine security concerns.
    """
    analyzed: list[dict] = []

    # Map a recorded team decision to the status it implies; anything
    # unrecognized (or no decision at all) falls through to review.
    status_for = {
        "false_positive": "false_positive",
        "accepted": "accepted_risk",
        "deferred": "deferred",
    }

    for finding in input_data.get("findings", []):
        decision = self._team_decisions.get(finding.get("type", ""))
        verdict = decision.get("decision") if decision else None
        status = status_for.get(verdict, "needs_review")
        finding["status"] = status

        if status == "needs_review":
            # No applicable team decision - attach context analysis.
            finding["analysis"] = self._analyze_finding(finding)
        else:
            finding["decision_reason"] = decision.get("reason", "")
            if status == "false_positive":
                # Only false-positive decisions record who made the call.
                finding["decided_by"] = decision.get("decided_by", "")

        analyzed.append(finding)

    # Separate by status
    needs_review = [f for f in analyzed if f["status"] == "needs_review"]
    false_positives = [f for f in analyzed if f["status"] == "false_positive"]
    accepted = [f for f in analyzed if f["status"] == "accepted_risk"]

    input_tokens = len(str(input_data)) // 4
    output_tokens = len(str(analyzed)) // 4

    return (
        {
            "analyzed_findings": analyzed,
            "needs_review": needs_review,
            "false_positives": false_positives,
            "accepted_risks": accepted,
            "review_count": len(needs_review),
            **input_data,
        },
        input_tokens,
        output_tokens,
    )
|
|
567
|
+
|
|
568
|
+
def _analyze_finding(self, finding: dict) -> str:
|
|
569
|
+
"""Generate analysis context for a finding."""
|
|
570
|
+
vuln_type = finding.get("type", "")
|
|
571
|
+
analyses = {
|
|
572
|
+
"sql_injection": "Potential SQL injection. Verify parameterized input.",
|
|
573
|
+
"xss": "Potential XSS vulnerability. Check output escaping.",
|
|
574
|
+
"hardcoded_secret": "Hardcoded credential. Use env vars or secrets manager.",
|
|
575
|
+
"insecure_random": "Insecure random. Use secrets module instead.",
|
|
576
|
+
"path_traversal": "Potential path traversal. Validate file paths.",
|
|
577
|
+
"command_injection": "Potential command injection. Avoid shell=True.",
|
|
578
|
+
}
|
|
579
|
+
return analyses.get(vuln_type, "Review for security implications.")
|
|
580
|
+
|
|
581
|
+
def _is_detection_code(self, line_content: str, match_text: str) -> bool:
    """Check if a match is actually detection/scanning code, not a vulnerability.

    This prevents false positives when scanning security tools that contain
    patterns like 'if "eval(" in content:' which are detecting vulnerabilities,
    not introducing them.
    """
    # Any known detection idiom on the line means this is scanner code.
    if any(
        re.search(pattern, line_content, re.IGNORECASE)
        for pattern in DETECTION_PATTERNS
    ):
        return True

    # The match sitting inside a quoted string literal on the same line
    # (e.g. 'if "eval(" in content:') is a comparison, not a call.
    needle = match_text.strip()
    return f'"{needle}"' in line_content or f"'{needle}'" in line_content
|
|
599
|
+
|
|
600
|
+
def _is_fake_credential(self, match_text: str) -> bool:
    """Check if a matched credential is obviously fake/for testing.

    This prevents false positives for test fixtures using patterns like
    'AKIAIOSFODNN7EXAMPLE' (AWS official example) or 'test-key-not-real'.
    """
    return any(
        re.search(pattern, match_text, re.IGNORECASE)
        for pattern in FAKE_CREDENTIAL_PATTERNS
    )
|
|
610
|
+
|
|
611
|
+
def _is_documentation_or_string(self, line_content: str, match_text: str) -> bool:
|
|
612
|
+
"""Check if a command injection match is in documentation or string literals.
|
|
613
|
+
|
|
614
|
+
This prevents false positives for:
|
|
615
|
+
- Docstrings describing security issues
|
|
616
|
+
- String literals containing example vulnerable code
|
|
617
|
+
- Comments explaining vulnerabilities
|
|
618
|
+
"""
|
|
619
|
+
line = line_content.strip()
|
|
620
|
+
|
|
621
|
+
# Check if line is a comment or documentation
|
|
622
|
+
if line.startswith("#") or line.startswith("//") or line.startswith("*") or line.startswith("-"):
|
|
623
|
+
return True
|
|
624
|
+
|
|
625
|
+
# Check if inside a docstring (triple quotes)
|
|
626
|
+
if '"""' in line or "'''" in line:
|
|
627
|
+
return True
|
|
628
|
+
|
|
629
|
+
# Check if the match is inside a string literal being defined
|
|
630
|
+
# e.g., 'pattern = r"eval\("' or '"eval(" in content'
|
|
631
|
+
string_patterns = [
|
|
632
|
+
r'["\'].*' + re.escape(match_text.strip()[:10]) + r'.*["\']', # Inside quotes
|
|
633
|
+
r'r["\'].*' + re.escape(match_text.strip()[:10]), # Raw string
|
|
634
|
+
r'=\s*["\']', # String assignment
|
|
635
|
+
]
|
|
636
|
+
for pattern in string_patterns:
|
|
637
|
+
if re.search(pattern, line):
|
|
638
|
+
return True
|
|
639
|
+
|
|
640
|
+
# Check for common documentation patterns
|
|
641
|
+
doc_indicators = [
|
|
642
|
+
"example",
|
|
643
|
+
"vulnerable",
|
|
644
|
+
"insecure",
|
|
645
|
+
"dangerous",
|
|
646
|
+
"pattern",
|
|
647
|
+
"detect",
|
|
648
|
+
"scan",
|
|
649
|
+
"check for",
|
|
650
|
+
"look for",
|
|
651
|
+
]
|
|
652
|
+
line_lower = line.lower()
|
|
653
|
+
if any(ind in line_lower for ind in doc_indicators):
|
|
654
|
+
return True
|
|
655
|
+
|
|
656
|
+
return False
|
|
657
|
+
|
|
658
|
+
def _is_safe_sql_parameterization(self, line_content: str, match_text: str, file_content: str) -> bool:
    """Check if SQL query uses safe parameterization despite f-string usage.

    Phase 2 Enhancement: Detects safe patterns like:
    - placeholders = ",".join("?" * len(ids))
    - cursor.execute(f"... IN ({placeholders})", ids)

    This prevents false positives for the SQLite-recommended pattern
    of building dynamic placeholder strings.

    Args:
        line_content: The line containing the match (may be incomplete for multi-line)
        match_text: The matched text
        file_content: Full file content for context analysis

    Returns:
        True if this is safe parameterized SQL, False otherwise

    """
    # Get the position of the match in the full file content
    # NOTE(review): str.find returns the FIRST occurrence of match_text,
    # which may not be the occurrence this finding came from when the
    # same text appears more than once - heuristic only; confirm if this
    # method ever needs per-occurrence precision.
    match_pos = file_content.find(match_text)
    if match_pos == -1:
        # Try to find cursor.execute
        match_pos = file_content.find("cursor.execute")
        if match_pos == -1:
            # No anchor point at all: cannot establish safety.
            return False

    # Extract a larger context (next 200 chars after match)
    context = file_content[match_pos:match_pos + 200]

    # Also get lines before the match for placeholder detection
    lines_before = file_content[:match_pos].split("\n")
    recent_lines = lines_before[-10:] if len(lines_before) > 10 else lines_before

    # Pattern 1: Check if this is a placeholder-based parameterized query
    # Look for: cursor.execute(f"... IN ({placeholders})", params)
    if "placeholders" in context or any("placeholders" in line for line in recent_lines[-5:]):
        # Check if context has both f-string and separate parameters
        # Pattern: f"...{placeholders}..." followed by comma and params
        if re.search(r'f["\'][^"\']*\{placeholders\}[^"\']*["\']\s*,\s*\w+', context):
            return True  # Safe - has separate parameters

        # Also check if recent lines built the placeholders
        for prev_line in reversed(recent_lines):
            if "placeholders" in prev_line and '"?"' in prev_line and "join" in prev_line:
                # Found placeholder construction
                # Now check if the execute has separate parameters
                if "," in context and any(param in context for param in ["run_ids", "ids", "params", "values", ")"]):
                    return True

    # Pattern 2: Check if f-string only builds SQL structure with constants
    # Example: f"SELECT * FROM {TABLE_NAME}" where TABLE_NAME is a constant
    f_string_vars = re.findall(r'\{(\w+)\}', context)
    if f_string_vars:
        # Check if all variables are constants (UPPERCASE or table/column names)
        all_constants = all(
            var.isupper() or "TABLE" in var.upper() or "COLUMN" in var.upper()
            for var in f_string_vars
        )
        if all_constants:
            return True  # Safe - using constants, not user data

    # Pattern 3: Check for security note comments nearby
    # If developers added security notes, it's likely safe
    for prev_line in reversed(recent_lines[-3:]):
        if "security note" in prev_line.lower() and "safe" in prev_line.lower():
            return True

    return False
|
|
726
|
+
|
|
727
|
+
def _is_safe_random_usage(self, line_content: str, file_path: str, file_content: str) -> bool:
|
|
728
|
+
"""Check if random usage is in a safe context (tests, simulations, non-crypto).
|
|
729
|
+
|
|
730
|
+
Phase 2 Enhancement: Reduces false positives for random module usage
|
|
731
|
+
in test fixtures, A/B testing simulations, and demo code.
|
|
732
|
+
|
|
733
|
+
Args:
|
|
734
|
+
line_content: The line containing the match
|
|
735
|
+
file_path: Path to the file being scanned
|
|
736
|
+
file_content: Full file content for context analysis
|
|
737
|
+
|
|
738
|
+
Returns:
|
|
739
|
+
True if random usage is safe/documented, False if potentially insecure
|
|
740
|
+
"""
|
|
741
|
+
# Check if file is a test file
|
|
742
|
+
is_test = any(pattern in file_path.lower() for pattern in ["/test", "test_", "conftest"])
|
|
743
|
+
|
|
744
|
+
# Check for explicit security notes nearby
|
|
745
|
+
lines = file_content.split("\n")
|
|
746
|
+
line_index = None
|
|
747
|
+
for i, line in enumerate(lines):
|
|
748
|
+
if line_content.strip() in line:
|
|
749
|
+
line_index = i
|
|
750
|
+
break
|
|
751
|
+
|
|
752
|
+
if line_index is not None:
|
|
753
|
+
# Check 5 lines before and after for security notes
|
|
754
|
+
context_start = max(0, line_index - 5)
|
|
755
|
+
context_end = min(len(lines), line_index + 5)
|
|
756
|
+
context = "\n".join(lines[context_start:context_end]).lower()
|
|
757
|
+
|
|
758
|
+
# Look for clarifying comments
|
|
759
|
+
safe_indicators = [
|
|
760
|
+
"security note",
|
|
761
|
+
"not cryptographic",
|
|
762
|
+
"not for crypto",
|
|
763
|
+
"test data",
|
|
764
|
+
"demo data",
|
|
765
|
+
"simulation",
|
|
766
|
+
"reproducible",
|
|
767
|
+
"deterministic",
|
|
768
|
+
"fixed seed",
|
|
769
|
+
"not used for security",
|
|
770
|
+
"not used for secrets",
|
|
771
|
+
"not used for tokens",
|
|
772
|
+
]
|
|
773
|
+
|
|
774
|
+
if any(indicator in context for indicator in safe_indicators):
|
|
775
|
+
return True # Documented as safe
|
|
776
|
+
|
|
777
|
+
# Check for common safe random patterns
|
|
778
|
+
line_lower = line_content.lower()
|
|
779
|
+
|
|
780
|
+
# Pattern 1: Fixed seed (reproducible tests)
|
|
781
|
+
if "random.seed(" in line_lower:
|
|
782
|
+
return True # Fixed seed is for reproducibility, not security
|
|
783
|
+
|
|
784
|
+
# Pattern 2: A/B testing, simulations, demos
|
|
785
|
+
safe_contexts = [
|
|
786
|
+
"simulation",
|
|
787
|
+
"demo",
|
|
788
|
+
"a/b test",
|
|
789
|
+
"ab_test",
|
|
790
|
+
"fixture",
|
|
791
|
+
"mock",
|
|
792
|
+
"example",
|
|
793
|
+
"sample",
|
|
794
|
+
]
|
|
795
|
+
if any(context in file_path.lower() for context in safe_contexts):
|
|
796
|
+
return True
|
|
797
|
+
|
|
798
|
+
# If it's a test file without crypto indicators, it's probably safe
|
|
799
|
+
if is_test:
|
|
800
|
+
crypto_indicators = ["password", "secret", "token", "key", "crypto", "auth"]
|
|
801
|
+
if not any(indicator in file_path.lower() for indicator in crypto_indicators):
|
|
802
|
+
return True
|
|
803
|
+
|
|
804
|
+
return False
|
|
805
|
+
|
|
806
|
+
async def _assess(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
    """Risk scoring and severity classification.

    Calculates overall security risk score and identifies
    critical issues requiring immediate attention.

    When use_crew_for_assessment=True, uses SecurityAuditCrew's
    comprehensive analysis for enhanced vulnerability detection.

    Args:
        input_data: Workflow payload; reads ``needs_review`` (findings
            awaiting review from the analyze stage) and ``path`` (scan
            target forwarded to the crew).
        tier: Model tier for this stage (not used directly here).

    Returns:
        Tuple of (output dict with ``assessment`` and
        ``formatted_report`` added over ``input_data``, estimated input
        tokens, estimated output tokens).
    """
    # Lazy init so the crew import cost is only paid when assessing.
    await self._initialize_crew()

    needs_review = input_data.get("needs_review", [])

    # Count by severity
    severity_counts = {"critical": 0, "high": 0, "medium": 0, "low": 0}
    for finding in needs_review:
        sev = finding.get("severity", "low")
        severity_counts[sev] = severity_counts.get(sev, 0) + 1

    # Calculate risk score (0-100): weighted sum, capped at 100.
    risk_score = (
        severity_counts["critical"] * 25
        + severity_counts["high"] * 10
        + severity_counts["medium"] * 3
        + severity_counts["low"] * 1
    )
    risk_score = min(100, risk_score)

    # Set flag for skip logic: any high/critical finding forces the
    # remediation stage to run (consumed by should_skip_stage).
    self._has_critical = severity_counts["critical"] > 0 or severity_counts["high"] > 0

    # Group findings by OWASP category
    by_owasp: dict[str, list] = {}
    for finding in needs_review:
        owasp = finding.get("owasp", "Unknown")
        if owasp not in by_owasp:
            by_owasp[owasp] = []
        by_owasp[owasp].append(finding)

    # Use crew for enhanced assessment if available
    crew_enhanced = False
    crew_findings = []
    if self.use_crew_for_assessment and self._crew_available:
        target = input_data.get("path", ".")
        try:
            crew_report = await self._crew.audit(target=target)
            if crew_report and crew_report.findings:
                crew_enhanced = True
                # Convert crew findings to workflow format
                for finding in crew_report.findings:
                    crew_findings.append(
                        {
                            "type": finding.category.value,
                            "title": finding.title,
                            "description": finding.description,
                            "severity": finding.severity.value,
                            "file": finding.file_path or "",
                            "line": finding.line_number or 0,
                            "owasp": finding.category.value,
                            "remediation": finding.remediation or "",
                            "cwe_id": finding.cwe_id or "",
                            "cvss_score": finding.cvss_score or 0.0,
                            "source": "crew",
                        }
                    )
                # Update severity counts with crew findings
                for finding in crew_findings:
                    sev = finding.get("severity", "low")
                    severity_counts[sev] = severity_counts.get(sev, 0) + 1
                # Recalculate risk score with crew findings
                risk_score = (
                    severity_counts["critical"] * 25
                    + severity_counts["high"] * 10
                    + severity_counts["medium"] * 3
                    + severity_counts["low"] * 1
                )
                risk_score = min(100, risk_score)
        except Exception as e:
            # Crew analysis is a best-effort enhancement; a failure must
            # not break the pattern-based assessment.
            logger.warning(f"Crew assessment failed: {e}")

    # Merge crew findings with pattern-based findings
    all_critical = [f for f in needs_review if f.get("severity") == "critical"]
    all_high = [f for f in needs_review if f.get("severity") == "high"]
    if crew_enhanced:
        all_critical.extend([f for f in crew_findings if f.get("severity") == "critical"])
        all_high.extend([f for f in crew_findings if f.get("severity") == "high"])

    assessment = {
        "risk_score": risk_score,
        "risk_level": (
            "critical"
            if risk_score >= 75
            else "high" if risk_score >= 50 else "medium" if risk_score >= 25 else "low"
        ),
        "severity_breakdown": severity_counts,
        "by_owasp_category": {k: len(v) for k, v in by_owasp.items()},
        "critical_findings": all_critical,
        "high_findings": all_high,
        "crew_enhanced": crew_enhanced,
        "crew_findings_count": len(crew_findings) if crew_enhanced else 0,
    }

    # Rough token estimates (~4 characters per token heuristic).
    input_tokens = len(str(input_data)) // 4
    output_tokens = len(str(assessment)) // 4

    # Build output with assessment
    output = {
        "assessment": assessment,
        **input_data,
    }

    # Add formatted report for human readability
    output["formatted_report"] = format_security_report(output)

    return (
        output,
        input_tokens,
        output_tokens,
    )
|
|
925
|
+
|
|
926
|
+
async def _remediate(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
    """Generate remediation plan for security issues.

    Creates actionable remediation steps prioritized by
    severity and grouped by OWASP category.

    When use_crew_for_remediation=True, uses SecurityAuditCrew's
    Remediation Expert agent for enhanced recommendations.

    Supports XML-enhanced prompts when enabled in workflow config.

    Args:
        input_data: Workflow payload; reads ``assessment`` (from the
            assess stage) and ``target``/``path``.
        tier: Model tier used on the legacy LLM call path.

    Returns:
        Tuple of (result dict with ``remediation_plan`` and related
        metadata merged over ``input_data``, input tokens, output tokens).
    """
    # Optional adapter module; without it, treat the crew as unavailable.
    try:
        from .security_adapters import _check_crew_available

        adapters_available = True
    except ImportError:
        adapters_available = False
        _check_crew_available = lambda: False  # noqa: E731 - cheap stub fallback

    assessment = input_data.get("assessment", {})
    critical = assessment.get("critical_findings", [])
    high = assessment.get("high_findings", [])
    target = input_data.get("target", input_data.get("path", ""))

    crew_remediation = None
    crew_enhanced = False

    # Try crew-based remediation first if enabled
    if self.use_crew_for_remediation and adapters_available and _check_crew_available():
        crew_remediation = await self._get_crew_remediation(target, critical + high, assessment)
        if crew_remediation:
            crew_enhanced = True

    # Build findings summary for LLM
    findings_summary = []
    for f in critical:
        findings_summary.append(
            f"CRITICAL: {f.get('type')} in {f.get('file')}:{f.get('line')} - {f.get('owasp')}",
        )
    for f in high:
        findings_summary.append(
            f"HIGH: {f.get('type')} in {f.get('file')}:{f.get('line')} - {f.get('owasp')}",
        )

    # Build input payload for prompt
    input_payload = f"""Target: {target or "codebase"}

Findings:
{chr(10).join(findings_summary) if findings_summary else "No critical or high findings"}

Risk Score: {assessment.get("risk_score", 0)}/100
Risk Level: {assessment.get("risk_level", "unknown")}

Severity Breakdown: {json.dumps(assessment.get("severity_breakdown", {}), indent=2)}"""

    # Check if XML prompts are enabled
    if self._is_xml_enabled():
        # Use XML-enhanced prompt
        user_message = self._render_xml_prompt(
            role="application security engineer",
            goal="Generate a comprehensive remediation plan for security vulnerabilities",
            instructions=[
                "Explain each vulnerability and its potential impact",
                "Provide specific remediation steps with code examples",
                "Suggest preventive measures to avoid similar issues",
                "Reference relevant OWASP guidelines",
                "Prioritize by severity (critical first, then high)",
            ],
            constraints=[
                "Be specific and actionable",
                "Include code examples where helpful",
                "Group fixes by severity",
            ],
            input_type="security_findings",
            input_payload=input_payload,
            extra={
                "risk_score": assessment.get("risk_score", 0),
                "risk_level": assessment.get("risk_level", "unknown"),
            },
        )
        system = None  # XML prompt includes all context
    else:
        # Use legacy plain text prompts
        system = """You are a security expert in application security and OWASP.
Generate a comprehensive remediation plan for the security findings.

For each finding:
1. Explain the vulnerability and its potential impact
2. Provide specific remediation steps with code examples
3. Suggest preventive measures to avoid similar issues
4. Reference relevant OWASP guidelines

Prioritize by severity (critical first, then high).
Be specific and actionable."""

        user_message = f"""Generate a remediation plan for these security findings:

{input_payload}

Provide a detailed remediation plan with specific fixes."""

    # Try executor-based execution first (Phase 3 pattern)
    if self._executor is not None or self._api_key:
        try:
            step = SECURITY_STEPS["remediate"]
            response, input_tokens, output_tokens, cost = await self.run_step_with_executor(
                step=step,
                prompt=user_message,
                system=system,
            )
        except Exception:
            # Fall back to legacy _call_llm if executor fails
            response, input_tokens, output_tokens = await self._call_llm(
                tier,
                system or "",
                user_message,
                max_tokens=3000,
            )
    else:
        # Legacy path for backward compatibility
        response, input_tokens, output_tokens = await self._call_llm(
            tier,
            system or "",
            user_message,
            max_tokens=3000,
        )

    # Parse XML response if enforcement is enabled
    parsed_data = self._parse_xml_response(response)

    # Merge crew remediation if available
    if crew_enhanced and crew_remediation:
        response = self._merge_crew_remediation(response, crew_remediation)

    result = {
        "remediation_plan": response,
        "remediation_count": len(critical) + len(high),
        "risk_score": assessment.get("risk_score", 0),
        "risk_level": assessment.get("risk_level", "unknown"),
        "model_tier_used": tier.value,
        "crew_enhanced": crew_enhanced,
        "auth_mode_used": self._auth_mode_used,  # Track recommended auth mode
        **input_data,  # Merge all previous stage data
    }

    # Add crew-specific fields if enhanced
    if crew_enhanced and crew_remediation:
        result["crew_findings"] = crew_remediation.get("findings", [])
        result["crew_agents_used"] = crew_remediation.get("agents_used", [])

    # Merge parsed XML data if available
    if parsed_data.get("xml_parsed"):
        result.update(
            {
                "xml_parsed": True,
                "summary": parsed_data.get("summary"),
                "findings": parsed_data.get("findings", []),
                "checklist": parsed_data.get("checklist", []),
            },
        )

    return (result, input_tokens, output_tokens)
|
|
1088
|
+
|
|
1089
|
+
async def _get_crew_remediation(
|
|
1090
|
+
self,
|
|
1091
|
+
target: str,
|
|
1092
|
+
findings: list,
|
|
1093
|
+
assessment: dict,
|
|
1094
|
+
) -> dict | None:
|
|
1095
|
+
"""Get remediation recommendations from SecurityAuditCrew.
|
|
1096
|
+
|
|
1097
|
+
Args:
|
|
1098
|
+
target: Path to codebase
|
|
1099
|
+
findings: List of findings needing remediation
|
|
1100
|
+
assessment: Current assessment dict
|
|
1101
|
+
|
|
1102
|
+
Returns:
|
|
1103
|
+
Crew results dict or None if failed
|
|
1104
|
+
|
|
1105
|
+
"""
|
|
1106
|
+
try:
|
|
1107
|
+
from attune_llm.agent_factory.crews import (
|
|
1108
|
+
SecurityAuditConfig,
|
|
1109
|
+
SecurityAuditCrew,
|
|
1110
|
+
)
|
|
1111
|
+
|
|
1112
|
+
from .security_adapters import (
|
|
1113
|
+
crew_report_to_workflow_format,
|
|
1114
|
+
workflow_findings_to_crew_format,
|
|
1115
|
+
)
|
|
1116
|
+
|
|
1117
|
+
# Configure crew for focused remediation
|
|
1118
|
+
config = SecurityAuditConfig(
|
|
1119
|
+
scan_depth="quick", # Skip deep scan, focus on remediation
|
|
1120
|
+
**self.crew_config,
|
|
1121
|
+
)
|
|
1122
|
+
crew = SecurityAuditCrew(config=config)
|
|
1123
|
+
|
|
1124
|
+
# Convert findings to crew format for context
|
|
1125
|
+
crew_findings = workflow_findings_to_crew_format(findings)
|
|
1126
|
+
|
|
1127
|
+
# Run audit with remediation focus
|
|
1128
|
+
context = {
|
|
1129
|
+
"focus_areas": ["remediation"],
|
|
1130
|
+
"existing_findings": crew_findings,
|
|
1131
|
+
"skip_detection": True, # We already have findings
|
|
1132
|
+
"risk_score": assessment.get("risk_score", 0),
|
|
1133
|
+
}
|
|
1134
|
+
|
|
1135
|
+
report = await crew.audit(target, context=context)
|
|
1136
|
+
|
|
1137
|
+
if report:
|
|
1138
|
+
return crew_report_to_workflow_format(report)
|
|
1139
|
+
return None
|
|
1140
|
+
|
|
1141
|
+
except Exception as e:
|
|
1142
|
+
import logging
|
|
1143
|
+
|
|
1144
|
+
logging.getLogger(__name__).warning(f"Crew remediation failed: {e}")
|
|
1145
|
+
return None
|
|
1146
|
+
|
|
1147
|
+
def _merge_crew_remediation(self, llm_response: str, crew_remediation: dict) -> str:
|
|
1148
|
+
"""Merge crew remediation recommendations with LLM response.
|
|
1149
|
+
|
|
1150
|
+
Args:
|
|
1151
|
+
llm_response: LLM-generated remediation plan
|
|
1152
|
+
crew_remediation: Crew results in workflow format
|
|
1153
|
+
|
|
1154
|
+
Returns:
|
|
1155
|
+
Merged response with crew enhancements
|
|
1156
|
+
|
|
1157
|
+
"""
|
|
1158
|
+
crew_findings = crew_remediation.get("findings", [])
|
|
1159
|
+
|
|
1160
|
+
if not crew_findings:
|
|
1161
|
+
return llm_response
|
|
1162
|
+
|
|
1163
|
+
# Build crew section efficiently (avoid O(n²) string concat)
|
|
1164
|
+
parts = [
|
|
1165
|
+
"\n\n## Enhanced Remediation (SecurityAuditCrew)\n\n",
|
|
1166
|
+
f"**Agents Used**: {', '.join(crew_remediation.get('agents_used', []))}\n\n",
|
|
1167
|
+
]
|
|
1168
|
+
|
|
1169
|
+
for finding in crew_findings:
|
|
1170
|
+
if finding.get("remediation"):
|
|
1171
|
+
parts.append(f"### {finding.get('title', 'Finding')}\n")
|
|
1172
|
+
parts.append(f"**Severity**: {finding.get('severity', 'unknown').upper()}\n")
|
|
1173
|
+
if finding.get("cwe_id"):
|
|
1174
|
+
parts.append(f"**CWE**: {finding.get('cwe_id')}\n")
|
|
1175
|
+
if finding.get("cvss_score"):
|
|
1176
|
+
parts.append(f"**CVSS Score**: {finding.get('cvss_score')}\n")
|
|
1177
|
+
parts.append(f"\n**Remediation**:\n{finding.get('remediation')}\n\n")
|
|
1178
|
+
|
|
1179
|
+
return llm_response + "".join(parts)
|
|
1180
|
+
|
|
1181
|
+
def _get_remediation_action(self, finding: dict) -> str:
|
|
1182
|
+
"""Generate specific remediation action for a finding."""
|
|
1183
|
+
actions = {
|
|
1184
|
+
"sql_injection": "Use parameterized queries or ORM. Never interpolate user input.",
|
|
1185
|
+
"xss": "Use framework's auto-escaping. Sanitize user input.",
|
|
1186
|
+
"hardcoded_secret": "Move to env vars or use a secrets manager.",
|
|
1187
|
+
"insecure_random": "Use secrets.token_hex() or secrets.randbelow().",
|
|
1188
|
+
"path_traversal": "Use os.path.realpath() and validate paths.",
|
|
1189
|
+
"command_injection": "Use subprocess with shell=False and argument lists.",
|
|
1190
|
+
}
|
|
1191
|
+
return actions.get(finding.get("type", ""), "Apply security best practices.")
|
|
1192
|
+
|
|
1193
|
+
|
|
1194
|
+
def format_security_report(output: dict) -> str:
    """Format security audit output as a human-readable report.

    This format is designed to be:
    - Easy for humans to read and understand
    - Easy to copy/paste to an AI assistant for remediation help
    - Actionable with clear severity levels and file locations

    Args:
        output: The workflow output dictionary

    Returns:
        Formatted report string

    """
    heavy_rule = "=" * 60
    light_rule = "-" * 60
    # Hoisted out of the loop: previously rebuilt on every iteration.
    severity_icons = {"critical": "🔴", "high": "🟠", "medium": "🟡", "low": "🟢"}

    lines = []

    # Header
    assessment = output.get("assessment", {})
    risk_level = assessment.get("risk_level", "unknown").upper()
    risk_score = assessment.get("risk_score", 0)

    lines.append(heavy_rule)
    lines.append("SECURITY AUDIT REPORT")
    lines.append(heavy_rule)
    lines.append("")
    lines.append(f"Risk Level: {risk_level}")
    lines.append(f"Risk Score: {risk_score}/100")
    lines.append("")

    # Severity breakdown
    breakdown = assessment.get("severity_breakdown", {})
    lines.append("Severity Summary:")
    for sev in ("critical", "high", "medium", "low"):
        count = breakdown.get(sev, 0)
        icon = severity_icons.get(sev, "⚪")
        lines.append(f"  {icon} {sev.capitalize()}: {count}")
    lines.append("")

    # Files scanned
    files_scanned = output.get("files_scanned", 0)
    lines.append(f"Files Scanned: {files_scanned}")
    lines.append("")

    # Findings requiring review
    needs_review = output.get("needs_review", [])
    if needs_review:
        lines.append(light_rule)
        lines.append("FINDINGS REQUIRING REVIEW")
        lines.append(light_rule)
        lines.append("")

        for i, finding in enumerate(needs_review, 1):
            severity = finding.get("severity", "unknown").upper()
            vuln_type = finding.get("type", "unknown")
            # NOTE(review): path shortening is hard-coded to the
            # "Empathy-framework/" checkout prefix — confirm this matches
            # the scanned repository, or parameterize it.
            file_path = finding.get("file", "").split("Empathy-framework/")[-1]
            line_num = finding.get("line", 0)
            match = finding.get("match", "")[:50]  # keep matched snippet short
            owasp = finding.get("owasp", "")
            test_marker = " [TEST FILE]" if finding.get("is_test", False) else ""

            lines.append(f"{i}. [{severity}]{test_marker} {vuln_type}")
            lines.append(f"   File: {file_path}:{line_num}")
            lines.append(f"   Match: {match}")
            lines.append(f"   OWASP: {owasp}")
            analysis = finding.get("analysis", "")
            if analysis:
                lines.append(f"   Analysis: {analysis}")
            lines.append("")

    # Accepted risks
    accepted = output.get("accepted_risks", [])
    if accepted:
        lines.append(light_rule)
        lines.append("ACCEPTED RISKS (No Action Required)")
        lines.append(light_rule)
        lines.append("")

        for finding in accepted:
            vuln_type = finding.get("type", "unknown")
            file_path = finding.get("file", "").split("Empathy-framework/")[-1]
            line_num = finding.get("line", 0)
            lines.append(f"  - {vuln_type} in {file_path}:{line_num}")
            reason = finding.get("decision_reason", "")
            if reason:
                lines.append(f"    Reason: {reason}")
        lines.append("")

    # Remediation plan if present
    remediation = output.get("remediation_plan", "")
    if remediation and remediation.strip():
        lines.append(light_rule)
        lines.append("REMEDIATION PLAN")
        lines.append(light_rule)
        lines.append("")
        lines.append(remediation)
        lines.append("")

    # Footer with action items. The closing rule is emitted unconditionally
    # so the report is properly terminated in BOTH branches (previously the
    # ACTION REQUIRED branch left the report without a closing separator).
    lines.append(heavy_rule)
    if needs_review:
        lines.append("ACTION REQUIRED:")
        lines.append(f"  Review {len(needs_review)} finding(s) above")
        lines.append("  Copy this report to Claude Code for remediation help")
    else:
        lines.append("STATUS: All clear - no critical or high findings")
    lines.append(heavy_rule)

    return "\n".join(lines)
|
|
1307
|
+
def main():
    """CLI entry point for security audit workflow."""
    import asyncio

    async def _run_audit():
        # Audit all Python files under the current directory.
        workflow = SecurityAuditWorkflow()
        outcome = await workflow.execute(path=".", file_types=[".py"])

        # Use the new formatted report
        print(format_security_report(outcome.final_output))

        # Cost summary for the run.
        cost = outcome.cost_report
        print("\nCost Report:")
        print(f"  Total Cost: ${cost.total_cost:.4f}")
        savings = cost.savings
        pct = cost.savings_percent
        print(f"  Savings: ${savings:.4f} ({pct:.1f}%)")

    asyncio.run(_run_audit())


if __name__ == "__main__":
    main()