empathy-framework 3.2.3__py3-none-any.whl → 3.8.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- coach_wizards/__init__.py +11 -12
- coach_wizards/accessibility_wizard.py +12 -12
- coach_wizards/api_wizard.py +12 -12
- coach_wizards/base_wizard.py +26 -20
- coach_wizards/cicd_wizard.py +15 -13
- coach_wizards/code_reviewer_README.md +60 -0
- coach_wizards/code_reviewer_wizard.py +180 -0
- coach_wizards/compliance_wizard.py +12 -12
- coach_wizards/database_wizard.py +12 -12
- coach_wizards/debugging_wizard.py +12 -12
- coach_wizards/documentation_wizard.py +12 -12
- coach_wizards/generate_wizards.py +1 -2
- coach_wizards/localization_wizard.py +101 -19
- coach_wizards/migration_wizard.py +12 -12
- coach_wizards/monitoring_wizard.py +12 -12
- coach_wizards/observability_wizard.py +12 -12
- coach_wizards/performance_wizard.py +12 -12
- coach_wizards/prompt_engineering_wizard.py +22 -25
- coach_wizards/refactoring_wizard.py +12 -12
- coach_wizards/scaling_wizard.py +12 -12
- coach_wizards/security_wizard.py +12 -12
- coach_wizards/testing_wizard.py +12 -12
- {empathy_framework-3.2.3.dist-info → empathy_framework-3.8.2.dist-info}/METADATA +513 -58
- empathy_framework-3.8.2.dist-info/RECORD +333 -0
- empathy_framework-3.8.2.dist-info/entry_points.txt +22 -0
- {empathy_framework-3.2.3.dist-info → empathy_framework-3.8.2.dist-info}/top_level.txt +5 -1
- empathy_healthcare_plugin/__init__.py +1 -2
- empathy_healthcare_plugin/monitors/__init__.py +9 -0
- empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py +315 -0
- empathy_healthcare_plugin/monitors/monitoring/__init__.py +44 -0
- empathy_healthcare_plugin/monitors/monitoring/protocol_checker.py +300 -0
- empathy_healthcare_plugin/monitors/monitoring/protocol_loader.py +214 -0
- empathy_healthcare_plugin/monitors/monitoring/sensor_parsers.py +306 -0
- empathy_healthcare_plugin/monitors/monitoring/trajectory_analyzer.py +389 -0
- empathy_llm_toolkit/__init__.py +7 -7
- empathy_llm_toolkit/agent_factory/__init__.py +53 -0
- empathy_llm_toolkit/agent_factory/adapters/__init__.py +85 -0
- empathy_llm_toolkit/agent_factory/adapters/autogen_adapter.py +312 -0
- empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +454 -0
- empathy_llm_toolkit/agent_factory/adapters/haystack_adapter.py +298 -0
- empathy_llm_toolkit/agent_factory/adapters/langchain_adapter.py +362 -0
- empathy_llm_toolkit/agent_factory/adapters/langgraph_adapter.py +333 -0
- empathy_llm_toolkit/agent_factory/adapters/native.py +228 -0
- empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +426 -0
- empathy_llm_toolkit/agent_factory/base.py +305 -0
- empathy_llm_toolkit/agent_factory/crews/__init__.py +67 -0
- empathy_llm_toolkit/agent_factory/crews/code_review.py +1113 -0
- empathy_llm_toolkit/agent_factory/crews/health_check.py +1246 -0
- empathy_llm_toolkit/agent_factory/crews/refactoring.py +1128 -0
- empathy_llm_toolkit/agent_factory/crews/security_audit.py +1018 -0
- empathy_llm_toolkit/agent_factory/decorators.py +286 -0
- empathy_llm_toolkit/agent_factory/factory.py +558 -0
- empathy_llm_toolkit/agent_factory/framework.py +192 -0
- empathy_llm_toolkit/agent_factory/memory_integration.py +324 -0
- empathy_llm_toolkit/agent_factory/resilient.py +320 -0
- empathy_llm_toolkit/claude_memory.py +14 -15
- empathy_llm_toolkit/cli/__init__.py +8 -0
- empathy_llm_toolkit/cli/sync_claude.py +487 -0
- empathy_llm_toolkit/code_health.py +177 -22
- empathy_llm_toolkit/config/__init__.py +29 -0
- empathy_llm_toolkit/config/unified.py +295 -0
- empathy_llm_toolkit/contextual_patterns.py +11 -12
- empathy_llm_toolkit/core.py +51 -49
- empathy_llm_toolkit/git_pattern_extractor.py +16 -12
- empathy_llm_toolkit/levels.py +6 -13
- empathy_llm_toolkit/pattern_confidence.py +14 -18
- empathy_llm_toolkit/pattern_resolver.py +10 -12
- empathy_llm_toolkit/pattern_summary.py +13 -11
- empathy_llm_toolkit/providers.py +194 -28
- empathy_llm_toolkit/routing/__init__.py +32 -0
- empathy_llm_toolkit/routing/model_router.py +362 -0
- empathy_llm_toolkit/security/IMPLEMENTATION_SUMMARY.md +413 -0
- empathy_llm_toolkit/security/PHASE2_COMPLETE.md +384 -0
- empathy_llm_toolkit/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- empathy_llm_toolkit/security/QUICK_REFERENCE.md +316 -0
- empathy_llm_toolkit/security/README.md +262 -0
- empathy_llm_toolkit/security/__init__.py +62 -0
- empathy_llm_toolkit/security/audit_logger.py +929 -0
- empathy_llm_toolkit/security/audit_logger_example.py +152 -0
- empathy_llm_toolkit/security/pii_scrubber.py +640 -0
- empathy_llm_toolkit/security/secrets_detector.py +678 -0
- empathy_llm_toolkit/security/secrets_detector_example.py +304 -0
- empathy_llm_toolkit/security/secure_memdocs.py +1192 -0
- empathy_llm_toolkit/security/secure_memdocs_example.py +278 -0
- empathy_llm_toolkit/session_status.py +18 -20
- empathy_llm_toolkit/state.py +20 -21
- empathy_llm_toolkit/wizards/__init__.py +38 -0
- empathy_llm_toolkit/wizards/base_wizard.py +364 -0
- empathy_llm_toolkit/wizards/customer_support_wizard.py +190 -0
- empathy_llm_toolkit/wizards/healthcare_wizard.py +362 -0
- empathy_llm_toolkit/wizards/patient_assessment_README.md +64 -0
- empathy_llm_toolkit/wizards/patient_assessment_wizard.py +193 -0
- empathy_llm_toolkit/wizards/technology_wizard.py +194 -0
- empathy_os/__init__.py +76 -77
- empathy_os/adaptive/__init__.py +13 -0
- empathy_os/adaptive/task_complexity.py +127 -0
- empathy_os/{monitoring.py → agent_monitoring.py} +27 -27
- empathy_os/cache/__init__.py +117 -0
- empathy_os/cache/base.py +166 -0
- empathy_os/cache/dependency_manager.py +253 -0
- empathy_os/cache/hash_only.py +248 -0
- empathy_os/cache/hybrid.py +390 -0
- empathy_os/cache/storage.py +282 -0
- empathy_os/cli.py +515 -109
- empathy_os/cli_unified.py +189 -42
- empathy_os/config/__init__.py +63 -0
- empathy_os/config/xml_config.py +239 -0
- empathy_os/config.py +87 -36
- empathy_os/coordination.py +48 -54
- empathy_os/core.py +90 -99
- empathy_os/cost_tracker.py +20 -23
- empathy_os/dashboard/__init__.py +15 -0
- empathy_os/dashboard/server.py +743 -0
- empathy_os/discovery.py +9 -11
- empathy_os/emergence.py +20 -21
- empathy_os/exceptions.py +18 -30
- empathy_os/feedback_loops.py +27 -30
- empathy_os/levels.py +31 -34
- empathy_os/leverage_points.py +27 -28
- empathy_os/logging_config.py +11 -12
- empathy_os/memory/__init__.py +195 -0
- empathy_os/memory/claude_memory.py +466 -0
- empathy_os/memory/config.py +224 -0
- empathy_os/memory/control_panel.py +1298 -0
- empathy_os/memory/edges.py +179 -0
- empathy_os/memory/graph.py +567 -0
- empathy_os/memory/long_term.py +1194 -0
- empathy_os/memory/nodes.py +179 -0
- empathy_os/memory/redis_bootstrap.py +540 -0
- empathy_os/memory/security/__init__.py +31 -0
- empathy_os/memory/security/audit_logger.py +930 -0
- empathy_os/memory/security/pii_scrubber.py +640 -0
- empathy_os/memory/security/secrets_detector.py +678 -0
- empathy_os/memory/short_term.py +2119 -0
- empathy_os/memory/storage/__init__.py +15 -0
- empathy_os/memory/summary_index.py +583 -0
- empathy_os/memory/unified.py +619 -0
- empathy_os/metrics/__init__.py +12 -0
- empathy_os/metrics/prompt_metrics.py +190 -0
- empathy_os/models/__init__.py +136 -0
- empathy_os/models/__main__.py +13 -0
- empathy_os/models/cli.py +655 -0
- empathy_os/models/empathy_executor.py +354 -0
- empathy_os/models/executor.py +252 -0
- empathy_os/models/fallback.py +671 -0
- empathy_os/models/provider_config.py +563 -0
- empathy_os/models/registry.py +382 -0
- empathy_os/models/tasks.py +302 -0
- empathy_os/models/telemetry.py +548 -0
- empathy_os/models/token_estimator.py +378 -0
- empathy_os/models/validation.py +274 -0
- empathy_os/monitoring/__init__.py +52 -0
- empathy_os/monitoring/alerts.py +23 -0
- empathy_os/monitoring/alerts_cli.py +268 -0
- empathy_os/monitoring/multi_backend.py +271 -0
- empathy_os/monitoring/otel_backend.py +363 -0
- empathy_os/optimization/__init__.py +19 -0
- empathy_os/optimization/context_optimizer.py +272 -0
- empathy_os/pattern_library.py +29 -28
- empathy_os/persistence.py +30 -34
- empathy_os/platform_utils.py +261 -0
- empathy_os/plugins/__init__.py +28 -0
- empathy_os/plugins/base.py +361 -0
- empathy_os/plugins/registry.py +268 -0
- empathy_os/project_index/__init__.py +30 -0
- empathy_os/project_index/cli.py +335 -0
- empathy_os/project_index/crew_integration.py +430 -0
- empathy_os/project_index/index.py +425 -0
- empathy_os/project_index/models.py +501 -0
- empathy_os/project_index/reports.py +473 -0
- empathy_os/project_index/scanner.py +538 -0
- empathy_os/prompts/__init__.py +61 -0
- empathy_os/prompts/config.py +77 -0
- empathy_os/prompts/context.py +177 -0
- empathy_os/prompts/parser.py +285 -0
- empathy_os/prompts/registry.py +313 -0
- empathy_os/prompts/templates.py +208 -0
- empathy_os/redis_config.py +144 -58
- empathy_os/redis_memory.py +53 -56
- empathy_os/resilience/__init__.py +56 -0
- empathy_os/resilience/circuit_breaker.py +256 -0
- empathy_os/resilience/fallback.py +179 -0
- empathy_os/resilience/health.py +300 -0
- empathy_os/resilience/retry.py +209 -0
- empathy_os/resilience/timeout.py +135 -0
- empathy_os/routing/__init__.py +43 -0
- empathy_os/routing/chain_executor.py +433 -0
- empathy_os/routing/classifier.py +217 -0
- empathy_os/routing/smart_router.py +234 -0
- empathy_os/routing/wizard_registry.py +307 -0
- empathy_os/templates.py +12 -11
- empathy_os/trust/__init__.py +28 -0
- empathy_os/trust/circuit_breaker.py +579 -0
- empathy_os/trust_building.py +44 -36
- empathy_os/validation/__init__.py +19 -0
- empathy_os/validation/xml_validator.py +281 -0
- empathy_os/wizard_factory_cli.py +170 -0
- empathy_os/{workflows.py → workflow_commands.py} +123 -31
- empathy_os/workflows/__init__.py +360 -0
- empathy_os/workflows/base.py +1660 -0
- empathy_os/workflows/bug_predict.py +962 -0
- empathy_os/workflows/code_review.py +960 -0
- empathy_os/workflows/code_review_adapters.py +310 -0
- empathy_os/workflows/code_review_pipeline.py +720 -0
- empathy_os/workflows/config.py +600 -0
- empathy_os/workflows/dependency_check.py +648 -0
- empathy_os/workflows/document_gen.py +1069 -0
- empathy_os/workflows/documentation_orchestrator.py +1205 -0
- empathy_os/workflows/health_check.py +679 -0
- empathy_os/workflows/keyboard_shortcuts/__init__.py +39 -0
- empathy_os/workflows/keyboard_shortcuts/generators.py +386 -0
- empathy_os/workflows/keyboard_shortcuts/parsers.py +414 -0
- empathy_os/workflows/keyboard_shortcuts/prompts.py +295 -0
- empathy_os/workflows/keyboard_shortcuts/schema.py +193 -0
- empathy_os/workflows/keyboard_shortcuts/workflow.py +505 -0
- empathy_os/workflows/manage_documentation.py +804 -0
- empathy_os/workflows/new_sample_workflow1.py +146 -0
- empathy_os/workflows/new_sample_workflow1_README.md +150 -0
- empathy_os/workflows/perf_audit.py +687 -0
- empathy_os/workflows/pr_review.py +748 -0
- empathy_os/workflows/progress.py +445 -0
- empathy_os/workflows/progress_server.py +322 -0
- empathy_os/workflows/refactor_plan.py +693 -0
- empathy_os/workflows/release_prep.py +808 -0
- empathy_os/workflows/research_synthesis.py +404 -0
- empathy_os/workflows/secure_release.py +585 -0
- empathy_os/workflows/security_adapters.py +297 -0
- empathy_os/workflows/security_audit.py +1046 -0
- empathy_os/workflows/step_config.py +234 -0
- empathy_os/workflows/test5.py +125 -0
- empathy_os/workflows/test5_README.md +158 -0
- empathy_os/workflows/test_gen.py +1855 -0
- empathy_os/workflows/test_lifecycle.py +526 -0
- empathy_os/workflows/test_maintenance.py +626 -0
- empathy_os/workflows/test_maintenance_cli.py +590 -0
- empathy_os/workflows/test_maintenance_crew.py +821 -0
- empathy_os/workflows/xml_enhanced_crew.py +285 -0
- empathy_software_plugin/__init__.py +1 -2
- empathy_software_plugin/cli/__init__.py +120 -0
- empathy_software_plugin/cli/inspect.py +362 -0
- empathy_software_plugin/cli.py +35 -26
- empathy_software_plugin/plugin.py +4 -8
- empathy_software_plugin/wizards/__init__.py +42 -0
- empathy_software_plugin/wizards/advanced_debugging_wizard.py +392 -0
- empathy_software_plugin/wizards/agent_orchestration_wizard.py +511 -0
- empathy_software_plugin/wizards/ai_collaboration_wizard.py +503 -0
- empathy_software_plugin/wizards/ai_context_wizard.py +441 -0
- empathy_software_plugin/wizards/ai_documentation_wizard.py +503 -0
- empathy_software_plugin/wizards/base_wizard.py +288 -0
- empathy_software_plugin/wizards/book_chapter_wizard.py +519 -0
- empathy_software_plugin/wizards/code_review_wizard.py +606 -0
- empathy_software_plugin/wizards/debugging/__init__.py +50 -0
- empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +414 -0
- empathy_software_plugin/wizards/debugging/config_loaders.py +442 -0
- empathy_software_plugin/wizards/debugging/fix_applier.py +469 -0
- empathy_software_plugin/wizards/debugging/language_patterns.py +383 -0
- empathy_software_plugin/wizards/debugging/linter_parsers.py +470 -0
- empathy_software_plugin/wizards/debugging/verification.py +369 -0
- empathy_software_plugin/wizards/enhanced_testing_wizard.py +537 -0
- empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +816 -0
- empathy_software_plugin/wizards/multi_model_wizard.py +501 -0
- empathy_software_plugin/wizards/pattern_extraction_wizard.py +422 -0
- empathy_software_plugin/wizards/pattern_retriever_wizard.py +400 -0
- empathy_software_plugin/wizards/performance/__init__.py +9 -0
- empathy_software_plugin/wizards/performance/bottleneck_detector.py +221 -0
- empathy_software_plugin/wizards/performance/profiler_parsers.py +278 -0
- empathy_software_plugin/wizards/performance/trajectory_analyzer.py +429 -0
- empathy_software_plugin/wizards/performance_profiling_wizard.py +305 -0
- empathy_software_plugin/wizards/prompt_engineering_wizard.py +425 -0
- empathy_software_plugin/wizards/rag_pattern_wizard.py +461 -0
- empathy_software_plugin/wizards/security/__init__.py +32 -0
- empathy_software_plugin/wizards/security/exploit_analyzer.py +290 -0
- empathy_software_plugin/wizards/security/owasp_patterns.py +241 -0
- empathy_software_plugin/wizards/security/vulnerability_scanner.py +604 -0
- empathy_software_plugin/wizards/security_analysis_wizard.py +322 -0
- empathy_software_plugin/wizards/security_learning_wizard.py +740 -0
- empathy_software_plugin/wizards/tech_debt_wizard.py +726 -0
- empathy_software_plugin/wizards/testing/__init__.py +27 -0
- empathy_software_plugin/wizards/testing/coverage_analyzer.py +459 -0
- empathy_software_plugin/wizards/testing/quality_analyzer.py +531 -0
- empathy_software_plugin/wizards/testing/test_suggester.py +533 -0
- empathy_software_plugin/wizards/testing_wizard.py +274 -0
- hot_reload/README.md +473 -0
- hot_reload/__init__.py +62 -0
- hot_reload/config.py +84 -0
- hot_reload/integration.py +228 -0
- hot_reload/reloader.py +298 -0
- hot_reload/watcher.py +179 -0
- hot_reload/websocket.py +176 -0
- scaffolding/README.md +589 -0
- scaffolding/__init__.py +35 -0
- scaffolding/__main__.py +14 -0
- scaffolding/cli.py +240 -0
- test_generator/__init__.py +38 -0
- test_generator/__main__.py +14 -0
- test_generator/cli.py +226 -0
- test_generator/generator.py +325 -0
- test_generator/risk_analyzer.py +216 -0
- workflow_patterns/__init__.py +33 -0
- workflow_patterns/behavior.py +249 -0
- workflow_patterns/core.py +76 -0
- workflow_patterns/output.py +99 -0
- workflow_patterns/registry.py +255 -0
- workflow_patterns/structural.py +288 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
- agents/code_inspection/patterns/inspection/recurring_B112.json +0 -18
- agents/code_inspection/patterns/inspection/recurring_F541.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_FORMAT.json +0 -25
- agents/code_inspection/patterns/inspection/recurring_bug_20250822_def456.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20250915_abc123.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_3c5b9951.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_97c0f72f.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_a0871d53.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_20251212_a9b6ec41.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_bug_null_001.json +0 -16
- agents/code_inspection/patterns/inspection/recurring_builtin.json +0 -16
- agents/compliance_anticipation_agent.py +0 -1427
- agents/epic_integration_wizard.py +0 -541
- agents/trust_building_behaviors.py +0 -891
- empathy_framework-3.2.3.dist-info/RECORD +0 -104
- empathy_framework-3.2.3.dist-info/entry_points.txt +0 -7
- empathy_llm_toolkit/htmlcov/status.json +0 -1
- empathy_llm_toolkit/security/htmlcov/status.json +0 -1
- {empathy_framework-3.2.3.dist-info → empathy_framework-3.8.2.dist-info}/WHEEL +0 -0
- {empathy_framework-3.2.3.dist-info → empathy_framework-3.8.2.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,1046 @@
|
|
|
1
|
+
"""Security Audit Workflow
|
|
2
|
+
|
|
3
|
+
OWASP-focused security scan with intelligent vulnerability assessment.
|
|
4
|
+
Integrates with team security decisions to filter known false positives.
|
|
5
|
+
|
|
6
|
+
Stages:
|
|
7
|
+
1. triage (CHEAP) - Quick scan for common vulnerability patterns
|
|
8
|
+
2. analyze (CAPABLE) - Deep analysis of flagged areas
|
|
9
|
+
3. assess (CAPABLE) - Risk scoring and severity classification
|
|
10
|
+
4. remediate (PREMIUM) - Generate remediation plan (conditional)
|
|
11
|
+
|
|
12
|
+
Copyright 2025 Smart-AI-Memory
|
|
13
|
+
Licensed under Fair Source License 0.9
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import json
|
|
17
|
+
import logging
|
|
18
|
+
import re
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
from typing import Any
|
|
21
|
+
|
|
22
|
+
from .base import BaseWorkflow, ModelTier
|
|
23
|
+
from .step_config import WorkflowStepConfig
|
|
24
|
+
|
|
25
|
+
logger = logging.getLogger(__name__)
|
|
26
|
+
|
|
27
|
+
# Define step configurations for executor-based execution.
# Only the "remediate" stage is routed through the executor with an explicit
# config; earlier stages (triage/analyze/assess) run via run_stage directly.
SECURITY_STEPS = {
    "remediate": WorkflowStepConfig(
        name="remediate",
        task_type="final_review",  # Premium tier task
        tier_hint="premium",
        description="Generate remediation plan for security vulnerabilities",
        max_tokens=3000,
    ),
}
|
|
37
|
+
|
|
38
|
+
# Directories to skip during scanning (build artifacts, third-party code).
# NOTE: membership is tested as a plain substring of the full file path
# (see _triage), so entries must be literal path fragments — glob syntax
# like "*.egg-info" would never match a substring test.
SKIP_DIRECTORIES = {
    ".git",
    "node_modules",
    "__pycache__",
    "venv",
    ".venv",
    "env",
    ".next",  # Next.js build output
    "dist",
    "build",
    ".tox",
    "site",  # MkDocs output
    "ebook-site",
    "website",  # Website build artifacts
    "anthropic-cookbook",  # Third-party examples
    ".eggs",
    ".egg-info",  # fixed: was "*.egg-info", whose literal '*' never matches a substring check
    "htmlcov",  # Coverage report artifacts
    "htmlcov_logging",  # Coverage report artifacts
    ".coverage",  # Coverage data
    "vscode-extension",  # VSCode extension code (separate security review)
    "vscode-memory-panel",  # VSCode panel code
    "wizard-dashboard",  # Dashboard build
}

# Patterns that indicate a line is DETECTION code, not vulnerable code.
# These help avoid false positives when scanning security tools.
DETECTION_PATTERNS = [
    r'["\']eval\s*\(["\']',  # String literal like "eval(" (detection, not execution)
    r'["\']exec\s*\(["\']',  # String literal like "exec(" (detection, not execution)
    r"in\s+content",  # Pattern detection like "eval(" in content
    r"re\.compile",  # Regex compilation for detection
    r"\.finditer\(",  # Regex matching for detection
    r"\.search\(",  # Regex searching for detection
]

# Known fake/test credential patterns to ignore
FAKE_CREDENTIAL_PATTERNS = [
    r"EXAMPLE",  # AWS example keys
    r"FAKE",
    r"TEST",
    r"your-.*-here",
    r'"your-key"',  # Placeholder key
    r"abc123xyz",
    r"\.\.\.",  # Placeholder with ellipsis
    r"test-key",
    r"mock",
    r'"hardcoded_secret"',  # Literal example text
    r'"secret"$',  # Generic "secret" as value
    r'"secret123"',  # Test password
    r'"password"$',  # Generic password as value
    r"_PATTERN",  # Pattern constants
    r"_EXAMPLE",  # Example constants
]

# Files/paths that contain security examples/tests (not vulnerabilities)
SECURITY_EXAMPLE_PATHS = [
    "owasp_patterns.py",
    "vulnerability_scanner.py",
    "test_security",
    "test_secrets",
    "test_owasp",
    "secrets_detector.py",  # Security tool with pattern definitions
    "pii_scrubber.py",  # Privacy tool
    "secure_memdocs",  # Secure storage module
    "/security/",  # Security modules
]

# Test file patterns - findings here are informational, not critical
TEST_FILE_PATTERNS = [
    r"/tests/",
    r"/test_",
    r"_test\.py$",
    r"_demo\.py$",
    r"_example\.py$",
    r"/examples/",
    r"/demo",
    r"coach/vscode-extension",  # Example VSCode extension
]

# Common security vulnerability patterns (OWASP Top 10 inspired).
# Each entry: regexes to match, a severity label, and the OWASP category.
SECURITY_PATTERNS = {
    "sql_injection": {
        "patterns": [
            r'execute\s*\(\s*["\'].*%s',
            r'cursor\.execute\s*\(\s*f["\']',
            r"\.format\s*\(.*\).*execute",
        ],
        "severity": "critical",
        "owasp": "A03:2021 Injection",
    },
    "xss": {
        "patterns": [
            r"innerHTML\s*=",
            r"dangerouslySetInnerHTML",
            r"document\.write\s*\(",
        ],
        "severity": "high",
        "owasp": "A03:2021 Injection",
    },
    "hardcoded_secret": {
        "patterns": [
            r'password\s*=\s*["\'][^"\']+["\']',
            r'api_key\s*=\s*["\'][^"\']+["\']',
            r'secret\s*=\s*["\'][^"\']+["\']',
            r'token\s*=\s*["\'][A-Za-z0-9]{20,}["\']',
        ],
        "severity": "critical",
        "owasp": "A02:2021 Cryptographic Failures",
    },
    "insecure_random": {
        "patterns": [
            r"random\.\w+\s*\(",
            r"Math\.random\s*\(",
        ],
        "severity": "medium",
        "owasp": "A02:2021 Cryptographic Failures",
    },
    "path_traversal": {
        "patterns": [
            r"open\s*\([^)]*\+[^)]*\)",
            r"readFile\s*\([^)]*\+[^)]*\)",
        ],
        "severity": "high",
        "owasp": "A01:2021 Broken Access Control",
    },
    "command_injection": {
        "patterns": [
            r"subprocess\.\w+\s*\([^)]*shell\s*=\s*True",
            r"os\.system\s*\(",
            r"eval\s*\(",
            r"exec\s*\(",
        ],
        "severity": "critical",
        "owasp": "A03:2021 Injection",
    },
}
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
class SecurityAuditWorkflow(BaseWorkflow):
    """OWASP-focused security audit with team decision integration.

    Scans code for security vulnerabilities while respecting
    team decisions about false positives and accepted risks.
    """

    name = "security-audit"
    description = "OWASP-focused security scan with vulnerability assessment"
    # Stage execution order; "remediate" may be skipped when no high/critical
    # findings remain (see should_skip_stage).
    stages = ["triage", "analyze", "assess", "remediate"]
    # Model tier per stage: cheap regex triage, capable analysis/assessment,
    # premium model only for remediation planning.
    tier_map = {
        "triage": ModelTier.CHEAP,
        "analyze": ModelTier.CAPABLE,
        "assess": ModelTier.CAPABLE,
        "remediate": ModelTier.PREMIUM,
    }
|
|
194
|
+
|
|
195
|
+
def __init__(
|
|
196
|
+
self,
|
|
197
|
+
patterns_dir: str = "./patterns",
|
|
198
|
+
skip_remediate_if_clean: bool = True,
|
|
199
|
+
use_crew_for_assessment: bool = True,
|
|
200
|
+
use_crew_for_remediation: bool = True,
|
|
201
|
+
crew_config: dict | None = None,
|
|
202
|
+
**kwargs: Any,
|
|
203
|
+
):
|
|
204
|
+
"""Initialize security audit workflow.
|
|
205
|
+
|
|
206
|
+
Args:
|
|
207
|
+
patterns_dir: Directory containing security decisions
|
|
208
|
+
skip_remediate_if_clean: Skip remediation if no high/critical findings
|
|
209
|
+
use_crew_for_assessment: Use SecurityAuditCrew for vulnerability assessment (default: True)
|
|
210
|
+
use_crew_for_remediation: Use SecurityAuditCrew for enhanced remediation (default: True)
|
|
211
|
+
crew_config: Configuration dict for SecurityAuditCrew
|
|
212
|
+
**kwargs: Additional arguments passed to BaseWorkflow
|
|
213
|
+
|
|
214
|
+
"""
|
|
215
|
+
super().__init__(**kwargs)
|
|
216
|
+
self.patterns_dir = patterns_dir
|
|
217
|
+
self.skip_remediate_if_clean = skip_remediate_if_clean
|
|
218
|
+
self.use_crew_for_assessment = use_crew_for_assessment
|
|
219
|
+
self.use_crew_for_remediation = use_crew_for_remediation
|
|
220
|
+
self.crew_config = crew_config or {}
|
|
221
|
+
self._has_critical: bool = False
|
|
222
|
+
self._team_decisions: dict[str, dict] = {}
|
|
223
|
+
self._crew: Any = None
|
|
224
|
+
self._crew_available = False
|
|
225
|
+
self._load_team_decisions()
|
|
226
|
+
|
|
227
|
+
def _load_team_decisions(self) -> None:
|
|
228
|
+
"""Load team security decisions for false positive filtering."""
|
|
229
|
+
decisions_file = Path(self.patterns_dir) / "security" / "team_decisions.json"
|
|
230
|
+
if decisions_file.exists():
|
|
231
|
+
try:
|
|
232
|
+
with open(decisions_file) as f:
|
|
233
|
+
data = json.load(f)
|
|
234
|
+
for decision in data.get("decisions", []):
|
|
235
|
+
key = decision.get("finding_hash", "")
|
|
236
|
+
self._team_decisions[key] = decision
|
|
237
|
+
except (json.JSONDecodeError, OSError):
|
|
238
|
+
pass
|
|
239
|
+
|
|
240
|
+
async def _initialize_crew(self) -> None:
|
|
241
|
+
"""Initialize the SecurityAuditCrew."""
|
|
242
|
+
if self._crew is not None:
|
|
243
|
+
return
|
|
244
|
+
|
|
245
|
+
try:
|
|
246
|
+
from empathy_llm_toolkit.agent_factory.crews.security_audit import SecurityAuditCrew
|
|
247
|
+
|
|
248
|
+
self._crew = SecurityAuditCrew()
|
|
249
|
+
self._crew_available = True
|
|
250
|
+
logger.info("SecurityAuditCrew initialized successfully")
|
|
251
|
+
except ImportError as e:
|
|
252
|
+
logger.warning(f"SecurityAuditCrew not available: {e}")
|
|
253
|
+
self._crew_available = False
|
|
254
|
+
|
|
255
|
+
def should_skip_stage(self, stage_name: str, input_data: Any) -> tuple[bool, str | None]:
|
|
256
|
+
"""Skip remediation stage if no critical/high findings.
|
|
257
|
+
|
|
258
|
+
Args:
|
|
259
|
+
stage_name: Name of the stage to check
|
|
260
|
+
input_data: Current workflow data
|
|
261
|
+
|
|
262
|
+
Returns:
|
|
263
|
+
Tuple of (should_skip, reason)
|
|
264
|
+
|
|
265
|
+
"""
|
|
266
|
+
if stage_name == "remediate" and self.skip_remediate_if_clean:
|
|
267
|
+
if not self._has_critical:
|
|
268
|
+
return True, "No high/critical findings requiring remediation"
|
|
269
|
+
return False, None
|
|
270
|
+
|
|
271
|
+
async def run_stage(
|
|
272
|
+
self,
|
|
273
|
+
stage_name: str,
|
|
274
|
+
tier: ModelTier,
|
|
275
|
+
input_data: Any,
|
|
276
|
+
) -> tuple[Any, int, int]:
|
|
277
|
+
"""Route to specific stage implementation."""
|
|
278
|
+
if stage_name == "triage":
|
|
279
|
+
return await self._triage(input_data, tier)
|
|
280
|
+
if stage_name == "analyze":
|
|
281
|
+
return await self._analyze(input_data, tier)
|
|
282
|
+
if stage_name == "assess":
|
|
283
|
+
return await self._assess(input_data, tier)
|
|
284
|
+
if stage_name == "remediate":
|
|
285
|
+
return await self._remediate(input_data, tier)
|
|
286
|
+
raise ValueError(f"Unknown stage: {stage_name}")
|
|
287
|
+
|
|
288
|
+
async def _triage(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
|
|
289
|
+
"""Quick scan for common vulnerability patterns.
|
|
290
|
+
|
|
291
|
+
Uses regex patterns to identify potential security issues
|
|
292
|
+
across the codebase for further analysis.
|
|
293
|
+
"""
|
|
294
|
+
target_path = input_data.get("path", ".")
|
|
295
|
+
file_types = input_data.get("file_types", [".py", ".ts", ".tsx", ".js", ".jsx"])
|
|
296
|
+
|
|
297
|
+
findings: list[dict] = []
|
|
298
|
+
files_scanned = 0
|
|
299
|
+
|
|
300
|
+
target = Path(target_path)
|
|
301
|
+
if target.exists():
|
|
302
|
+
for ext in file_types:
|
|
303
|
+
for file_path in target.rglob(f"*{ext}"):
|
|
304
|
+
# Skip excluded directories
|
|
305
|
+
if any(skip in str(file_path) for skip in SKIP_DIRECTORIES):
|
|
306
|
+
continue
|
|
307
|
+
|
|
308
|
+
try:
|
|
309
|
+
content = file_path.read_text(errors="ignore")
|
|
310
|
+
lines = content.split("\n")
|
|
311
|
+
files_scanned += 1
|
|
312
|
+
|
|
313
|
+
for vuln_type, vuln_info in SECURITY_PATTERNS.items():
|
|
314
|
+
for pattern in vuln_info["patterns"]:
|
|
315
|
+
matches = list(re.finditer(pattern, content, re.IGNORECASE))
|
|
316
|
+
for match in matches:
|
|
317
|
+
# Find line number and get the line content
|
|
318
|
+
line_num = content[: match.start()].count("\n") + 1
|
|
319
|
+
line_content = (
|
|
320
|
+
lines[line_num - 1] if line_num <= len(lines) else ""
|
|
321
|
+
)
|
|
322
|
+
|
|
323
|
+
# Skip if file is a security example/test file
|
|
324
|
+
file_name = str(file_path)
|
|
325
|
+
if any(exp in file_name for exp in SECURITY_EXAMPLE_PATHS):
|
|
326
|
+
continue
|
|
327
|
+
|
|
328
|
+
# Skip if this looks like detection/scanning code
|
|
329
|
+
if self._is_detection_code(line_content, match.group()):
|
|
330
|
+
continue
|
|
331
|
+
|
|
332
|
+
# Skip fake/test credentials
|
|
333
|
+
if vuln_type == "hardcoded_secret":
|
|
334
|
+
if self._is_fake_credential(match.group()):
|
|
335
|
+
continue
|
|
336
|
+
|
|
337
|
+
# Skip command_injection in documentation strings
|
|
338
|
+
if vuln_type == "command_injection":
|
|
339
|
+
if self._is_documentation_or_string(
|
|
340
|
+
line_content,
|
|
341
|
+
match.group(),
|
|
342
|
+
):
|
|
343
|
+
continue
|
|
344
|
+
|
|
345
|
+
# Check if this is a test file - downgrade to informational
|
|
346
|
+
is_test_file = any(
|
|
347
|
+
re.search(pat, file_name) for pat in TEST_FILE_PATTERNS
|
|
348
|
+
)
|
|
349
|
+
|
|
350
|
+
# Skip test file findings for hardcoded_secret (expected in tests)
|
|
351
|
+
if is_test_file and vuln_type == "hardcoded_secret":
|
|
352
|
+
continue
|
|
353
|
+
|
|
354
|
+
findings.append(
|
|
355
|
+
{
|
|
356
|
+
"type": vuln_type,
|
|
357
|
+
"file": str(file_path),
|
|
358
|
+
"line": line_num,
|
|
359
|
+
"match": match.group()[:100],
|
|
360
|
+
"severity": (
|
|
361
|
+
"low" if is_test_file else vuln_info["severity"]
|
|
362
|
+
),
|
|
363
|
+
"owasp": vuln_info["owasp"],
|
|
364
|
+
"is_test": is_test_file,
|
|
365
|
+
},
|
|
366
|
+
)
|
|
367
|
+
except OSError:
|
|
368
|
+
continue
|
|
369
|
+
|
|
370
|
+
input_tokens = len(str(input_data)) // 4
|
|
371
|
+
output_tokens = len(str(findings)) // 4
|
|
372
|
+
|
|
373
|
+
return (
|
|
374
|
+
{
|
|
375
|
+
"findings": findings,
|
|
376
|
+
"files_scanned": files_scanned,
|
|
377
|
+
"finding_count": len(findings),
|
|
378
|
+
**input_data,
|
|
379
|
+
},
|
|
380
|
+
input_tokens,
|
|
381
|
+
output_tokens,
|
|
382
|
+
)
|
|
383
|
+
|
|
384
|
+
async def _analyze(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
|
|
385
|
+
"""Deep analysis of flagged areas.
|
|
386
|
+
|
|
387
|
+
Filters findings against team decisions and performs
|
|
388
|
+
deeper analysis of genuine security concerns.
|
|
389
|
+
"""
|
|
390
|
+
findings = input_data.get("findings", [])
|
|
391
|
+
analyzed: list[dict] = []
|
|
392
|
+
|
|
393
|
+
for finding in findings:
|
|
394
|
+
finding_key = finding.get("type", "")
|
|
395
|
+
|
|
396
|
+
# Check team decisions
|
|
397
|
+
decision = self._team_decisions.get(finding_key)
|
|
398
|
+
if decision:
|
|
399
|
+
if decision.get("decision") == "false_positive":
|
|
400
|
+
finding["status"] = "false_positive"
|
|
401
|
+
finding["decision_reason"] = decision.get("reason", "")
|
|
402
|
+
finding["decided_by"] = decision.get("decided_by", "")
|
|
403
|
+
elif decision.get("decision") == "accepted":
|
|
404
|
+
finding["status"] = "accepted_risk"
|
|
405
|
+
finding["decision_reason"] = decision.get("reason", "")
|
|
406
|
+
elif decision.get("decision") == "deferred":
|
|
407
|
+
finding["status"] = "deferred"
|
|
408
|
+
finding["decision_reason"] = decision.get("reason", "")
|
|
409
|
+
else:
|
|
410
|
+
finding["status"] = "needs_review"
|
|
411
|
+
else:
|
|
412
|
+
finding["status"] = "needs_review"
|
|
413
|
+
|
|
414
|
+
# Add context analysis
|
|
415
|
+
if finding["status"] == "needs_review":
|
|
416
|
+
finding["analysis"] = self._analyze_finding(finding)
|
|
417
|
+
|
|
418
|
+
analyzed.append(finding)
|
|
419
|
+
|
|
420
|
+
# Separate by status
|
|
421
|
+
needs_review = [f for f in analyzed if f["status"] == "needs_review"]
|
|
422
|
+
false_positives = [f for f in analyzed if f["status"] == "false_positive"]
|
|
423
|
+
accepted = [f for f in analyzed if f["status"] == "accepted_risk"]
|
|
424
|
+
|
|
425
|
+
input_tokens = len(str(input_data)) // 4
|
|
426
|
+
output_tokens = len(str(analyzed)) // 4
|
|
427
|
+
|
|
428
|
+
return (
|
|
429
|
+
{
|
|
430
|
+
"analyzed_findings": analyzed,
|
|
431
|
+
"needs_review": needs_review,
|
|
432
|
+
"false_positives": false_positives,
|
|
433
|
+
"accepted_risks": accepted,
|
|
434
|
+
"review_count": len(needs_review),
|
|
435
|
+
**input_data,
|
|
436
|
+
},
|
|
437
|
+
input_tokens,
|
|
438
|
+
output_tokens,
|
|
439
|
+
)
|
|
440
|
+
|
|
441
|
+
def _analyze_finding(self, finding: dict) -> str:
|
|
442
|
+
"""Generate analysis context for a finding."""
|
|
443
|
+
vuln_type = finding.get("type", "")
|
|
444
|
+
analyses = {
|
|
445
|
+
"sql_injection": "Potential SQL injection. Verify parameterized input.",
|
|
446
|
+
"xss": "Potential XSS vulnerability. Check output escaping.",
|
|
447
|
+
"hardcoded_secret": "Hardcoded credential. Use env vars or secrets manager.",
|
|
448
|
+
"insecure_random": "Insecure random. Use secrets module instead.",
|
|
449
|
+
"path_traversal": "Potential path traversal. Validate file paths.",
|
|
450
|
+
"command_injection": "Potential command injection. Avoid shell=True.",
|
|
451
|
+
}
|
|
452
|
+
return analyses.get(vuln_type, "Review for security implications.")
|
|
453
|
+
|
|
454
|
+
def _is_detection_code(self, line_content: str, match_text: str) -> bool:
|
|
455
|
+
"""Check if a match is actually detection/scanning code, not a vulnerability.
|
|
456
|
+
|
|
457
|
+
This prevents false positives when scanning security tools that contain
|
|
458
|
+
patterns like 'if "eval(" in content:' which are detecting vulnerabilities,
|
|
459
|
+
not introducing them.
|
|
460
|
+
"""
|
|
461
|
+
# Check if the line contains detection patterns
|
|
462
|
+
for pattern in DETECTION_PATTERNS:
|
|
463
|
+
if re.search(pattern, line_content, re.IGNORECASE):
|
|
464
|
+
return True
|
|
465
|
+
|
|
466
|
+
# Check if the match is inside a string literal used for comparison
|
|
467
|
+
# e.g., 'if "eval(" in content:' or 'pattern = r"eval\("'
|
|
468
|
+
if f'"{match_text.strip()}"' in line_content or f"'{match_text.strip()}'" in line_content:
|
|
469
|
+
return True
|
|
470
|
+
|
|
471
|
+
return False
|
|
472
|
+
|
|
473
|
+
def _is_fake_credential(self, match_text: str) -> bool:
|
|
474
|
+
"""Check if a matched credential is obviously fake/for testing.
|
|
475
|
+
|
|
476
|
+
This prevents false positives for test fixtures using patterns like
|
|
477
|
+
'AKIAIOSFODNN7EXAMPLE' (AWS official example) or 'test-key-not-real'.
|
|
478
|
+
"""
|
|
479
|
+
for pattern in FAKE_CREDENTIAL_PATTERNS:
|
|
480
|
+
if re.search(pattern, match_text, re.IGNORECASE):
|
|
481
|
+
return True
|
|
482
|
+
return False
|
|
483
|
+
|
|
484
|
+
def _is_documentation_or_string(self, line_content: str, match_text: str) -> bool:
|
|
485
|
+
"""Check if a command injection match is in documentation or string literals.
|
|
486
|
+
|
|
487
|
+
This prevents false positives for:
|
|
488
|
+
- Docstrings describing security issues
|
|
489
|
+
- String literals containing example vulnerable code
|
|
490
|
+
- Comments explaining vulnerabilities
|
|
491
|
+
"""
|
|
492
|
+
line = line_content.strip()
|
|
493
|
+
|
|
494
|
+
# Check if line is a comment
|
|
495
|
+
if line.startswith("#") or line.startswith("//") or line.startswith("*"):
|
|
496
|
+
return True
|
|
497
|
+
|
|
498
|
+
# Check if inside a docstring (triple quotes)
|
|
499
|
+
if '"""' in line or "'''" in line:
|
|
500
|
+
return True
|
|
501
|
+
|
|
502
|
+
# Check if the match is inside a string literal being defined
|
|
503
|
+
# e.g., 'pattern = r"eval\("' or '"eval(" in content'
|
|
504
|
+
string_patterns = [
|
|
505
|
+
r'["\'].*' + re.escape(match_text.strip()[:10]) + r'.*["\']', # Inside quotes
|
|
506
|
+
r'r["\'].*' + re.escape(match_text.strip()[:10]), # Raw string
|
|
507
|
+
r'=\s*["\']', # String assignment
|
|
508
|
+
]
|
|
509
|
+
for pattern in string_patterns:
|
|
510
|
+
if re.search(pattern, line):
|
|
511
|
+
return True
|
|
512
|
+
|
|
513
|
+
# Check for common documentation patterns
|
|
514
|
+
doc_indicators = [
|
|
515
|
+
"example",
|
|
516
|
+
"vulnerable",
|
|
517
|
+
"insecure",
|
|
518
|
+
"dangerous",
|
|
519
|
+
"pattern",
|
|
520
|
+
"detect",
|
|
521
|
+
"scan",
|
|
522
|
+
"check for",
|
|
523
|
+
"look for",
|
|
524
|
+
]
|
|
525
|
+
line_lower = line.lower()
|
|
526
|
+
if any(ind in line_lower for ind in doc_indicators):
|
|
527
|
+
return True
|
|
528
|
+
|
|
529
|
+
return False
|
|
530
|
+
|
|
531
|
+
async def _assess(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
|
|
532
|
+
"""Risk scoring and severity classification.
|
|
533
|
+
|
|
534
|
+
Calculates overall security risk score and identifies
|
|
535
|
+
critical issues requiring immediate attention.
|
|
536
|
+
|
|
537
|
+
When use_crew_for_assessment=True, uses SecurityAuditCrew's
|
|
538
|
+
comprehensive analysis for enhanced vulnerability detection.
|
|
539
|
+
"""
|
|
540
|
+
await self._initialize_crew()
|
|
541
|
+
|
|
542
|
+
needs_review = input_data.get("needs_review", [])
|
|
543
|
+
|
|
544
|
+
# Count by severity
|
|
545
|
+
severity_counts = {"critical": 0, "high": 0, "medium": 0, "low": 0}
|
|
546
|
+
for finding in needs_review:
|
|
547
|
+
sev = finding.get("severity", "low")
|
|
548
|
+
severity_counts[sev] = severity_counts.get(sev, 0) + 1
|
|
549
|
+
|
|
550
|
+
# Calculate risk score (0-100)
|
|
551
|
+
risk_score = (
|
|
552
|
+
severity_counts["critical"] * 25
|
|
553
|
+
+ severity_counts["high"] * 10
|
|
554
|
+
+ severity_counts["medium"] * 3
|
|
555
|
+
+ severity_counts["low"] * 1
|
|
556
|
+
)
|
|
557
|
+
risk_score = min(100, risk_score)
|
|
558
|
+
|
|
559
|
+
# Set flag for skip logic
|
|
560
|
+
self._has_critical = severity_counts["critical"] > 0 or severity_counts["high"] > 0
|
|
561
|
+
|
|
562
|
+
# Group findings by OWASP category
|
|
563
|
+
by_owasp: dict[str, list] = {}
|
|
564
|
+
for finding in needs_review:
|
|
565
|
+
owasp = finding.get("owasp", "Unknown")
|
|
566
|
+
if owasp not in by_owasp:
|
|
567
|
+
by_owasp[owasp] = []
|
|
568
|
+
by_owasp[owasp].append(finding)
|
|
569
|
+
|
|
570
|
+
# Use crew for enhanced assessment if available
|
|
571
|
+
crew_enhanced = False
|
|
572
|
+
crew_findings = []
|
|
573
|
+
if self.use_crew_for_assessment and self._crew_available:
|
|
574
|
+
target = input_data.get("path", ".")
|
|
575
|
+
try:
|
|
576
|
+
crew_report = await self._crew.audit(target=target)
|
|
577
|
+
if crew_report and crew_report.findings:
|
|
578
|
+
crew_enhanced = True
|
|
579
|
+
# Convert crew findings to workflow format
|
|
580
|
+
for finding in crew_report.findings:
|
|
581
|
+
crew_findings.append(
|
|
582
|
+
{
|
|
583
|
+
"type": finding.category.value,
|
|
584
|
+
"title": finding.title,
|
|
585
|
+
"description": finding.description,
|
|
586
|
+
"severity": finding.severity.value,
|
|
587
|
+
"file": finding.file_path or "",
|
|
588
|
+
"line": finding.line_number or 0,
|
|
589
|
+
"owasp": finding.category.value,
|
|
590
|
+
"remediation": finding.remediation or "",
|
|
591
|
+
"cwe_id": finding.cwe_id or "",
|
|
592
|
+
"cvss_score": finding.cvss_score or 0.0,
|
|
593
|
+
"source": "crew",
|
|
594
|
+
}
|
|
595
|
+
)
|
|
596
|
+
# Update severity counts with crew findings
|
|
597
|
+
for finding in crew_findings:
|
|
598
|
+
sev = finding.get("severity", "low")
|
|
599
|
+
severity_counts[sev] = severity_counts.get(sev, 0) + 1
|
|
600
|
+
# Recalculate risk score with crew findings
|
|
601
|
+
risk_score = (
|
|
602
|
+
severity_counts["critical"] * 25
|
|
603
|
+
+ severity_counts["high"] * 10
|
|
604
|
+
+ severity_counts["medium"] * 3
|
|
605
|
+
+ severity_counts["low"] * 1
|
|
606
|
+
)
|
|
607
|
+
risk_score = min(100, risk_score)
|
|
608
|
+
except Exception as e:
|
|
609
|
+
logger.warning(f"Crew assessment failed: {e}")
|
|
610
|
+
|
|
611
|
+
# Merge crew findings with pattern-based findings
|
|
612
|
+
all_critical = [f for f in needs_review if f.get("severity") == "critical"]
|
|
613
|
+
all_high = [f for f in needs_review if f.get("severity") == "high"]
|
|
614
|
+
if crew_enhanced:
|
|
615
|
+
all_critical.extend([f for f in crew_findings if f.get("severity") == "critical"])
|
|
616
|
+
all_high.extend([f for f in crew_findings if f.get("severity") == "high"])
|
|
617
|
+
|
|
618
|
+
assessment = {
|
|
619
|
+
"risk_score": risk_score,
|
|
620
|
+
"risk_level": (
|
|
621
|
+
"critical"
|
|
622
|
+
if risk_score >= 75
|
|
623
|
+
else "high" if risk_score >= 50 else "medium" if risk_score >= 25 else "low"
|
|
624
|
+
),
|
|
625
|
+
"severity_breakdown": severity_counts,
|
|
626
|
+
"by_owasp_category": {k: len(v) for k, v in by_owasp.items()},
|
|
627
|
+
"critical_findings": all_critical,
|
|
628
|
+
"high_findings": all_high,
|
|
629
|
+
"crew_enhanced": crew_enhanced,
|
|
630
|
+
"crew_findings_count": len(crew_findings) if crew_enhanced else 0,
|
|
631
|
+
}
|
|
632
|
+
|
|
633
|
+
input_tokens = len(str(input_data)) // 4
|
|
634
|
+
output_tokens = len(str(assessment)) // 4
|
|
635
|
+
|
|
636
|
+
# Build output with assessment
|
|
637
|
+
output = {
|
|
638
|
+
"assessment": assessment,
|
|
639
|
+
**input_data,
|
|
640
|
+
}
|
|
641
|
+
|
|
642
|
+
# Add formatted report for human readability
|
|
643
|
+
output["formatted_report"] = format_security_report(output)
|
|
644
|
+
|
|
645
|
+
return (
|
|
646
|
+
output,
|
|
647
|
+
input_tokens,
|
|
648
|
+
output_tokens,
|
|
649
|
+
)
|
|
650
|
+
|
|
651
|
+
    async def _remediate(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
        """Generate remediation plan for security issues.

        Creates actionable remediation steps prioritized by
        severity and grouped by OWASP category.

        When use_crew_for_remediation=True, uses SecurityAuditCrew's
        Remediation Expert agent for enhanced recommendations.

        Supports XML-enhanced prompts when enabled in workflow config.

        Args:
            input_data: Output of the assess stage; reads "assessment"
                (with "critical_findings"/"high_findings") and the scan
                target from "target" or "path".
            tier: Model tier passed through to the legacy LLM call path.

        Returns:
            Tuple of (result dict with "remediation_plan" and metadata,
            input token estimate, output token estimate).
        """
        # Local import keeps the adapter dependency off module import time.
        from .security_adapters import _check_crew_available

        assessment = input_data.get("assessment", {})
        critical = assessment.get("critical_findings", [])
        high = assessment.get("high_findings", [])
        target = input_data.get("target", input_data.get("path", ""))

        crew_remediation = None
        crew_enhanced = False

        # Try crew-based remediation first if enabled
        if self.use_crew_for_remediation and _check_crew_available():
            crew_remediation = await self._get_crew_remediation(target, critical + high, assessment)
            if crew_remediation:
                crew_enhanced = True

        # Build findings summary for LLM (critical first, then high)
        findings_summary = []
        for f in critical:
            findings_summary.append(
                f"CRITICAL: {f.get('type')} in {f.get('file')}:{f.get('line')} - {f.get('owasp')}",
            )
        for f in high:
            findings_summary.append(
                f"HIGH: {f.get('type')} in {f.get('file')}:{f.get('line')} - {f.get('owasp')}",
            )

        # Build input payload for prompt (shared by XML and legacy branches)
        input_payload = f"""Target: {target or "codebase"}

Findings:
{chr(10).join(findings_summary) if findings_summary else "No critical or high findings"}

Risk Score: {assessment.get("risk_score", 0)}/100
Risk Level: {assessment.get("risk_level", "unknown")}

Severity Breakdown: {json.dumps(assessment.get("severity_breakdown", {}), indent=2)}"""

        # Check if XML prompts are enabled
        if self._is_xml_enabled():
            # Use XML-enhanced prompt
            user_message = self._render_xml_prompt(
                role="application security engineer",
                goal="Generate a comprehensive remediation plan for security vulnerabilities",
                instructions=[
                    "Explain each vulnerability and its potential impact",
                    "Provide specific remediation steps with code examples",
                    "Suggest preventive measures to avoid similar issues",
                    "Reference relevant OWASP guidelines",
                    "Prioritize by severity (critical first, then high)",
                ],
                constraints=[
                    "Be specific and actionable",
                    "Include code examples where helpful",
                    "Group fixes by severity",
                ],
                input_type="security_findings",
                input_payload=input_payload,
                extra={
                    "risk_score": assessment.get("risk_score", 0),
                    "risk_level": assessment.get("risk_level", "unknown"),
                },
            )
            system = None  # XML prompt includes all context
        else:
            # Use legacy plain text prompts
            system = """You are a security expert in application security and OWASP.
Generate a comprehensive remediation plan for the security findings.

For each finding:
1. Explain the vulnerability and its potential impact
2. Provide specific remediation steps with code examples
3. Suggest preventive measures to avoid similar issues
4. Reference relevant OWASP guidelines

Prioritize by severity (critical first, then high).
Be specific and actionable."""

            user_message = f"""Generate a remediation plan for these security findings:

{input_payload}

Provide a detailed remediation plan with specific fixes."""

        # Try executor-based execution first (Phase 3 pattern)
        if self._executor is not None or self._api_key:
            try:
                step = SECURITY_STEPS["remediate"]
                # NOTE(review): `cost` is unpacked but unused here — presumably
                # cost accounting happens inside run_step_with_executor; confirm.
                response, input_tokens, output_tokens, cost = await self.run_step_with_executor(
                    step=step,
                    prompt=user_message,
                    system=system,
                )
            except Exception:
                # Fall back to legacy _call_llm if executor fails
                response, input_tokens, output_tokens = await self._call_llm(
                    tier,
                    system or "",
                    user_message,
                    max_tokens=3000,
                )
        else:
            # Legacy path for backward compatibility
            response, input_tokens, output_tokens = await self._call_llm(
                tier,
                system or "",
                user_message,
                max_tokens=3000,
            )

        # Parse XML response if enforcement is enabled.
        # Parsing happens BEFORE the crew merge below so the appended crew
        # markdown never interferes with XML extraction.
        parsed_data = self._parse_xml_response(response)

        # Merge crew remediation if available
        if crew_enhanced and crew_remediation:
            response = self._merge_crew_remediation(response, crew_remediation)

        result = {
            "remediation_plan": response,
            "remediation_count": len(critical) + len(high),
            "risk_score": assessment.get("risk_score", 0),
            "risk_level": assessment.get("risk_level", "unknown"),
            "model_tier_used": tier.value,
            "crew_enhanced": crew_enhanced,
        }

        # Add crew-specific fields if enhanced
        if crew_enhanced and crew_remediation:
            result["crew_findings"] = crew_remediation.get("findings", [])
            result["crew_agents_used"] = crew_remediation.get("agents_used", [])

        # Merge parsed XML data if available
        if parsed_data.get("xml_parsed"):
            result.update(
                {
                    "xml_parsed": True,
                    "summary": parsed_data.get("summary"),
                    "findings": parsed_data.get("findings", []),
                    "checklist": parsed_data.get("checklist", []),
                },
            )

        return (result, input_tokens, output_tokens)
|
|
805
|
+
|
|
806
|
+
async def _get_crew_remediation(
|
|
807
|
+
self,
|
|
808
|
+
target: str,
|
|
809
|
+
findings: list,
|
|
810
|
+
assessment: dict,
|
|
811
|
+
) -> dict | None:
|
|
812
|
+
"""Get remediation recommendations from SecurityAuditCrew.
|
|
813
|
+
|
|
814
|
+
Args:
|
|
815
|
+
target: Path to codebase
|
|
816
|
+
findings: List of findings needing remediation
|
|
817
|
+
assessment: Current assessment dict
|
|
818
|
+
|
|
819
|
+
Returns:
|
|
820
|
+
Crew results dict or None if failed
|
|
821
|
+
|
|
822
|
+
"""
|
|
823
|
+
try:
|
|
824
|
+
from empathy_llm_toolkit.agent_factory.crews import (
|
|
825
|
+
SecurityAuditConfig,
|
|
826
|
+
SecurityAuditCrew,
|
|
827
|
+
)
|
|
828
|
+
|
|
829
|
+
from .security_adapters import (
|
|
830
|
+
crew_report_to_workflow_format,
|
|
831
|
+
workflow_findings_to_crew_format,
|
|
832
|
+
)
|
|
833
|
+
|
|
834
|
+
# Configure crew for focused remediation
|
|
835
|
+
config = SecurityAuditConfig(
|
|
836
|
+
scan_depth="quick", # Skip deep scan, focus on remediation
|
|
837
|
+
**self.crew_config,
|
|
838
|
+
)
|
|
839
|
+
crew = SecurityAuditCrew(config=config)
|
|
840
|
+
|
|
841
|
+
# Convert findings to crew format for context
|
|
842
|
+
crew_findings = workflow_findings_to_crew_format(findings)
|
|
843
|
+
|
|
844
|
+
# Run audit with remediation focus
|
|
845
|
+
context = {
|
|
846
|
+
"focus_areas": ["remediation"],
|
|
847
|
+
"existing_findings": crew_findings,
|
|
848
|
+
"skip_detection": True, # We already have findings
|
|
849
|
+
"risk_score": assessment.get("risk_score", 0),
|
|
850
|
+
}
|
|
851
|
+
|
|
852
|
+
report = await crew.audit(target, context=context)
|
|
853
|
+
|
|
854
|
+
if report:
|
|
855
|
+
return crew_report_to_workflow_format(report)
|
|
856
|
+
return None
|
|
857
|
+
|
|
858
|
+
except Exception as e:
|
|
859
|
+
import logging
|
|
860
|
+
|
|
861
|
+
logging.getLogger(__name__).warning(f"Crew remediation failed: {e}")
|
|
862
|
+
return None
|
|
863
|
+
|
|
864
|
+
def _merge_crew_remediation(self, llm_response: str, crew_remediation: dict) -> str:
|
|
865
|
+
"""Merge crew remediation recommendations with LLM response.
|
|
866
|
+
|
|
867
|
+
Args:
|
|
868
|
+
llm_response: LLM-generated remediation plan
|
|
869
|
+
crew_remediation: Crew results in workflow format
|
|
870
|
+
|
|
871
|
+
Returns:
|
|
872
|
+
Merged response with crew enhancements
|
|
873
|
+
|
|
874
|
+
"""
|
|
875
|
+
crew_findings = crew_remediation.get("findings", [])
|
|
876
|
+
|
|
877
|
+
if not crew_findings:
|
|
878
|
+
return llm_response
|
|
879
|
+
|
|
880
|
+
# Build crew section efficiently (avoid O(n²) string concat)
|
|
881
|
+
parts = [
|
|
882
|
+
"\n\n## Enhanced Remediation (SecurityAuditCrew)\n\n",
|
|
883
|
+
f"**Agents Used**: {', '.join(crew_remediation.get('agents_used', []))}\n\n",
|
|
884
|
+
]
|
|
885
|
+
|
|
886
|
+
for finding in crew_findings:
|
|
887
|
+
if finding.get("remediation"):
|
|
888
|
+
parts.append(f"### {finding.get('title', 'Finding')}\n")
|
|
889
|
+
parts.append(f"**Severity**: {finding.get('severity', 'unknown').upper()}\n")
|
|
890
|
+
if finding.get("cwe_id"):
|
|
891
|
+
parts.append(f"**CWE**: {finding.get('cwe_id')}\n")
|
|
892
|
+
if finding.get("cvss_score"):
|
|
893
|
+
parts.append(f"**CVSS Score**: {finding.get('cvss_score')}\n")
|
|
894
|
+
parts.append(f"\n**Remediation**:\n{finding.get('remediation')}\n\n")
|
|
895
|
+
|
|
896
|
+
return llm_response + "".join(parts)
|
|
897
|
+
|
|
898
|
+
def _get_remediation_action(self, finding: dict) -> str:
|
|
899
|
+
"""Generate specific remediation action for a finding."""
|
|
900
|
+
actions = {
|
|
901
|
+
"sql_injection": "Use parameterized queries or ORM. Never interpolate user input.",
|
|
902
|
+
"xss": "Use framework's auto-escaping. Sanitize user input.",
|
|
903
|
+
"hardcoded_secret": "Move to env vars or use a secrets manager.",
|
|
904
|
+
"insecure_random": "Use secrets.token_hex() or secrets.randbelow().",
|
|
905
|
+
"path_traversal": "Use os.path.realpath() and validate paths.",
|
|
906
|
+
"command_injection": "Use subprocess with shell=False and argument lists.",
|
|
907
|
+
}
|
|
908
|
+
return actions.get(finding.get("type", ""), "Apply security best practices.")
|
|
909
|
+
|
|
910
|
+
|
|
911
|
+
def format_security_report(output: dict) -> str:
    """Render a security-audit result dict as a human-readable text report.

    The layout is deliberately plain so it is:
    - Easy for humans to read and understand
    - Easy to copy/paste to an AI assistant for remediation help
    - Actionable, with clear severity levels and file locations

    Args:
        output: The workflow output dictionary

    Returns:
        Formatted report string

    """
    heavy_rule = "=" * 60
    light_rule = "-" * 60
    out: list[str] = []

    assessment = output.get("assessment", {})

    # Header with overall risk posture
    out.extend(
        [
            heavy_rule,
            "SECURITY AUDIT REPORT",
            heavy_rule,
            "",
            f"Risk Level: {assessment.get('risk_level', 'unknown').upper()}",
            f"Risk Score: {assessment.get('risk_score', 0)}/100",
            "",
        ]
    )

    # Per-severity counts with traffic-light icons
    icons = {"critical": "🔴", "high": "🟠", "medium": "🟡", "low": "🟢"}
    breakdown = assessment.get("severity_breakdown", {})
    out.append("Severity Summary:")
    for level in ("critical", "high", "medium", "low"):
        out.append(f"  {icons.get(level, '⚪')} {level.capitalize()}: {breakdown.get(level, 0)}")
    out.append("")

    out.append(f"Files Scanned: {output.get('files_scanned', 0)}")
    out.append("")

    # Open findings, numbered, with location and context
    needs_review = output.get("needs_review", [])
    if needs_review:
        out.extend([light_rule, "FINDINGS REQUIRING REVIEW", light_rule, ""])
        for idx, item in enumerate(needs_review, 1):
            marker = " [TEST FILE]" if item.get("is_test", False) else ""
            # Strip everything up to the repo root for shorter paths
            short_path = item.get("file", "").split("Empathy-framework/")[-1]
            out.append(
                f"{idx}. [{item.get('severity', 'unknown').upper()}]{marker} "
                f"{item.get('type', 'unknown')}"
            )
            out.append(f"   File: {short_path}:{item.get('line', 0)}")
            out.append(f"   Match: {item.get('match', '')[:50]}")
            out.append(f"   OWASP: {item.get('owasp', '')}")
            if item.get("analysis", ""):
                out.append(f"   Analysis: {item.get('analysis', '')}")
            out.append("")

    # Risks the team has explicitly accepted
    accepted = output.get("accepted_risks", [])
    if accepted:
        out.extend([light_rule, "ACCEPTED RISKS (No Action Required)", light_rule, ""])
        for item in accepted:
            short_path = item.get("file", "").split("Empathy-framework/")[-1]
            out.append(f"  - {item.get('type', 'unknown')} in {short_path}:{item.get('line', 0)}")
            if item.get("decision_reason", ""):
                out.append(f"    Reason: {item.get('decision_reason', '')}")
            out.append("")

    # Remediation plan if one was generated
    plan = output.get("remediation_plan", "")
    if plan and plan.strip():
        out.extend([light_rule, "REMEDIATION PLAN", light_rule, "", plan, ""])

    # Footer with action items
    out.append(heavy_rule)
    if needs_review:
        out.append("ACTION REQUIRED:")
        out.append(f"  Review {len(needs_review)} finding(s) above")
        out.append("  Copy this report to Claude Code for remediation help")
    else:
        out.append("STATUS: All clear - no critical or high findings")
    out.append(heavy_rule)

    return "\n".join(out)
|
|
1022
|
+
|
|
1023
|
+
|
|
1024
|
+
def main():
    """CLI entry point for security audit workflow."""
    import asyncio

    async def _run() -> None:
        # Audit the current directory's Python sources with defaults.
        audit = SecurityAuditWorkflow()
        result = await audit.execute(path=".", file_types=[".py"])

        # Human-readable findings summary
        print(format_security_report(result.final_output))

        # Cost accounting for the run
        print("\nCost Report:")
        print(f"  Total Cost: ${result.cost_report.total_cost:.4f}")
        savings = result.cost_report.savings
        pct = result.cost_report.savings_percent
        print(f"  Savings: ${savings:.4f} ({pct:.1f}%)")

    asyncio.run(_run())


if __name__ == "__main__":
    main()
|