empathy-framework 4.6.6__py3-none-any.whl → 4.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/METADATA +7 -6
- empathy_framework-4.7.0.dist-info/RECORD +354 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/top_level.txt +0 -2
- empathy_healthcare_plugin/monitors/monitoring/__init__.py +9 -9
- empathy_llm_toolkit/agent_factory/__init__.py +6 -6
- empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +7 -10
- empathy_llm_toolkit/agents_md/__init__.py +22 -0
- empathy_llm_toolkit/agents_md/loader.py +218 -0
- empathy_llm_toolkit/agents_md/parser.py +271 -0
- empathy_llm_toolkit/agents_md/registry.py +307 -0
- empathy_llm_toolkit/commands/__init__.py +51 -0
- empathy_llm_toolkit/commands/context.py +375 -0
- empathy_llm_toolkit/commands/loader.py +301 -0
- empathy_llm_toolkit/commands/models.py +231 -0
- empathy_llm_toolkit/commands/parser.py +371 -0
- empathy_llm_toolkit/commands/registry.py +429 -0
- empathy_llm_toolkit/config/__init__.py +8 -8
- empathy_llm_toolkit/config/unified.py +3 -7
- empathy_llm_toolkit/context/__init__.py +22 -0
- empathy_llm_toolkit/context/compaction.py +455 -0
- empathy_llm_toolkit/context/manager.py +434 -0
- empathy_llm_toolkit/hooks/__init__.py +24 -0
- empathy_llm_toolkit/hooks/config.py +306 -0
- empathy_llm_toolkit/hooks/executor.py +289 -0
- empathy_llm_toolkit/hooks/registry.py +302 -0
- empathy_llm_toolkit/hooks/scripts/__init__.py +39 -0
- empathy_llm_toolkit/hooks/scripts/evaluate_session.py +201 -0
- empathy_llm_toolkit/hooks/scripts/first_time_init.py +285 -0
- empathy_llm_toolkit/hooks/scripts/pre_compact.py +207 -0
- empathy_llm_toolkit/hooks/scripts/session_end.py +183 -0
- empathy_llm_toolkit/hooks/scripts/session_start.py +163 -0
- empathy_llm_toolkit/hooks/scripts/suggest_compact.py +225 -0
- empathy_llm_toolkit/learning/__init__.py +30 -0
- empathy_llm_toolkit/learning/evaluator.py +438 -0
- empathy_llm_toolkit/learning/extractor.py +514 -0
- empathy_llm_toolkit/learning/storage.py +560 -0
- empathy_llm_toolkit/providers.py +4 -11
- empathy_llm_toolkit/security/__init__.py +17 -17
- empathy_llm_toolkit/utils/tokens.py +2 -5
- empathy_os/__init__.py +202 -70
- empathy_os/cache_monitor.py +5 -3
- empathy_os/cli/__init__.py +11 -55
- empathy_os/cli/__main__.py +29 -15
- empathy_os/cli/commands/inspection.py +21 -12
- empathy_os/cli/commands/memory.py +4 -12
- empathy_os/cli/commands/profiling.py +198 -0
- empathy_os/cli/commands/utilities.py +27 -7
- empathy_os/cli.py +28 -57
- empathy_os/cli_unified.py +525 -1164
- empathy_os/cost_tracker.py +9 -3
- empathy_os/dashboard/server.py +200 -2
- empathy_os/hot_reload/__init__.py +7 -7
- empathy_os/hot_reload/config.py +6 -7
- empathy_os/hot_reload/integration.py +35 -35
- empathy_os/hot_reload/reloader.py +57 -57
- empathy_os/hot_reload/watcher.py +28 -28
- empathy_os/hot_reload/websocket.py +2 -2
- empathy_os/memory/__init__.py +11 -4
- empathy_os/memory/claude_memory.py +1 -1
- empathy_os/memory/cross_session.py +8 -12
- empathy_os/memory/edges.py +6 -6
- empathy_os/memory/file_session.py +770 -0
- empathy_os/memory/graph.py +30 -30
- empathy_os/memory/nodes.py +6 -6
- empathy_os/memory/short_term.py +15 -9
- empathy_os/memory/unified.py +606 -140
- empathy_os/meta_workflows/agent_creator.py +3 -9
- empathy_os/meta_workflows/cli_meta_workflows.py +113 -53
- empathy_os/meta_workflows/form_engine.py +6 -18
- empathy_os/meta_workflows/intent_detector.py +64 -24
- empathy_os/meta_workflows/models.py +3 -1
- empathy_os/meta_workflows/pattern_learner.py +13 -31
- empathy_os/meta_workflows/plan_generator.py +55 -47
- empathy_os/meta_workflows/session_context.py +2 -3
- empathy_os/meta_workflows/workflow.py +20 -51
- empathy_os/models/cli.py +2 -2
- empathy_os/models/tasks.py +1 -2
- empathy_os/models/telemetry.py +4 -1
- empathy_os/models/token_estimator.py +3 -1
- empathy_os/monitoring/alerts.py +938 -9
- empathy_os/monitoring/alerts_cli.py +346 -183
- empathy_os/orchestration/execution_strategies.py +12 -29
- empathy_os/orchestration/pattern_learner.py +20 -26
- empathy_os/orchestration/real_tools.py +6 -15
- empathy_os/platform_utils.py +2 -1
- empathy_os/plugins/__init__.py +2 -2
- empathy_os/plugins/base.py +64 -64
- empathy_os/plugins/registry.py +32 -32
- empathy_os/project_index/index.py +49 -15
- empathy_os/project_index/models.py +1 -2
- empathy_os/project_index/reports.py +1 -1
- empathy_os/project_index/scanner.py +1 -0
- empathy_os/redis_memory.py +10 -7
- empathy_os/resilience/__init__.py +1 -1
- empathy_os/resilience/health.py +10 -10
- empathy_os/routing/__init__.py +7 -7
- empathy_os/routing/chain_executor.py +37 -37
- empathy_os/routing/classifier.py +36 -36
- empathy_os/routing/smart_router.py +40 -40
- empathy_os/routing/{wizard_registry.py → workflow_registry.py} +47 -47
- empathy_os/scaffolding/__init__.py +8 -8
- empathy_os/scaffolding/__main__.py +1 -1
- empathy_os/scaffolding/cli.py +28 -28
- empathy_os/socratic/__init__.py +3 -19
- empathy_os/socratic/ab_testing.py +25 -36
- empathy_os/socratic/blueprint.py +38 -38
- empathy_os/socratic/cli.py +34 -20
- empathy_os/socratic/collaboration.py +30 -28
- empathy_os/socratic/domain_templates.py +9 -1
- empathy_os/socratic/embeddings.py +17 -13
- empathy_os/socratic/engine.py +135 -70
- empathy_os/socratic/explainer.py +70 -60
- empathy_os/socratic/feedback.py +24 -19
- empathy_os/socratic/forms.py +15 -10
- empathy_os/socratic/generator.py +51 -35
- empathy_os/socratic/llm_analyzer.py +25 -23
- empathy_os/socratic/mcp_server.py +99 -159
- empathy_os/socratic/session.py +19 -13
- empathy_os/socratic/storage.py +98 -67
- empathy_os/socratic/success.py +38 -27
- empathy_os/socratic/visual_editor.py +51 -39
- empathy_os/socratic/web_ui.py +99 -66
- empathy_os/telemetry/cli.py +3 -1
- empathy_os/telemetry/usage_tracker.py +1 -3
- empathy_os/test_generator/__init__.py +3 -3
- empathy_os/test_generator/cli.py +28 -28
- empathy_os/test_generator/generator.py +64 -66
- empathy_os/test_generator/risk_analyzer.py +11 -11
- empathy_os/vscode_bridge.py +173 -0
- empathy_os/workflows/__init__.py +212 -120
- empathy_os/workflows/batch_processing.py +8 -24
- empathy_os/workflows/bug_predict.py +1 -1
- empathy_os/workflows/code_review.py +20 -5
- empathy_os/workflows/code_review_pipeline.py +13 -8
- empathy_os/workflows/keyboard_shortcuts/workflow.py +6 -2
- empathy_os/workflows/manage_documentation.py +1 -0
- empathy_os/workflows/orchestrated_health_check.py +6 -11
- empathy_os/workflows/orchestrated_release_prep.py +3 -3
- empathy_os/workflows/pr_review.py +18 -10
- empathy_os/workflows/progressive/__init__.py +2 -12
- empathy_os/workflows/progressive/cli.py +14 -37
- empathy_os/workflows/progressive/core.py +12 -12
- empathy_os/workflows/progressive/orchestrator.py +166 -144
- empathy_os/workflows/progressive/reports.py +22 -31
- empathy_os/workflows/progressive/telemetry.py +8 -14
- empathy_os/workflows/progressive/test_gen.py +29 -48
- empathy_os/workflows/progressive/workflow.py +31 -70
- empathy_os/workflows/release_prep.py +21 -6
- empathy_os/workflows/release_prep_crew.py +1 -0
- empathy_os/workflows/secure_release.py +13 -6
- empathy_os/workflows/security_audit.py +8 -3
- empathy_os/workflows/test_coverage_boost_crew.py +3 -2
- empathy_os/workflows/test_maintenance_crew.py +1 -0
- empathy_os/workflows/test_runner.py +16 -12
- empathy_software_plugin/SOFTWARE_PLUGIN_README.md +25 -703
- empathy_software_plugin/cli.py +0 -122
- coach_wizards/__init__.py +0 -45
- coach_wizards/accessibility_wizard.py +0 -91
- coach_wizards/api_wizard.py +0 -91
- coach_wizards/base_wizard.py +0 -209
- coach_wizards/cicd_wizard.py +0 -91
- coach_wizards/code_reviewer_README.md +0 -60
- coach_wizards/code_reviewer_wizard.py +0 -180
- coach_wizards/compliance_wizard.py +0 -91
- coach_wizards/database_wizard.py +0 -91
- coach_wizards/debugging_wizard.py +0 -91
- coach_wizards/documentation_wizard.py +0 -91
- coach_wizards/generate_wizards.py +0 -347
- coach_wizards/localization_wizard.py +0 -173
- coach_wizards/migration_wizard.py +0 -91
- coach_wizards/monitoring_wizard.py +0 -91
- coach_wizards/observability_wizard.py +0 -91
- coach_wizards/performance_wizard.py +0 -91
- coach_wizards/prompt_engineering_wizard.py +0 -661
- coach_wizards/refactoring_wizard.py +0 -91
- coach_wizards/scaling_wizard.py +0 -90
- coach_wizards/security_wizard.py +0 -92
- coach_wizards/testing_wizard.py +0 -91
- empathy_framework-4.6.6.dist-info/RECORD +0 -410
- empathy_llm_toolkit/wizards/__init__.py +0 -43
- empathy_llm_toolkit/wizards/base_wizard.py +0 -364
- empathy_llm_toolkit/wizards/customer_support_wizard.py +0 -190
- empathy_llm_toolkit/wizards/healthcare_wizard.py +0 -378
- empathy_llm_toolkit/wizards/patient_assessment_README.md +0 -64
- empathy_llm_toolkit/wizards/patient_assessment_wizard.py +0 -193
- empathy_llm_toolkit/wizards/technology_wizard.py +0 -209
- empathy_os/wizard_factory_cli.py +0 -170
- empathy_software_plugin/wizards/__init__.py +0 -42
- empathy_software_plugin/wizards/advanced_debugging_wizard.py +0 -395
- empathy_software_plugin/wizards/agent_orchestration_wizard.py +0 -511
- empathy_software_plugin/wizards/ai_collaboration_wizard.py +0 -503
- empathy_software_plugin/wizards/ai_context_wizard.py +0 -441
- empathy_software_plugin/wizards/ai_documentation_wizard.py +0 -503
- empathy_software_plugin/wizards/base_wizard.py +0 -288
- empathy_software_plugin/wizards/book_chapter_wizard.py +0 -519
- empathy_software_plugin/wizards/code_review_wizard.py +0 -604
- empathy_software_plugin/wizards/debugging/__init__.py +0 -50
- empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +0 -414
- empathy_software_plugin/wizards/debugging/config_loaders.py +0 -446
- empathy_software_plugin/wizards/debugging/fix_applier.py +0 -469
- empathy_software_plugin/wizards/debugging/language_patterns.py +0 -385
- empathy_software_plugin/wizards/debugging/linter_parsers.py +0 -470
- empathy_software_plugin/wizards/debugging/verification.py +0 -369
- empathy_software_plugin/wizards/enhanced_testing_wizard.py +0 -537
- empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +0 -816
- empathy_software_plugin/wizards/multi_model_wizard.py +0 -501
- empathy_software_plugin/wizards/pattern_extraction_wizard.py +0 -422
- empathy_software_plugin/wizards/pattern_retriever_wizard.py +0 -400
- empathy_software_plugin/wizards/performance/__init__.py +0 -9
- empathy_software_plugin/wizards/performance/bottleneck_detector.py +0 -221
- empathy_software_plugin/wizards/performance/profiler_parsers.py +0 -278
- empathy_software_plugin/wizards/performance/trajectory_analyzer.py +0 -429
- empathy_software_plugin/wizards/performance_profiling_wizard.py +0 -305
- empathy_software_plugin/wizards/prompt_engineering_wizard.py +0 -425
- empathy_software_plugin/wizards/rag_pattern_wizard.py +0 -461
- empathy_software_plugin/wizards/security/__init__.py +0 -32
- empathy_software_plugin/wizards/security/exploit_analyzer.py +0 -290
- empathy_software_plugin/wizards/security/owasp_patterns.py +0 -241
- empathy_software_plugin/wizards/security/vulnerability_scanner.py +0 -604
- empathy_software_plugin/wizards/security_analysis_wizard.py +0 -322
- empathy_software_plugin/wizards/security_learning_wizard.py +0 -740
- empathy_software_plugin/wizards/tech_debt_wizard.py +0 -726
- empathy_software_plugin/wizards/testing/__init__.py +0 -27
- empathy_software_plugin/wizards/testing/coverage_analyzer.py +0 -459
- empathy_software_plugin/wizards/testing/quality_analyzer.py +0 -525
- empathy_software_plugin/wizards/testing/test_suggester.py +0 -533
- empathy_software_plugin/wizards/testing_wizard.py +0 -274
- wizards/__init__.py +0 -82
- wizards/admission_assessment_wizard.py +0 -644
- wizards/care_plan.py +0 -321
- wizards/clinical_assessment.py +0 -769
- wizards/discharge_planning.py +0 -77
- wizards/discharge_summary_wizard.py +0 -468
- wizards/dosage_calculation.py +0 -497
- wizards/incident_report_wizard.py +0 -454
- wizards/medication_reconciliation.py +0 -85
- wizards/nursing_assessment.py +0 -171
- wizards/patient_education.py +0 -654
- wizards/quality_improvement.py +0 -705
- wizards/sbar_report.py +0 -324
- wizards/sbar_wizard.py +0 -608
- wizards/shift_handoff_wizard.py +0 -535
- wizards/soap_note_wizard.py +0 -679
- wizards/treatment_plan.py +0 -15
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/WHEEL +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,740 +0,0 @@
|
|
|
1
|
-
"""Security Learning Wizard (Level 4)
|
|
2
|
-
|
|
3
|
-
Security analysis wizard that learns from team decisions.
|
|
4
|
-
Demonstrates what's possible with persistent memory: AI that remembers
|
|
5
|
-
which warnings are false positives and team security policies.
|
|
6
|
-
|
|
7
|
-
"Suppressing 8 warnings you've previously marked as acceptable."
|
|
8
|
-
|
|
9
|
-
Key capabilities enabled by persistent memory:
|
|
10
|
-
- False positive learning
|
|
11
|
-
- Team security policy accumulation
|
|
12
|
-
- Context-aware severity adjustment
|
|
13
|
-
- Historical vulnerability tracking
|
|
14
|
-
|
|
15
|
-
Copyright 2025 Smart AI Memory, LLC
|
|
16
|
-
Licensed under Fair Source 0.9
|
|
17
|
-
"""
|
|
18
|
-
|
|
19
|
-
import hashlib
|
|
20
|
-
import json
|
|
21
|
-
import logging
|
|
22
|
-
import re
|
|
23
|
-
from dataclasses import dataclass, field
|
|
24
|
-
from datetime import datetime
|
|
25
|
-
from pathlib import Path
|
|
26
|
-
from typing import Any
|
|
27
|
-
|
|
28
|
-
from .base_wizard import BaseWizard
|
|
29
|
-
|
|
30
|
-
logger = logging.getLogger(__name__)
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
@dataclass
|
|
34
|
-
class SecurityFinding:
|
|
35
|
-
"""A security finding from scanning"""
|
|
36
|
-
|
|
37
|
-
finding_id: str
|
|
38
|
-
file_path: str
|
|
39
|
-
line_number: int
|
|
40
|
-
vulnerability_type: str
|
|
41
|
-
severity: str # critical, high, medium, low, info
|
|
42
|
-
description: str
|
|
43
|
-
code_snippet: str
|
|
44
|
-
owasp_category: str | None = None
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
@dataclass
|
|
48
|
-
class TeamDecision:
|
|
49
|
-
"""A team's decision about a security finding"""
|
|
50
|
-
|
|
51
|
-
finding_hash: str # Hash of the finding pattern
|
|
52
|
-
decision: str # accepted, false_positive, deferred, fixed
|
|
53
|
-
reason: str
|
|
54
|
-
decided_by: str
|
|
55
|
-
decided_at: str
|
|
56
|
-
applies_to: str # "all", "file", "pattern"
|
|
57
|
-
expiration: str | None = None # Optional expiration date
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
@dataclass
|
|
61
|
-
class LearningResult:
|
|
62
|
-
"""Result of applying learned patterns to findings"""
|
|
63
|
-
|
|
64
|
-
total_findings: int
|
|
65
|
-
suppressed_count: int
|
|
66
|
-
adjusted_count: int
|
|
67
|
-
new_findings: int
|
|
68
|
-
suppression_details: list[dict[str, Any]] = field(default_factory=list)
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
class SecurityLearningWizard(BaseWizard):
|
|
72
|
-
"""Security Learning Wizard - Level 4 with Team Knowledge
|
|
73
|
-
|
|
74
|
-
What's now possible that wasn't before:
|
|
75
|
-
|
|
76
|
-
WITHOUT PERSISTENT MEMORY (Before):
|
|
77
|
-
- Same false positives flagged every scan
|
|
78
|
-
- No learning from team decisions
|
|
79
|
-
- Security fatigue from noise
|
|
80
|
-
- Lost context about accepted risks
|
|
81
|
-
|
|
82
|
-
WITH PERSISTENT MEMORY (After):
|
|
83
|
-
- AI remembers team decisions
|
|
84
|
-
- False positives automatically suppressed
|
|
85
|
-
- Context preserved: "Accepted by @sarah on 2025-09-15 because..."
|
|
86
|
-
- Security debt tracked over time
|
|
87
|
-
|
|
88
|
-
Example:
|
|
89
|
-
>>> wizard = SecurityLearningWizard()
|
|
90
|
-
>>> result = await wizard.analyze({
|
|
91
|
-
... "project_path": ".",
|
|
92
|
-
... "apply_learned_patterns": True
|
|
93
|
-
... })
|
|
94
|
-
>>> print(result["learning_applied"]["suppressed_count"])
|
|
95
|
-
# Shows how many warnings were suppressed based on team decisions
|
|
96
|
-
|
|
97
|
-
"""
|
|
98
|
-
|
|
99
|
-
@property
|
|
100
|
-
def name(self) -> str:
|
|
101
|
-
return "Security Learning Wizard"
|
|
102
|
-
|
|
103
|
-
@property
|
|
104
|
-
def level(self) -> int:
|
|
105
|
-
return 4
|
|
106
|
-
|
|
107
|
-
def __init__(self, pattern_storage_path: str = "./patterns/security"):
|
|
108
|
-
"""Initialize the security learning wizard.
|
|
109
|
-
|
|
110
|
-
Args:
|
|
111
|
-
pattern_storage_path: Path to git-based pattern storage
|
|
112
|
-
|
|
113
|
-
"""
|
|
114
|
-
super().__init__()
|
|
115
|
-
self.pattern_storage_path = Path(pattern_storage_path)
|
|
116
|
-
self.pattern_storage_path.mkdir(parents=True, exist_ok=True)
|
|
117
|
-
|
|
118
|
-
# Vulnerability detection patterns
|
|
119
|
-
self.vulnerability_patterns = {
|
|
120
|
-
"sql_injection": {
|
|
121
|
-
"patterns": [
|
|
122
|
-
r"execute\s*\(\s*['\"].*%s.*['\"]",
|
|
123
|
-
r"cursor\.execute\s*\(\s*f['\"]",
|
|
124
|
-
r"\.query\s*\(\s*['\"].*\+",
|
|
125
|
-
r"SELECT.*\+.*WHERE",
|
|
126
|
-
],
|
|
127
|
-
"owasp": "A03:2021",
|
|
128
|
-
"severity": "high",
|
|
129
|
-
},
|
|
130
|
-
"xss": {
|
|
131
|
-
"patterns": [
|
|
132
|
-
r"innerHTML\s*=",
|
|
133
|
-
r"document\.write\s*\(",
|
|
134
|
-
r"\.html\s*\(\s*[^)]*\$",
|
|
135
|
-
r"dangerouslySetInnerHTML",
|
|
136
|
-
],
|
|
137
|
-
"owasp": "A03:2021",
|
|
138
|
-
"severity": "high",
|
|
139
|
-
},
|
|
140
|
-
"hardcoded_secret": {
|
|
141
|
-
"patterns": [
|
|
142
|
-
r"password\s*=\s*['\"][^'\"]+['\"]",
|
|
143
|
-
r"api[_-]?key\s*=\s*['\"][^'\"]+['\"]",
|
|
144
|
-
r"secret\s*=\s*['\"][^'\"]+['\"]",
|
|
145
|
-
r"token\s*=\s*['\"][a-zA-Z0-9]{20,}['\"]",
|
|
146
|
-
],
|
|
147
|
-
"owasp": "A07:2021",
|
|
148
|
-
"severity": "critical",
|
|
149
|
-
},
|
|
150
|
-
"insecure_random": {
|
|
151
|
-
"patterns": [
|
|
152
|
-
r"Math\.random\s*\(",
|
|
153
|
-
r"random\.random\s*\(",
|
|
154
|
-
r"rand\s*\(",
|
|
155
|
-
],
|
|
156
|
-
"owasp": "A02:2021",
|
|
157
|
-
"severity": "medium",
|
|
158
|
-
},
|
|
159
|
-
"path_traversal": {
|
|
160
|
-
"patterns": [
|
|
161
|
-
r"open\s*\([^)]*\+",
|
|
162
|
-
r"readFile\s*\([^)]*\+",
|
|
163
|
-
r"\.\.\/",
|
|
164
|
-
],
|
|
165
|
-
"owasp": "A01:2021",
|
|
166
|
-
"severity": "high",
|
|
167
|
-
},
|
|
168
|
-
"command_injection": {
|
|
169
|
-
"patterns": [
|
|
170
|
-
r"exec\s*\([^)]*\+",
|
|
171
|
-
r"system\s*\([^)]*\+",
|
|
172
|
-
r"subprocess.*shell\s*=\s*True",
|
|
173
|
-
r"eval\s*\(",
|
|
174
|
-
],
|
|
175
|
-
"owasp": "A03:2021",
|
|
176
|
-
"severity": "critical",
|
|
177
|
-
},
|
|
178
|
-
}
|
|
179
|
-
|
|
180
|
-
async def analyze(self, context: dict[str, Any]) -> dict[str, Any]:
|
|
181
|
-
"""Analyze project security with learned patterns.
|
|
182
|
-
|
|
183
|
-
Context expects:
|
|
184
|
-
- project_path: Path to the project
|
|
185
|
-
- apply_learned_patterns: Apply team decisions (default True)
|
|
186
|
-
- exclude_patterns: Patterns to exclude
|
|
187
|
-
- scan_depth: "quick", "standard", "thorough" (default "standard")
|
|
188
|
-
|
|
189
|
-
Returns:
|
|
190
|
-
Analysis with:
|
|
191
|
-
- findings: Security findings (after learning applied)
|
|
192
|
-
- learning_applied: Summary of learned suppressions
|
|
193
|
-
- raw_findings: All findings before learning
|
|
194
|
-
- predictions: Level 4 predictions
|
|
195
|
-
- recommendations: Actionable steps
|
|
196
|
-
|
|
197
|
-
"""
|
|
198
|
-
project_path = Path(context.get("project_path", "."))
|
|
199
|
-
apply_learning = context.get("apply_learned_patterns", True)
|
|
200
|
-
exclude_patterns = context.get(
|
|
201
|
-
"exclude_patterns",
|
|
202
|
-
["node_modules", "venv", ".git", "__pycache__", "test", "tests"],
|
|
203
|
-
)
|
|
204
|
-
scan_depth = context.get("scan_depth", "standard")
|
|
205
|
-
|
|
206
|
-
# Step 1: Scan for vulnerabilities
|
|
207
|
-
raw_findings = await self._scan_for_vulnerabilities(
|
|
208
|
-
project_path,
|
|
209
|
-
exclude_patterns,
|
|
210
|
-
scan_depth,
|
|
211
|
-
)
|
|
212
|
-
|
|
213
|
-
# Step 2: Load team decisions
|
|
214
|
-
team_decisions = self._load_team_decisions()
|
|
215
|
-
|
|
216
|
-
# Step 3: Apply learned patterns
|
|
217
|
-
learning_result = None
|
|
218
|
-
filtered_findings = raw_findings
|
|
219
|
-
|
|
220
|
-
if apply_learning:
|
|
221
|
-
learning_result = self._apply_learned_patterns(raw_findings, team_decisions)
|
|
222
|
-
filtered_findings = [
|
|
223
|
-
f
|
|
224
|
-
for f in raw_findings
|
|
225
|
-
if f.finding_id
|
|
226
|
-
not in [d["finding_id"] for d in learning_result.suppression_details]
|
|
227
|
-
]
|
|
228
|
-
|
|
229
|
-
# Step 4: Group findings by severity
|
|
230
|
-
by_severity = self._group_by_severity(filtered_findings)
|
|
231
|
-
|
|
232
|
-
# Step 5: Generate predictions (Level 4)
|
|
233
|
-
predictions = self._generate_predictions(filtered_findings, raw_findings, team_decisions)
|
|
234
|
-
|
|
235
|
-
# Step 6: Generate recommendations
|
|
236
|
-
recommendations = self._generate_recommendations(
|
|
237
|
-
filtered_findings,
|
|
238
|
-
learning_result,
|
|
239
|
-
team_decisions,
|
|
240
|
-
)
|
|
241
|
-
|
|
242
|
-
return {
|
|
243
|
-
"findings": [
|
|
244
|
-
{
|
|
245
|
-
"id": f.finding_id,
|
|
246
|
-
"file": f.file_path,
|
|
247
|
-
"line": f.line_number,
|
|
248
|
-
"type": f.vulnerability_type,
|
|
249
|
-
"severity": f.severity,
|
|
250
|
-
"description": f.description,
|
|
251
|
-
"owasp": f.owasp_category,
|
|
252
|
-
"code_preview": f.code_snippet[:80],
|
|
253
|
-
}
|
|
254
|
-
for f in filtered_findings[:50] # Top 50
|
|
255
|
-
],
|
|
256
|
-
"summary": {
|
|
257
|
-
"total_after_learning": len(filtered_findings),
|
|
258
|
-
"by_severity": by_severity,
|
|
259
|
-
},
|
|
260
|
-
"learning_applied": (
|
|
261
|
-
{
|
|
262
|
-
"enabled": True,
|
|
263
|
-
"total_raw_findings": learning_result.total_findings,
|
|
264
|
-
"suppressed_count": learning_result.suppressed_count,
|
|
265
|
-
"new_findings": learning_result.new_findings,
|
|
266
|
-
"noise_reduction_percent": (
|
|
267
|
-
round(
|
|
268
|
-
(learning_result.suppressed_count / learning_result.total_findings)
|
|
269
|
-
* 100,
|
|
270
|
-
1,
|
|
271
|
-
)
|
|
272
|
-
if learning_result.total_findings > 0
|
|
273
|
-
else 0
|
|
274
|
-
),
|
|
275
|
-
"suppression_details": learning_result.suppression_details[:10],
|
|
276
|
-
}
|
|
277
|
-
if learning_result
|
|
278
|
-
else {"enabled": False}
|
|
279
|
-
),
|
|
280
|
-
"raw_findings_count": len(raw_findings),
|
|
281
|
-
"team_decisions_count": len(team_decisions),
|
|
282
|
-
"predictions": predictions,
|
|
283
|
-
"recommendations": recommendations,
|
|
284
|
-
"confidence": 0.85,
|
|
285
|
-
"memory_benefit": self._calculate_memory_benefit(learning_result, team_decisions),
|
|
286
|
-
}
|
|
287
|
-
|
|
288
|
-
async def _scan_for_vulnerabilities(
|
|
289
|
-
self,
|
|
290
|
-
project_path: Path,
|
|
291
|
-
exclude_patterns: list[str],
|
|
292
|
-
scan_depth: str,
|
|
293
|
-
) -> list[SecurityFinding]:
|
|
294
|
-
"""Scan project for security vulnerabilities"""
|
|
295
|
-
findings = []
|
|
296
|
-
|
|
297
|
-
# File extensions to scan
|
|
298
|
-
extensions = ["*.py", "*.js", "*.ts", "*.tsx", "*.jsx", "*.java", "*.go", "*.rb"]
|
|
299
|
-
|
|
300
|
-
# Adjust file limit based on scan depth
|
|
301
|
-
file_limits = {"quick": 50, "standard": 200, "thorough": 1000}
|
|
302
|
-
file_limit = file_limits.get(scan_depth, 200)
|
|
303
|
-
|
|
304
|
-
files_scanned = 0
|
|
305
|
-
for ext in extensions:
|
|
306
|
-
for file_path in project_path.rglob(ext):
|
|
307
|
-
if files_scanned >= file_limit:
|
|
308
|
-
break
|
|
309
|
-
|
|
310
|
-
# Skip excluded patterns
|
|
311
|
-
if any(exclude in str(file_path) for exclude in exclude_patterns):
|
|
312
|
-
continue
|
|
313
|
-
|
|
314
|
-
try:
|
|
315
|
-
findings.extend(self._scan_file(file_path))
|
|
316
|
-
files_scanned += 1
|
|
317
|
-
except (OSError, UnicodeDecodeError) as e:
|
|
318
|
-
logger.debug(f"Could not scan {file_path}: {e}")
|
|
319
|
-
continue
|
|
320
|
-
|
|
321
|
-
return findings
|
|
322
|
-
|
|
323
|
-
def _scan_file(self, file_path: Path) -> list[SecurityFinding]:
|
|
324
|
-
"""Scan a single file for vulnerabilities"""
|
|
325
|
-
findings = []
|
|
326
|
-
|
|
327
|
-
with open(file_path, encoding="utf-8", errors="ignore") as f:
|
|
328
|
-
lines = f.readlines()
|
|
329
|
-
|
|
330
|
-
for line_num, line in enumerate(lines, 1):
|
|
331
|
-
for vuln_type, config in self.vulnerability_patterns.items():
|
|
332
|
-
for pattern in config["patterns"]:
|
|
333
|
-
if re.search(pattern, line, re.IGNORECASE):
|
|
334
|
-
finding = SecurityFinding(
|
|
335
|
-
finding_id=self._generate_finding_id(file_path, line_num, vuln_type),
|
|
336
|
-
file_path=str(file_path),
|
|
337
|
-
line_number=line_num,
|
|
338
|
-
vulnerability_type=vuln_type,
|
|
339
|
-
severity=str(config["severity"]),
|
|
340
|
-
description=self._get_description(vuln_type),
|
|
341
|
-
code_snippet=line.strip(),
|
|
342
|
-
owasp_category=str(config["owasp"]) if config.get("owasp") else None,
|
|
343
|
-
)
|
|
344
|
-
findings.append(finding)
|
|
345
|
-
break # One finding per line per type
|
|
346
|
-
|
|
347
|
-
return findings
|
|
348
|
-
|
|
349
|
-
def _generate_finding_id(self, file_path: Path, line_number: int, vuln_type: str) -> str:
|
|
350
|
-
"""Generate a stable finding ID"""
|
|
351
|
-
content = f"{file_path}:{line_number}:{vuln_type}"
|
|
352
|
-
return hashlib.md5(content.encode(), usedforsecurity=False).hexdigest()[:12]
|
|
353
|
-
|
|
354
|
-
def _get_description(self, vuln_type: str) -> str:
|
|
355
|
-
"""Get description for vulnerability type"""
|
|
356
|
-
descriptions = {
|
|
357
|
-
"sql_injection": "Potential SQL injection - user input may reach query",
|
|
358
|
-
"xss": "Potential XSS vulnerability - unsanitized content in DOM",
|
|
359
|
-
"hardcoded_secret": "Hardcoded credential or secret detected",
|
|
360
|
-
"insecure_random": "Cryptographically weak random number generator",
|
|
361
|
-
"path_traversal": "Potential path traversal - file path from user input",
|
|
362
|
-
"command_injection": "Potential command injection - shell execution with input",
|
|
363
|
-
}
|
|
364
|
-
return descriptions.get(vuln_type, f"Potential {vuln_type} vulnerability")
|
|
365
|
-
|
|
366
|
-
def _load_team_decisions(self) -> list[TeamDecision]:
|
|
367
|
-
"""Load team decisions from pattern storage"""
|
|
368
|
-
decisions = []
|
|
369
|
-
decisions_file = self.pattern_storage_path / "team_decisions.json"
|
|
370
|
-
|
|
371
|
-
if decisions_file.exists():
|
|
372
|
-
try:
|
|
373
|
-
with open(decisions_file, encoding="utf-8") as f:
|
|
374
|
-
data = json.load(f)
|
|
375
|
-
|
|
376
|
-
for decision_data in data.get("decisions", []):
|
|
377
|
-
# Skip expired decisions
|
|
378
|
-
if decision_data.get("expiration"):
|
|
379
|
-
exp_date = datetime.fromisoformat(
|
|
380
|
-
decision_data["expiration"].replace("Z", ""),
|
|
381
|
-
)
|
|
382
|
-
if exp_date < datetime.now():
|
|
383
|
-
continue
|
|
384
|
-
|
|
385
|
-
decisions.append(
|
|
386
|
-
TeamDecision(
|
|
387
|
-
finding_hash=decision_data["finding_hash"],
|
|
388
|
-
decision=decision_data["decision"],
|
|
389
|
-
reason=decision_data["reason"],
|
|
390
|
-
decided_by=decision_data["decided_by"],
|
|
391
|
-
decided_at=decision_data["decided_at"],
|
|
392
|
-
applies_to=decision_data.get("applies_to", "pattern"),
|
|
393
|
-
expiration=decision_data.get("expiration"),
|
|
394
|
-
),
|
|
395
|
-
)
|
|
396
|
-
except (json.JSONDecodeError, KeyError) as e:
|
|
397
|
-
logger.warning(f"Could not load team decisions: {e}")
|
|
398
|
-
|
|
399
|
-
return decisions
|
|
400
|
-
|
|
401
|
-
def _apply_learned_patterns(
|
|
402
|
-
self,
|
|
403
|
-
findings: list[SecurityFinding],
|
|
404
|
-
decisions: list[TeamDecision],
|
|
405
|
-
) -> LearningResult:
|
|
406
|
-
"""Apply team decisions to filter findings"""
|
|
407
|
-
suppression_details = []
|
|
408
|
-
suppressed_ids = set()
|
|
409
|
-
|
|
410
|
-
for finding in findings:
|
|
411
|
-
# Check if there's a decision for this finding pattern
|
|
412
|
-
finding_hash = self._hash_finding_pattern(finding)
|
|
413
|
-
|
|
414
|
-
for decision in decisions:
|
|
415
|
-
if self._decision_matches(finding, finding_hash, decision):
|
|
416
|
-
if decision.decision in ["accepted", "false_positive"]:
|
|
417
|
-
suppression_details.append(
|
|
418
|
-
{
|
|
419
|
-
"finding_id": finding.finding_id,
|
|
420
|
-
"type": finding.vulnerability_type,
|
|
421
|
-
"file": finding.file_path,
|
|
422
|
-
"decision": decision.decision,
|
|
423
|
-
"reason": decision.reason,
|
|
424
|
-
"decided_by": decision.decided_by,
|
|
425
|
-
"decided_at": decision.decided_at,
|
|
426
|
-
},
|
|
427
|
-
)
|
|
428
|
-
suppressed_ids.add(finding.finding_id)
|
|
429
|
-
break
|
|
430
|
-
|
|
431
|
-
return LearningResult(
|
|
432
|
-
total_findings=len(findings),
|
|
433
|
-
suppressed_count=len(suppression_details),
|
|
434
|
-
adjusted_count=0,
|
|
435
|
-
new_findings=len(findings) - len(suppression_details),
|
|
436
|
-
suppression_details=suppression_details,
|
|
437
|
-
)
|
|
438
|
-
|
|
439
|
-
def _hash_finding_pattern(self, finding: SecurityFinding) -> str:
|
|
440
|
-
"""Create a hash that identifies the pattern (not specific location)"""
|
|
441
|
-
# Hash based on vulnerability type and code pattern (not line number)
|
|
442
|
-
pattern_content = f"{finding.vulnerability_type}:{finding.code_snippet.strip()}"
|
|
443
|
-
return hashlib.md5(pattern_content.encode(), usedforsecurity=False).hexdigest()[:16]
|
|
444
|
-
|
|
445
|
-
def _decision_matches(
|
|
446
|
-
self,
|
|
447
|
-
finding: SecurityFinding,
|
|
448
|
-
finding_hash: str,
|
|
449
|
-
decision: TeamDecision,
|
|
450
|
-
) -> bool:
|
|
451
|
-
"""Check if a decision applies to a finding"""
|
|
452
|
-
if decision.applies_to == "all":
|
|
453
|
-
# Decision applies to all findings of this type
|
|
454
|
-
return decision.finding_hash.startswith(finding.vulnerability_type)
|
|
455
|
-
|
|
456
|
-
if decision.applies_to == "file":
|
|
457
|
-
# Decision applies to all findings in a specific file
|
|
458
|
-
return decision.finding_hash == finding.file_path
|
|
459
|
-
|
|
460
|
-
# Default: pattern match
|
|
461
|
-
return decision.finding_hash == finding_hash
|
|
462
|
-
|
|
463
|
-
def _group_by_severity(self, findings: list[SecurityFinding]) -> dict[str, int]:
|
|
464
|
-
"""Group findings by severity"""
|
|
465
|
-
by_severity: dict[str, int] = {
|
|
466
|
-
"critical": 0,
|
|
467
|
-
"high": 0,
|
|
468
|
-
"medium": 0,
|
|
469
|
-
"low": 0,
|
|
470
|
-
"info": 0,
|
|
471
|
-
}
|
|
472
|
-
|
|
473
|
-
for finding in findings:
|
|
474
|
-
if finding.severity in by_severity:
|
|
475
|
-
by_severity[finding.severity] += 1
|
|
476
|
-
|
|
477
|
-
return by_severity
|
|
478
|
-
|
|
479
|
-
def _generate_predictions(
|
|
480
|
-
self,
|
|
481
|
-
filtered_findings: list[SecurityFinding],
|
|
482
|
-
raw_findings: list[SecurityFinding],
|
|
483
|
-
team_decisions: list[TeamDecision],
|
|
484
|
-
) -> list[dict[str, Any]]:
|
|
485
|
-
"""Generate Level 4 predictions"""
|
|
486
|
-
predictions = []
|
|
487
|
-
|
|
488
|
-
# Prediction 1: Critical vulnerability warning
|
|
489
|
-
critical_count = sum(1 for f in filtered_findings if f.severity == "critical")
|
|
490
|
-
if critical_count > 0:
|
|
491
|
-
predictions.append(
|
|
492
|
-
{
|
|
493
|
-
"type": "critical_vulnerabilities",
|
|
494
|
-
"severity": "critical",
|
|
495
|
-
"description": (
|
|
496
|
-
f"{critical_count} critical vulnerabilities detected. "
|
|
497
|
-
"In our experience, these are actively exploited in the wild."
|
|
498
|
-
),
|
|
499
|
-
"prevention_steps": [
|
|
500
|
-
"Fix before deployment",
|
|
501
|
-
"Add to security review checklist",
|
|
502
|
-
"Consider automated blocking in CI/CD",
|
|
503
|
-
],
|
|
504
|
-
},
|
|
505
|
-
)
|
|
506
|
-
|
|
507
|
-
# Prediction 2: Pattern concentration
|
|
508
|
-
vuln_types = [f.vulnerability_type for f in filtered_findings]
|
|
509
|
-
if vuln_types:
|
|
510
|
-
most_common = max(set(vuln_types), key=vuln_types.count)
|
|
511
|
-
count = vuln_types.count(most_common)
|
|
512
|
-
if count >= 3:
|
|
513
|
-
predictions.append(
|
|
514
|
-
{
|
|
515
|
-
"type": "pattern_concentration",
|
|
516
|
-
"severity": "high",
|
|
517
|
-
"description": (
|
|
518
|
-
f"{count} instances of {most_common} detected. "
|
|
519
|
-
"Clustered vulnerabilities suggest systematic issue."
|
|
520
|
-
),
|
|
521
|
-
"prevention_steps": [
|
|
522
|
-
f"Add linting rule for {most_common}",
|
|
523
|
-
"Review coding patterns team-wide",
|
|
524
|
-
"Consider security training",
|
|
525
|
-
],
|
|
526
|
-
},
|
|
527
|
-
)
|
|
528
|
-
|
|
529
|
-
# Prediction 3: Learning effectiveness
|
|
530
|
-
if len(raw_findings) > 0 and len(team_decisions) > 0:
|
|
531
|
-
suppression_rate = (len(raw_findings) - len(filtered_findings)) / len(raw_findings)
|
|
532
|
-
if suppression_rate > 0.3:
|
|
533
|
-
predictions.append(
|
|
534
|
-
{
|
|
535
|
-
"type": "learning_effective",
|
|
536
|
-
"severity": "info",
|
|
537
|
-
"description": (
|
|
538
|
-
f"Team decisions reduced noise by {int(suppression_rate * 100)}%. "
|
|
539
|
-
"Persistent memory is working."
|
|
540
|
-
),
|
|
541
|
-
"prevention_steps": ["Continue documenting decisions"],
|
|
542
|
-
},
|
|
543
|
-
)
|
|
544
|
-
|
|
545
|
-
# Prediction 4: Aging decisions warning
|
|
546
|
-
old_decisions = [
|
|
547
|
-
d
|
|
548
|
-
for d in team_decisions
|
|
549
|
-
if (datetime.now() - datetime.fromisoformat(d.decided_at.replace("Z", ""))).days > 180
|
|
550
|
-
]
|
|
551
|
-
if old_decisions:
|
|
552
|
-
predictions.append(
|
|
553
|
-
{
|
|
554
|
-
"type": "aging_decisions",
|
|
555
|
-
"severity": "medium",
|
|
556
|
-
"description": (
|
|
557
|
-
f"{len(old_decisions)} security decisions are over 6 months old. "
|
|
558
|
-
"Consider reviewing if they're still valid."
|
|
559
|
-
),
|
|
560
|
-
"prevention_steps": [
|
|
561
|
-
"Review accepted risks quarterly",
|
|
562
|
-
"Set expiration dates on decisions",
|
|
563
|
-
"Re-evaluate in context of new threats",
|
|
564
|
-
],
|
|
565
|
-
},
|
|
566
|
-
)
|
|
567
|
-
|
|
568
|
-
return predictions
|
|
569
|
-
|
|
570
|
-
def _generate_recommendations(
|
|
571
|
-
self,
|
|
572
|
-
filtered_findings: list[SecurityFinding],
|
|
573
|
-
learning_result: LearningResult | None,
|
|
574
|
-
team_decisions: list[TeamDecision],
|
|
575
|
-
) -> list[str]:
|
|
576
|
-
"""Generate actionable recommendations"""
|
|
577
|
-
recommendations = []
|
|
578
|
-
|
|
579
|
-
# Severity-based recommendations
|
|
580
|
-
critical_count = sum(1 for f in filtered_findings if f.severity == "critical")
|
|
581
|
-
high_count = sum(1 for f in filtered_findings if f.severity == "high")
|
|
582
|
-
|
|
583
|
-
if critical_count > 0:
|
|
584
|
-
recommendations.append(
|
|
585
|
-
f"🚨 {critical_count} CRITICAL vulnerabilities - block deployment until fixed",
|
|
586
|
-
)
|
|
587
|
-
|
|
588
|
-
if high_count > 0:
|
|
589
|
-
recommendations.append(
|
|
590
|
-
f"⚠️ {high_count} HIGH severity findings - prioritize for next sprint",
|
|
591
|
-
)
|
|
592
|
-
|
|
593
|
-
# Learning-based recommendations
|
|
594
|
-
if learning_result and learning_result.suppressed_count > 0:
|
|
595
|
-
recommendations.append(
|
|
596
|
-
f"✅ {learning_result.suppressed_count} findings suppressed by team decisions "
|
|
597
|
-
f"(reducing noise by {int(learning_result.suppressed_count / learning_result.total_findings * 100)}%)",
|
|
598
|
-
)
|
|
599
|
-
|
|
600
|
-
# Help build knowledge base
|
|
601
|
-
new_types = {f.vulnerability_type for f in filtered_findings}
|
|
602
|
-
if new_types and len(team_decisions) < 10:
|
|
603
|
-
recommendations.append(
|
|
604
|
-
"💡 Tip: After reviewing, use record_decision() to teach the wizard",
|
|
605
|
-
)
|
|
606
|
-
|
|
607
|
-
# Memory benefit
|
|
608
|
-
if team_decisions:
|
|
609
|
-
recommendations.append(
|
|
610
|
-
f"📚 Using {len(team_decisions)} team security decisions from memory",
|
|
611
|
-
)
|
|
612
|
-
else:
|
|
613
|
-
recommendations.append(
|
|
614
|
-
"💾 No team decisions recorded yet - start building security knowledge base",
|
|
615
|
-
)
|
|
616
|
-
|
|
617
|
-
return recommendations
|
|
618
|
-
|
|
619
|
-
def _calculate_memory_benefit(
|
|
620
|
-
self,
|
|
621
|
-
learning_result: LearningResult | None,
|
|
622
|
-
team_decisions: list[TeamDecision],
|
|
623
|
-
) -> dict[str, Any]:
|
|
624
|
-
"""Calculate the benefit provided by persistent memory"""
|
|
625
|
-
if not team_decisions:
|
|
626
|
-
return {
|
|
627
|
-
"decisions_available": False,
|
|
628
|
-
"value_statement": (
|
|
629
|
-
"No team decisions recorded yet. "
|
|
630
|
-
"Use record_decision() to start building your security knowledge base."
|
|
631
|
-
),
|
|
632
|
-
"noise_reduction": "N/A",
|
|
633
|
-
}
|
|
634
|
-
|
|
635
|
-
suppressed = learning_result.suppressed_count if learning_result else 0
|
|
636
|
-
total = learning_result.total_findings if learning_result else 0
|
|
637
|
-
|
|
638
|
-
return {
|
|
639
|
-
"decisions_available": True,
|
|
640
|
-
"decisions_count": len(team_decisions),
|
|
641
|
-
"findings_suppressed": suppressed,
|
|
642
|
-
"noise_reduction_percent": (round((suppressed / total) * 100, 1) if total > 0 else 0),
|
|
643
|
-
"value_statement": (
|
|
644
|
-
f"Persistent memory applied {len(team_decisions)} team decisions, "
|
|
645
|
-
f"suppressing {suppressed} warnings. "
|
|
646
|
-
"Without memory, you'd review the same false positives every scan."
|
|
647
|
-
),
|
|
648
|
-
"oldest_decision": min((d.decided_at for d in team_decisions), default="N/A"),
|
|
649
|
-
}
|
|
650
|
-
|
|
651
|
-
async def record_decision(
    self,
    finding: dict[str, Any],
    decision: str,
    reason: str,
    decided_by: str,
    applies_to: str = "pattern",
    expiration_days: int | None = None,
) -> bool:
    """Record a team decision about a security finding.

    Call this after reviewing a finding to teach the wizard.

    Args:
        finding: The finding dict from analyze results
        decision: "accepted", "false_positive", "deferred", "fixed"
        reason: Why this decision was made
        decided_by: Who made the decision (e.g., "@sarah")
        applies_to: "pattern" (this specific pattern), "file", or "all" (all of this type)
        expiration_days: Optional days until decision expires

    Returns:
        True if recorded successfully

    Example:
        >>> await wizard.record_decision(
        ...     finding=results["findings"][0],
        ...     decision="false_positive",
        ...     reason="ORM handles SQL escaping, not vulnerable",
        ...     decided_by="@sarah",
        ...     applies_to="pattern"
        ... )

    """
    decisions_file = self.pattern_storage_path / "team_decisions.json"

    # Load existing decisions (best effort: a corrupt file is replaced
    # with a fresh structure rather than aborting the recording).
    decisions_data: dict[str, list[dict]] = {"decisions": []}
    if decisions_file.exists():
        try:
            with open(decisions_file, encoding="utf-8") as f:
                decisions_data = json.load(f)
        except json.JSONDecodeError:
            pass

    # Create finding hash based on the decision's scope (applies_to).
    if applies_to == "all":
        finding_hash = finding.get("type", "unknown")
    elif applies_to == "file":
        finding_hash = finding.get("file", "unknown")
    else:
        # Pattern-based hash; md5 is a fingerprint only, not security.
        pattern_content = f"{finding.get('type')}:{finding.get('code_preview', '')}"
        finding_hash = hashlib.md5(pattern_content.encode(), usedforsecurity=False).hexdigest()[
            :16
        ]

    # Optional expiry timestamp.
    expiration = None
    if expiration_days:
        from datetime import timedelta

        expiration = (datetime.now() + timedelta(days=expiration_days)).isoformat()

    new_decision = {
        "finding_hash": finding_hash,
        "decision": decision,
        "reason": reason,
        "decided_by": decided_by,
        "decided_at": datetime.now().isoformat(),
        "applies_to": applies_to,
        "expiration": expiration,
        "original_finding": {
            "type": finding.get("type"),
            "file": finding.get("file"),
            "severity": finding.get("severity"),
        },
    }

    # BUGFIX: a valid JSON file without a "decisions" key previously
    # raised KeyError here; setdefault tolerates that shape.
    decisions_data.setdefault("decisions", []).append(new_decision)

    # Persist; returns False (after logging) on I/O failure.
    try:
        with open(decisions_file, "w", encoding="utf-8") as f:
            json.dump(decisions_data, f, indent=2)
        logger.info("Recorded security decision: %s for %s", decision, finding.get("type"))
        return True
    except OSError as e:
        logger.error("Could not record decision: %s", e)
        return False