agent-audit 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. agent_audit/__init__.py +3 -0
  2. agent_audit/__main__.py +13 -0
  3. agent_audit/cli/__init__.py +1 -0
  4. agent_audit/cli/commands/__init__.py +1 -0
  5. agent_audit/cli/commands/init.py +44 -0
  6. agent_audit/cli/commands/inspect.py +236 -0
  7. agent_audit/cli/commands/scan.py +329 -0
  8. agent_audit/cli/formatters/__init__.py +1 -0
  9. agent_audit/cli/formatters/json.py +138 -0
  10. agent_audit/cli/formatters/sarif.py +155 -0
  11. agent_audit/cli/formatters/terminal.py +221 -0
  12. agent_audit/cli/main.py +34 -0
  13. agent_audit/config/__init__.py +1 -0
  14. agent_audit/config/ignore.py +477 -0
  15. agent_audit/core_utils/__init__.py +1 -0
  16. agent_audit/models/__init__.py +18 -0
  17. agent_audit/models/finding.py +159 -0
  18. agent_audit/models/risk.py +77 -0
  19. agent_audit/models/tool.py +182 -0
  20. agent_audit/rules/__init__.py +6 -0
  21. agent_audit/rules/engine.py +503 -0
  22. agent_audit/rules/loader.py +160 -0
  23. agent_audit/scanners/__init__.py +5 -0
  24. agent_audit/scanners/base.py +32 -0
  25. agent_audit/scanners/config_scanner.py +390 -0
  26. agent_audit/scanners/mcp_config_scanner.py +321 -0
  27. agent_audit/scanners/mcp_inspector.py +421 -0
  28. agent_audit/scanners/python_scanner.py +544 -0
  29. agent_audit/scanners/secret_scanner.py +521 -0
  30. agent_audit/utils/__init__.py +21 -0
  31. agent_audit/utils/compat.py +98 -0
  32. agent_audit/utils/mcp_client.py +343 -0
  33. agent_audit/version.py +3 -0
  34. agent_audit-0.1.0.dist-info/METADATA +219 -0
  35. agent_audit-0.1.0.dist-info/RECORD +37 -0
  36. agent_audit-0.1.0.dist-info/WHEEL +4 -0
  37. agent_audit-0.1.0.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,477 @@
1
+ """
2
+ Ignore and Allowlist configuration management.
3
+
4
+ Handles:
5
+ - Loading .agent-audit.yaml configuration
6
+ - Rule-level and path-level ignore rules
7
+ - Confidence score adjustment based on allowlists
8
+ - Baseline scanning support
9
+ """
10
+
11
+ import fnmatch
12
+ import hashlib
13
+ import json
14
+ import logging
15
+ from pathlib import Path
16
+ from typing import Dict, Any, List, Optional, Set
17
+ from dataclasses import dataclass, field
18
+ from datetime import datetime
19
+
20
+ import yaml
21
+
22
+ from agent_audit.models.finding import Finding
23
+
24
+ logger = logging.getLogger(__name__)
25
+
26
+
27
@dataclass
class IgnoreRule:
    """One suppression entry from the ``ignore:`` section of the config.

    Empty fields act as wildcards: a rule with no ``rule_id`` applies to
    every rule, no ``paths`` to every file, and no ``tools`` to every tool.
    """
    rule_id: Optional[str] = None  # e.g. "AGENT-003"; "*"/None matches all rules
    paths: List[str] = field(default_factory=list)  # glob patterns, relative to scan root
    tools: List[str] = field(default_factory=list)  # exact tool names
    reason: str = ""  # human-readable justification shown on suppressed findings
34
+
35
+
36
@dataclass
class ScanConfig:
    """Settings parsed from the ``scan:`` section of .agent-audit.yaml."""
    exclude: List[str] = field(default_factory=list)  # glob patterns of paths to skip
    min_severity: str = "low"
    fail_on: str = "high"
42
+
43
+
44
@dataclass
class AllowlistConfig:
    """Top-level configuration loaded from .agent-audit.yaml."""
    # Network hosts allowed for AGENT-003 (data exfiltration)
    allowed_hosts: List[str] = field(default_factory=list)
    # File paths allowed for file access
    allowed_paths: List[str] = field(default_factory=list)
    # Ignore rules
    ignore_rules: List[IgnoreRule] = field(default_factory=list)
    # Inline ignore marker (like # noqa)
    inline_ignore_marker: str = "# noaudit"
    # Scan configuration
    scan: ScanConfig = field(default_factory=ScanConfig)
61
+
62
+
63
class IgnoreManager:
    """
    Manager for ignore rules and allowlist configuration.

    Loads configuration from .agent-audit.yaml and provides methods
    to check if findings should be suppressed or have adjusted confidence.
    """

    # Candidate filenames, tried in this order at every search location.
    CONFIG_FILENAMES = ['.agent-audit.yaml', '.agent-audit.yml', 'agent-audit.yaml']

    def __init__(self):
        self.config: Optional[AllowlistConfig] = None
        self._loaded_from: Optional[Path] = None
        self._base_path: Optional[Path] = None  # Base path for relative path matching

    def load(self, project_path: Path) -> bool:
        """
        Load ignore configuration from project path.

        Searches for config in:
        1. The scan target directory (project_path)
        2. Parent directories of project_path up to filesystem root

        Note: Does NOT search CWD if it's outside the scan target's path hierarchy.
        This prevents loading unrelated configs when scanning external directories.

        Args:
            project_path: Root path of the project to scan

        Returns:
            True if configuration was loaded successfully
        """
        # Resolve to absolute path
        project_path = project_path.resolve()

        # Collect search paths (deduplicated, ordered)
        search_paths: List[Path] = []

        # 1. Scan target directory
        search_paths.append(project_path)

        # 2. Parent directories of project_path up to root
        # (the filesystem root is its own parent, which terminates the walk)
        parent = project_path.parent
        while parent != parent.parent:
            if parent not in search_paths:
                search_paths.append(parent)
            parent = parent.parent

        # Search each path for config files
        for search_path in search_paths:
            for filename in self.CONFIG_FILENAMES:
                config_path = search_path / filename
                if config_path.exists():
                    # Store base path as the scan target (for relative path matching),
                    # even when the config file lives in a parent directory.
                    self._base_path = project_path
                    return self._load_file(config_path)

        return False

    def get_exclude_patterns(self) -> List[str]:
        """Get the list of exclude patterns from scan config (empty if unloaded)."""
        if self.config and self.config.scan:
            return self.config.scan.exclude
        return []

    def _load_file(self, path: Path) -> bool:
        """Load configuration from a specific file.

        Returns True on success; False for empty files, YAML errors, or I/O errors.
        All ``get(...) or default`` accesses deliberately coerce YAML nulls
        (a key present but holding only comments parses as None) to safe defaults.
        """
        try:
            with open(path, 'r', encoding='utf-8') as f:
                data = yaml.safe_load(f)

            if not data:
                return False

            # Parse ignore rules (handle None value from YAML when only comments exist)
            ignore_rules = []
            ignore_data = data.get('ignore') or []
            for rule_data in ignore_data:
                rule = IgnoreRule(
                    rule_id=rule_data.get('rule_id'),
                    paths=rule_data.get('paths') or [],
                    tools=rule_data.get('tools') or [],
                    reason=rule_data.get('reason') or ''
                )
                ignore_rules.append(rule)

            # Parse scan configuration (handle None value from YAML)
            scan_data = data.get('scan') or {}
            scan_config = ScanConfig(
                exclude=scan_data.get('exclude') or [],
                min_severity=scan_data.get('min_severity') or 'low',
                fail_on=scan_data.get('fail_on') or 'high'
            )

            self.config = AllowlistConfig(
                # `or []` keeps these as lists even when the YAML key is
                # present but null; adjust_confidence() iterates them directly.
                allowed_hosts=data.get('allowed_hosts') or [],
                allowed_paths=data.get('allowed_paths') or [],
                ignore_rules=ignore_rules,
                inline_ignore_marker=data.get('inline_ignore_marker') or '# noaudit',
                scan=scan_config
            )
            self._loaded_from = path
            logger.debug(f"Loaded config from {path}")
            return True

        except yaml.YAMLError as e:
            logger.warning(f"Failed to parse {path}: {e}")
            return False
        except Exception as e:
            logger.warning(f"Error loading {path}: {e}")
            return False

    def should_ignore(
        self,
        rule_id: str,
        file_path: str,
        tool_name: str = ""
    ) -> Optional[str]:
        """
        Check if a finding should be ignored.

        Args:
            rule_id: The rule ID (e.g., "AGENT-003")
            file_path: Path of the file where finding was detected
            tool_name: Optional tool name involved

        Returns:
            Ignore reason if should be ignored, None otherwise
        """
        if not self.config:
            return None

        # Compute relative path for matching
        rel_path = self._get_relative_path(file_path)

        for ignore in self.config.ignore_rules:
            # Match rule ID if specified (support "*" as wildcard for all rules)
            if ignore.rule_id and ignore.rule_id != "*" and ignore.rule_id != rule_id:
                continue

            # Match paths if specified
            if ignore.paths:
                path_matched = self._match_any_pattern(rel_path, ignore.paths)
                if not path_matched:
                    continue

            # Match tool name if specified. NOTE: an empty tool_name bypasses
            # the tool filter, so tool-restricted rules still suppress findings
            # that carry no tool attribution.
            if ignore.tools:
                if tool_name and tool_name not in ignore.tools:
                    continue

            # All conditions matched
            return ignore.reason or f"Suppressed by config ({self._loaded_from})"

        return None

    def _get_relative_path(self, file_path: str) -> str:
        """
        Convert a file path to a relative path for pattern matching.

        If the file_path is absolute and within the base_path,
        returns the relative portion. Otherwise returns the original path.
        """
        try:
            file_path_obj = Path(file_path)
            if file_path_obj.is_absolute() and self._base_path:
                try:
                    return str(file_path_obj.relative_to(self._base_path))
                except ValueError:
                    # file_path is not relative to base_path
                    pass
            return file_path
        except Exception:
            # Malformed paths fall back to verbatim matching.
            return file_path

    def _match_any_pattern(self, path: str, patterns: List[str]) -> bool:
        """
        Check if a path matches any of the given glob patterns.

        Handles both simple patterns (tests/**) and recursive patterns.
        Uses forward slashes for cross-platform consistency.
        """
        # Normalize path separators to forward slashes
        normalized_path = path.replace('\\', '/')

        for pattern in patterns:
            normalized_pattern = pattern.replace('\\', '/')

            # Try direct fnmatch
            if fnmatch.fnmatch(normalized_path, normalized_pattern):
                return True

            # For patterns like "tests/**", also match exact prefix "tests/"
            # (fnmatch's "*" does not cross "/" boundaries the way glob "**" does)
            if normalized_pattern.endswith('/**'):
                prefix = normalized_pattern[:-3]  # Remove "/**"
                if normalized_path.startswith(prefix + '/') or normalized_path == prefix:
                    return True

            # For patterns like "**/test_*", match against filename
            if normalized_pattern.startswith('**/'):
                suffix_pattern = normalized_pattern[3:]  # Remove "**/"
                filename = Path(normalized_path).name
                if fnmatch.fnmatch(filename, suffix_pattern):
                    return True
                # Also try matching any path component
                for part in Path(normalized_path).parts:
                    if fnmatch.fnmatch(part, suffix_pattern):
                        return True

        return False

    def should_exclude_path(self, file_path: str) -> bool:
        """
        Check if a file path should be excluded from scanning.

        Args:
            file_path: Path to check (can be absolute or relative)

        Returns:
            True if the path should be excluded
        """
        if not self.config or not self.config.scan.exclude:
            return False

        rel_path = self._get_relative_path(file_path)
        return self._match_any_pattern(rel_path, self.config.scan.exclude)

    def adjust_confidence(
        self,
        rule_id: str,
        finding_metadata: Dict[str, Any]
    ) -> float:
        """
        Calculate confidence adjustment based on allowlist.

        Args:
            rule_id: The rule ID
            finding_metadata: Metadata from the finding (may contain target_host, file_path, etc.)

        Returns:
            Confidence multiplier (0.0 to 1.0)
        """
        if not self.config:
            return 1.0

        adjustment = 1.0

        # For AGENT-003 (data exfiltration), check if target is allowed
        if rule_id == "AGENT-003":
            target_host = finding_metadata.get('target_host', '')
            if target_host:
                for pattern in self.config.allowed_hosts:
                    if fnmatch.fnmatch(target_host, pattern):
                        adjustment *= 0.3  # Significant reduction
                        break

        # Check if file path is in allowed paths (simple prefix match)
        file_path = finding_metadata.get('file_path', '')
        if file_path:
            for allowed in self.config.allowed_paths:
                if file_path.startswith(allowed):
                    adjustment *= 0.7
                    break

        return adjustment

    def apply_to_finding(self, finding: Finding) -> Finding:
        """
        Apply ignore rules and confidence adjustments to a finding.

        Mutates the finding in place and returns it for convenience.

        Args:
            finding: The finding to process

        Returns:
            The finding with suppressed/confidence fields updated
        """
        # Check if should be suppressed
        tool_name = finding.metadata.get('tool_name', '')
        ignore_reason = self.should_ignore(
            finding.rule_id,
            finding.location.file_path,
            tool_name
        )

        if ignore_reason:
            finding.suppressed = True
            finding.suppressed_reason = ignore_reason
            finding.suppressed_by = str(self._loaded_from) if self._loaded_from else None

        # Adjust confidence
        adjustment = self.adjust_confidence(finding.rule_id, finding.metadata)
        finding.confidence *= adjustment

        return finding
357
+
358
+
359
+ # Baseline scanning support
360
+
361
def compute_fingerprint(finding: Finding) -> str:
    """
    Compute a stable fingerprint for a finding.

    The fingerprint is used for baseline comparison and deduplication.
    It's stable across reruns for the same issue.
    """
    snippet_prefix = (finding.location.snippet or "")[:50]
    raw = "|".join([
        finding.rule_id,
        finding.location.file_path,
        str(finding.location.start_line),
        snippet_prefix,
    ])
    # First 16 hex chars of SHA-256 are plenty for dedup purposes.
    return hashlib.sha256(raw.encode()).hexdigest()[:16]
376
+
377
+
378
def save_baseline(findings: List[Finding], output_path: Path) -> None:
    """
    Save findings as a baseline file.

    Args:
        findings: List of findings to save as baseline
        output_path: Path to save the baseline file
    """
    # Local import: the module header only imports `datetime` itself.
    from datetime import timezone

    baseline = {
        "version": "1.0",
        # Timezone-aware timestamp; datetime.utcnow() is deprecated since
        # Python 3.12 and produced naive datetimes.
        "created_at": datetime.now(timezone.utc).isoformat(),
        "fingerprints": [compute_fingerprint(f) for f in findings]
    }
    output_path.write_text(json.dumps(baseline, indent=2), encoding="utf-8")
392
+
393
+
394
def load_baseline(baseline_path: Path) -> Set[str]:
    """
    Load fingerprints from a baseline file.

    Args:
        baseline_path: Path to the baseline file

    Returns:
        Set of fingerprints from the baseline; empty on any read/parse error
    """
    try:
        payload = json.loads(baseline_path.read_text(encoding="utf-8"))
        # Missing key (or malformed payload, caught below) yields an empty set.
        return set(payload.get("fingerprints", []))
    except Exception as e:
        # Best-effort: a broken baseline simply means "no prior findings".
        logger.warning(f"Failed to load baseline from {baseline_path}: {e}")
        return set()
410
+
411
+
412
def filter_by_baseline(
    findings: List[Finding],
    baseline: Set[str]
) -> List[Finding]:
    """
    Filter findings to only include those not in the baseline.

    Args:
        findings: List of findings to filter
        baseline: Set of fingerprints from the baseline

    Returns:
        List of findings that are new (not in baseline)
    """
    def _is_new(finding: Finding) -> bool:
        # A finding is "new" when its fingerprint was not recorded previously.
        return compute_fingerprint(finding) not in baseline

    return [finding for finding in findings if _is_new(finding)]
430
+
431
+
432
def create_default_config() -> str:
    """
    Create a default .agent-audit.yaml configuration template.

    The template documents every supported section (scan, allowed_hosts,
    allowed_paths, ignore) with commented examples.

    Returns:
        YAML string with default configuration
    """
    return '''# Agent Audit Configuration
# https://github.com/your-org/agent-audit

# Scan settings
scan:
  exclude:
    - "tests/**"
    - "venv/**"
    - "node_modules/**"
    - ".git/**"
  min_severity: low
  fail_on: high

# Allowed network hosts (reduces confidence for AGENT-003)
# Use wildcards: *.internal.company.com
allowed_hosts:
  - "*.internal.company.com"
  - "api.openai.com"
  - "api.anthropic.com"

# Allowed file paths
allowed_paths:
  - "/tmp"
  - "/app/data"

# Ignore rules
ignore:
  # Example: Ignore data exfiltration warnings in auth module
  # - rule_id: AGENT-003
  #   paths:
  #     - "auth/**"
  #   reason: "Auth module legitimately communicates with auth service"

  # Example: Ignore excessive permissions for admin agent
  # - rule_id: AGENT-002
  #   paths:
  #     - "admin_agent.py"
  #   reason: "Admin agent requires broad permissions by design"
'''
@@ -0,0 +1 @@
1
+ """Shared utilities for Agent Security Suite."""
@@ -0,0 +1,18 @@
1
+ """Core data models for Agent Security Suite."""
2
+
3
+ from agent_audit.models.finding import Finding, Severity, Category, Location, Remediation
4
+ from agent_audit.models.tool import ToolDefinition, PermissionType, RiskLevel, ToolParameter
5
+ from agent_audit.models.risk import RiskScore
6
+
7
+ __all__ = [
8
+ "Finding",
9
+ "Severity",
10
+ "Category",
11
+ "Location",
12
+ "Remediation",
13
+ "ToolDefinition",
14
+ "PermissionType",
15
+ "RiskLevel",
16
+ "ToolParameter",
17
+ "RiskScore",
18
+ ]
@@ -0,0 +1,159 @@
1
+ """Finding model for security scan results."""
2
+
3
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, Dict, Optional

from agent_audit.models.risk import Severity, Category, Location
8
+
9
+
10
@dataclass
class Remediation:
    """How to fix a finding: required guidance plus optional example and link."""
    description: str  # human-readable remediation advice
    code_example: Optional[str] = None  # illustrative fixed code, if available
    reference_url: Optional[str] = None  # link to further documentation
16
+
17
+
18
@dataclass
class Finding:
    """
    Security finding result.

    Represents a single security issue discovered during scanning.
    Includes fields from both technical-spec.md and delta-spec.md.
    """
    rule_id: str  # e.g., "AGENT-001"
    title: str
    description: str
    severity: Severity
    category: Category
    location: Location

    # Delta-spec additions for confidence scoring
    confidence: float = 1.0  # 0.0-1.0 confidence score
    suppressed: bool = False
    suppressed_reason: Optional[str] = None
    suppressed_by: Optional[str] = None  # config file path

    # Standard fields
    cwe_id: Optional[str] = None  # e.g., "CWE-78"
    owasp_id: Optional[str] = None  # e.g., "OWASP-AGENT-01"
    remediation: Optional[Remediation] = None
    metadata: Dict[str, Any] = field(default_factory=dict)
    # Timezone-aware creation time: datetime.utcnow() is deprecated since
    # Python 3.12 and produced naive datetimes.
    detected_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def is_actionable(self, min_confidence: float = 0.5) -> bool:
        """
        Determine if this finding requires user attention.

        A finding is actionable if:
        - It is not suppressed
        - Its confidence meets or exceeds the minimum threshold

        Args:
            min_confidence: Minimum confidence threshold (default 0.5)

        Returns:
            True if the finding should be shown to the user
        """
        return not self.suppressed and self.confidence >= min_confidence

    def to_sarif(self) -> Dict[str, Any]:
        """Convert to SARIF 2.1.0 result format."""
        result = {
            "ruleId": self.rule_id,
            "level": self._severity_to_sarif_level(),
            "message": {"text": self.description},
            "locations": [{
                "physicalLocation": {
                    "artifactLocation": {"uri": self.location.file_path},
                    "region": {
                        "startLine": self.location.start_line,
                        "endLine": self.location.end_line
                    }
                }
            }]
        }

        # Add column information if available
        if self.location.start_column is not None:
            result["locations"][0]["physicalLocation"]["region"]["startColumn"] = (
                self.location.start_column
            )
        if self.location.end_column is not None:
            result["locations"][0]["physicalLocation"]["region"]["endColumn"] = (
                self.location.end_column
            )

        # Add fingerprint for deduplication
        result["fingerprints"] = {
            "primary": self._compute_fingerprint()
        }

        # Add properties for additional metadata
        properties = {}
        if self.confidence < 1.0:
            properties["confidence"] = self.confidence
        if self.cwe_id:
            properties["cwe"] = self.cwe_id
        if self.owasp_id:
            properties["owasp"] = self.owasp_id
        if properties:
            result["properties"] = properties

        return result

    def _severity_to_sarif_level(self) -> str:
        """Map severity to SARIF level (error/warning/note)."""
        mapping = {
            Severity.CRITICAL: "error",
            Severity.HIGH: "error",
            Severity.MEDIUM: "warning",
            Severity.LOW: "note",
            Severity.INFO: "note"
        }
        # Fall back to "warning" rather than raising KeyError if a new
        # Severity member is ever added without updating this mapping.
        return mapping.get(self.severity, "warning")

    def _compute_fingerprint(self) -> str:
        """Compute a stable fingerprint for deduplication.

        Must stay in sync with agent_audit.config.ignore.compute_fingerprint
        so baselines match SARIF fingerprints.
        """
        import hashlib
        components = [
            self.rule_id,
            self.location.file_path,
            str(self.location.start_line),
            (self.location.snippet or "")[:50]
        ]
        raw = "|".join(components)
        return hashlib.sha256(raw.encode()).hexdigest()[:16]

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        return {
            "rule_id": self.rule_id,
            "title": self.title,
            "description": self.description,
            "severity": self.severity.value,
            "category": self.category.value,
            "location": {
                "file_path": self.location.file_path,
                "start_line": self.location.start_line,
                "end_line": self.location.end_line,
                "start_column": self.location.start_column,
                "end_column": self.location.end_column,
                "snippet": self.location.snippet,
            },
            "confidence": self.confidence,
            "suppressed": self.suppressed,
            "suppressed_reason": self.suppressed_reason,
            "suppressed_by": self.suppressed_by,
            "cwe_id": self.cwe_id,
            "owasp_id": self.owasp_id,
            "remediation": {
                "description": self.remediation.description,
                "code_example": self.remediation.code_example,
                "reference_url": self.remediation.reference_url,
            } if self.remediation else None,
            "metadata": self.metadata,
            "detected_at": self.detected_at.isoformat(),
        }