@raghulm/aegis-mcp 1.0.7 → 1.0.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +3 -2
- package/server/main.py +12 -0
- package/tools/security/terraform.py +382 -0
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@raghulm/aegis-mcp",
|
|
3
|
-
"version": "1.0.
|
|
3
|
+
"version": "1.0.9",
|
|
4
4
|
"description": "DevSecOps-focused MCP server for AWS, Kubernetes, CI/CD, and security tooling.",
|
|
5
5
|
"license": "MIT",
|
|
6
6
|
"author": "Raghul M",
|
|
@@ -40,7 +40,8 @@
|
|
|
40
40
|
"aws",
|
|
41
41
|
"kubernetes",
|
|
42
42
|
"claude",
|
|
43
|
-
"jenkins"
|
|
43
|
+
"jenkins",
|
|
44
|
+
"terraform"
|
|
44
45
|
],
|
|
45
46
|
"publishConfig": {
|
|
46
47
|
"access": "public",
|
package/server/main.py
CHANGED
|
@@ -28,6 +28,7 @@ from tools.network.ssl_checker import check_ssl_certificate
|
|
|
28
28
|
from tools.security.deps import check_dependencies
|
|
29
29
|
from tools.security.secrets import scan_secrets
|
|
30
30
|
from tools.security.semgrep import run_semgrep_scan
|
|
31
|
+
from tools.security.terraform import scan_terraform
|
|
31
32
|
from tools.security.trivy import run_trivy_scan
|
|
32
33
|
|
|
33
34
|
settings = load_settings()
|
|
@@ -149,6 +150,17 @@ def security_semgrep_scan(path: str, config: str = "auto", token: str = "") -> d
|
|
|
149
150
|
return run_semgrep_scan(path, config)
|
|
150
151
|
|
|
151
152
|
|
|
153
|
+
# ── Terraform security scanner ─────────────────────────────────────


@mcp.tool()
@audit_tool_call("security_scan_terraform")
def security_scan_terraform(path: str, severity: str = "", token: str = "") -> dict:
    """Scan Terraform (.tf) files for security misconfigurations (S3, IAM, RDS, EC2, networking, encryption, credentials). Optionally filter by severity (CRITICAL, HIGH, MEDIUM, LOW). This tool runs locally and has full access to the user's local filesystem."""
    # Reject the call before touching the filesystem if the token is invalid.
    _authorize(token, "security_scan_terraform")
    # Delegate to the rule engine in tools/security/terraform.py.
    return scan_terraform(path, severity)
|
|
162
|
+
|
|
163
|
+
|
|
152
164
|
# ── Jenkins CI/CD tools ────────────────────────────────────────────
|
|
153
165
|
|
|
154
166
|
|
|
@@ -0,0 +1,382 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import re
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
# ── Block extraction ───────────────────────────────────────────────
|
|
8
|
+
|
|
9
|
+
_BLOCK_RE = re.compile(
|
|
10
|
+
r"""
|
|
11
|
+
(resource|data)\s+ # block type
|
|
12
|
+
"([^"]+)"\s+ # resource type e.g. "aws_s3_bucket"
|
|
13
|
+
"([^"]+)"\s* # resource name e.g. "my_bucket"
|
|
14
|
+
\{ # opening brace
|
|
15
|
+
""",
|
|
16
|
+
re.VERBOSE,
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
_SKIP_DIRS = {".git", "__pycache__", "node_modules", ".venv", "venv", ".terraform", "dist", "build"}
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def _extract_blocks(content: str) -> list[tuple[str, str, str, str]]:
|
|
23
|
+
"""Return (block_type, resource_type, name, body) for every resource/data block."""
|
|
24
|
+
blocks: list[tuple[str, str, str, str]] = []
|
|
25
|
+
for match in _BLOCK_RE.finditer(content):
|
|
26
|
+
block_type = match.group(1)
|
|
27
|
+
resource_type = match.group(2)
|
|
28
|
+
name = match.group(3)
|
|
29
|
+
start = match.end()
|
|
30
|
+
depth = 1
|
|
31
|
+
pos = start
|
|
32
|
+
while pos < len(content) and depth > 0:
|
|
33
|
+
if content[pos] == "{":
|
|
34
|
+
depth += 1
|
|
35
|
+
elif content[pos] == "}":
|
|
36
|
+
depth -= 1
|
|
37
|
+
pos += 1
|
|
38
|
+
body = content[start:pos - 1] if depth == 0 else content[start:]
|
|
39
|
+
blocks.append((block_type, resource_type, name, body))
|
|
40
|
+
return blocks
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
# ── Finding helper ─────────────────────────────────────────────────
|
|
44
|
+
|
|
45
|
+
def _finding(
|
|
46
|
+
rule_id: str,
|
|
47
|
+
severity: str,
|
|
48
|
+
resource_type: str,
|
|
49
|
+
resource_name: str,
|
|
50
|
+
file_path: str,
|
|
51
|
+
message: str,
|
|
52
|
+
recommendation: str,
|
|
53
|
+
) -> dict[str, str]:
|
|
54
|
+
return {
|
|
55
|
+
"rule_id": rule_id,
|
|
56
|
+
"severity": severity,
|
|
57
|
+
"resource_type": resource_type,
|
|
58
|
+
"resource_name": resource_name,
|
|
59
|
+
"file": file_path,
|
|
60
|
+
"message": message,
|
|
61
|
+
"recommendation": recommendation,
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
# ── Individual rules ───────────────────────────────────────────────
|
|
66
|
+
|
|
67
|
+
def _check_tf001(rtype: str, name: str, body: str, fp: str) -> list[dict[str, str]]:
    """Flag S3 buckets that lack server-side encryption configuration."""
    if rtype != "aws_s3_bucket" or "server_side_encryption_configuration" in body:
        return []
    return [_finding(
        "TF001", "HIGH", rtype, name, fp,
        f"S3 bucket '{name}' does not have server-side encryption configured.",
        "Add a server_side_encryption_configuration block with AES256 or aws:kms.",
    )]


def _check_tf002(rtype: str, name: str, body: str, fp: str) -> list[dict[str, str]]:
    """Flag S3 buckets that lack a versioning block."""
    if rtype != "aws_s3_bucket" or "versioning" in body:
        return []
    return [_finding(
        "TF002", "MEDIUM", rtype, name, fp,
        f"S3 bucket '{name}' does not have versioning enabled.",
        "Add a versioning block with enabled = true.",
    )]


def _check_tf003(rtype: str, name: str, body: str, fp: str) -> list[dict[str, str]]:
    """Flag S3 buckets configured with a public canned ACL."""
    if rtype != "aws_s3_bucket":
        return []
    public_acl = re.search(
        r"""acl\s*=\s*["'](?:public-read|public-read-write)["']""", body
    )
    if public_acl is None:
        return []
    return [_finding(
        "TF003", "CRITICAL", rtype, name, fp,
        f"S3 bucket '{name}' has a public ACL configured.",
        "Remove the public ACL or use aws_s3_bucket_public_access_block to restrict access.",
    )]
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def _check_tf004(rtype: str, name: str, body: str, fp: str) -> list[dict[str, str]]:
    """Flag security groups with any ingress rule open to the whole internet."""
    if rtype != "aws_security_group":
        return []
    # At most one finding per security group (the original broke out of the
    # loop on the first match), so collapse the scan into a single any().
    world_open = any(
        re.search(r"""["']0\.0\.0\.0/0["']""", blk)
        for blk in re.findall(r"ingress\s*\{([^}]*)\}", body, re.DOTALL)
    )
    if not world_open:
        return []
    return [_finding(
        "TF004", "HIGH", rtype, name, fp,
        f"Security group '{name}' has ingress open to 0.0.0.0/0.",
        "Restrict ingress CIDR blocks to specific trusted IP ranges.",
    )]


def _check_tf005(rtype: str, name: str, body: str, fp: str) -> list[dict[str, str]]:
    """Flag security groups with any egress rule open to the whole internet."""
    if rtype != "aws_security_group":
        return []
    # Same single-finding semantics as TF004, applied to egress blocks.
    world_open = any(
        re.search(r"""["']0\.0\.0\.0/0["']""", blk)
        for blk in re.findall(r"egress\s*\{([^}]*)\}", body, re.DOTALL)
    )
    if not world_open:
        return []
    return [_finding(
        "TF005", "MEDIUM", rtype, name, fp,
        f"Security group '{name}' has egress open to 0.0.0.0/0.",
        "Restrict egress CIDR blocks to required destinations only.",
    )]
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def _check_tf006(rtype: str, name: str, body: str, fp: str) -> list[dict[str, str]]:
    """Flag IAM policies granting wildcard (*) actions (HCL or inline JSON)."""
    if rtype not in ("aws_iam_policy", "aws_iam_role_policy", "aws_iam_policy_document"):
        return []
    wildcard_patterns = (
        r"""actions?\s*=\s*\[\s*["']\*["']""",   # HCL: actions = ["*"]
        r""""Action"\s*:\s*["']\*["']""",        # JSON: "Action": "*"
        r""""Action"\s*:\s*\[\s*["']\*["']""",   # JSON: "Action": ["*"]
    )
    if not any(re.search(p, body) for p in wildcard_patterns):
        return []
    return [_finding(
        "TF006", "CRITICAL", rtype, name, fp,
        f"IAM policy '{name}' uses wildcard (*) actions.",
        "Follow the principle of least privilege and specify only required actions.",
    )]


def _check_tf007(rtype: str, name: str, body: str, fp: str) -> list[dict[str, str]]:
    """Flag IAM policies granting wildcard (*) resources (HCL or inline JSON)."""
    if rtype not in ("aws_iam_policy", "aws_iam_role_policy", "aws_iam_policy_document"):
        return []
    wildcard_patterns = (
        r"""resources?\s*=\s*\[\s*["']\*["']""",  # HCL: resources = ["*"]
        r""""Resource"\s*:\s*["']\*["']""",       # JSON: "Resource": "*"
        r""""Resource"\s*:\s*\[\s*["']\*["']""",  # JSON: "Resource": ["*"]
    )
    if not any(re.search(p, body) for p in wildcard_patterns):
        return []
    return [_finding(
        "TF007", "HIGH", rtype, name, fp,
        f"IAM policy '{name}' uses wildcard (*) resources.",
        "Scope resource ARNs to the specific resources that are needed.",
    )]
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def _check_tf008(rtype: str, name: str, body: str, fp: str) -> list[dict[str, str]]:
    """Flag RDS instances that are exposed to the public internet."""
    if rtype != "aws_db_instance":
        return []
    if re.search(r"publicly_accessible\s*=\s*true", body) is None:
        return []
    return [_finding(
        "TF008", "CRITICAL", rtype, name, fp,
        f"RDS instance '{name}' is publicly accessible.",
        "Set publicly_accessible = false and use private subnets.",
    )]


def _check_tf009(rtype: str, name: str, body: str, fp: str) -> list[dict[str, str]]:
    """Flag RDS instances without storage encryption (absent or explicitly false)."""
    if rtype != "aws_db_instance":
        return []
    encrypted = (
        "storage_encrypted" in body
        and not re.search(r"storage_encrypted\s*=\s*false", body)
    )
    if encrypted:
        return []
    return [_finding(
        "TF009", "HIGH", rtype, name, fp,
        f"RDS instance '{name}' does not have storage encryption enabled.",
        "Set storage_encrypted = true and optionally specify a kms_key_id.",
    )]
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
def _check_tf010(rtype: str, name: str, body: str, fp: str) -> list[dict[str, str]]:
    """EC2 instance without IMDSv2 enforcement.

    IMDSv2 is only enforced when metadata_options sets http_tokens = "required".
    The previous check only tested for the *presence* of the http_tokens
    substring, so `http_tokens = "optional"` (IMDSv1 still allowed) passed
    unflagged; now any value other than "required" is reported.
    """
    if rtype != "aws_instance":
        return []
    enforced = (
        "metadata_options" in body
        and re.search(r"""http_tokens\s*=\s*["']required["']""", body) is not None
    )
    if enforced:
        return []
    return [_finding(
        "TF010", "HIGH", rtype, name, fp,
        f"EC2 instance '{name}' does not enforce IMDSv2.",
        "Add metadata_options with http_tokens = \"required\" to enforce IMDSv2.",
    )]
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
def _check_tf011(rtype: str, name: str, body: str, fp: str) -> list[dict[str, str]]:
    """Flag CloudTrail trails that explicitly disable logging."""
    if rtype != "aws_cloudtrail":
        return []
    if re.search(r"enable_logging\s*=\s*false", body) is None:
        return []
    return [_finding(
        "TF011", "CRITICAL", rtype, name, fp,
        f"CloudTrail '{name}' has logging disabled.",
        "Set enable_logging = true to ensure audit trail capture.",
    )]


def _check_tf012(rtype: str, name: str, body: str, fp: str) -> list[dict[str, str]]:
    """Flag S3 buckets with no access-logging configuration."""
    if rtype != "aws_s3_bucket" or "logging" in body:
        return []
    return [_finding(
        "TF012", "MEDIUM", rtype, name, fp,
        f"S3 bucket '{name}' does not have access logging configured.",
        "Add a logging block pointing to a dedicated logging bucket.",
    )]
|
|
234
|
+
|
|
235
|
+
|
|
236
|
+
def _check_tf013(rtype: str, name: str, body: str, fp: str) -> list[dict[str, str]]:
    """Flag EBS volumes without encryption (absent or explicitly false)."""
    if rtype != "aws_ebs_volume":
        return []
    encrypted = (
        "encrypted" in body
        and not re.search(r"encrypted\s*=\s*false", body)
    )
    if encrypted:
        return []
    return [_finding(
        "TF013", "HIGH", rtype, name, fp,
        f"EBS volume '{name}' is not encrypted.",
        "Set encrypted = true and optionally specify a kms_key_id.",
    )]


def _check_tf014(rtype: str, name: str, body: str, fp: str) -> list[dict[str, str]]:
    """Flag subnets that auto-assign public IPs to launched instances."""
    if rtype != "aws_subnet":
        return []
    if re.search(r"map_public_ip_on_launch\s*=\s*true", body) is None:
        return []
    return [_finding(
        "TF014", "MEDIUM", rtype, name, fp,
        f"Subnet '{name}' automatically assigns public IP addresses on launch.",
        "Set map_public_ip_on_launch = false unless instances explicitly need public IPs.",
    )]
|
|
260
|
+
|
|
261
|
+
|
|
262
|
+
def _check_tf015(content: str, fp: str) -> list[dict[str, str]]:
    """Hardcoded credentials in .tf files (file-level check).

    Looks for access_key/secret_key assignments whose value is a long
    literal (16+ base64-ish characters) rather than a variable reference.
    """
    credential_patterns = (
        ("access_key", r"""access_key\s*=\s*["'][A-Za-z0-9/+=]{16,}["']"""),
        ("secret_key", r"""secret_key\s*=\s*["'][A-Za-z0-9/+=]{16,}["']"""),
    )
    return [
        _finding(
            "TF015", "CRITICAL", "provider/variable", "hardcoded", fp,
            f"Hardcoded {key_type} found in Terraform file.",
            "Use environment variables, AWS profiles, or a secrets manager instead of hardcoded keys.",
        )
        for key_type, pattern in credential_patterns
        if re.search(pattern, content)
    ]
|
|
277
|
+
|
|
278
|
+
|
|
279
|
+
# ── Rule registry ──────────────────────────────────────────────────
|
|
280
|
+
|
|
281
|
+
# Rules applied once per extracted resource/data block.
_BLOCK_RULES = [
    _check_tf001,
    _check_tf002,
    _check_tf003,
    _check_tf004,
    _check_tf005,
    _check_tf006,
    _check_tf007,
    _check_tf008,
    _check_tf009,
    _check_tf010,
    _check_tf011,
    _check_tf012,
    _check_tf013,
    _check_tf014,
]

# Rules applied once per file's full text (e.g. credential sniffing).
_FILE_RULES = [
    _check_tf015,
]

# Accepted values for the optional `severity` filter of scan_terraform().
_VALID_SEVERITIES = {"CRITICAL", "HIGH", "MEDIUM", "LOW"}
|
|
303
|
+
|
|
304
|
+
|
|
305
|
+
# ── Public API ─────────────────────────────────────────────────────
|
|
306
|
+
|
|
307
|
+
def scan_terraform(path: str, severity: str = "") -> dict[str, Any]:
    """Scan Terraform (.tf) files for security misconfigurations.

    Args:
        path: Path to a single .tf file or a directory containing .tf files.
        severity: Optional severity filter (CRITICAL, HIGH, MEDIUM, LOW).

    Returns:
        Dict with summary stats and a list of findings.

    Raises:
        RuntimeError: If the path does not exist, is a non-.tf file, or the
            severity filter is not one of the accepted values.
    """
    if not os.path.exists(path):
        raise RuntimeError(f"Path does not exist: {path}")

    severity_filter = severity.upper().strip() if severity else ""
    if severity_filter and severity_filter not in _VALID_SEVERITIES:
        raise RuntimeError(
            f"Invalid severity filter '{severity}'. Must be one of: {', '.join(sorted(_VALID_SEVERITIES))}"
        )

    tf_files: list[str] = []
    if os.path.isfile(path):
        if path.endswith(".tf"):
            tf_files.append(path)
        else:
            raise RuntimeError(f"File is not a Terraform file (.tf): {path}")
    else:
        for root, dirs, files in os.walk(path):
            # Prune vendored / generated directories in place.
            dirs[:] = [d for d in dirs if d not in _SKIP_DIRS]
            for fname in files:
                if fname.endswith(".tf"):
                    tf_files.append(os.path.join(root, fname))
        # os.walk order is filesystem-dependent; sort for deterministic output.
        tf_files.sort()

    if not tf_files:
        return {
            "files_scanned": 0,
            "total_findings": 0,
            "summary": {"CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0},
            "findings": [],
        }

    all_findings: list[dict[str, str]] = []

    for tf_file in tf_files:
        try:
            with open(tf_file, "r", encoding="utf-8", errors="ignore") as fh:
                content = fh.read()
        except OSError:
            # Best-effort scan: skip unreadable files rather than fail the run.
            continue

        # Block-level rules (the block type itself is not used by any rule).
        blocks = _extract_blocks(content)
        for _block_type, resource_type, name, body in blocks:
            for rule_fn in _BLOCK_RULES:
                all_findings.extend(rule_fn(resource_type, name, body, tf_file))

        # File-level rules
        for file_rule_fn in _FILE_RULES:
            all_findings.extend(file_rule_fn(content, tf_file))

    # Apply severity filter
    if severity_filter:
        all_findings = [f for f in all_findings if f["severity"] == severity_filter]

    # Build summary
    summary = {"CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0}
    for f in all_findings:
        sev = f["severity"]
        if sev in summary:
            summary[sev] += 1

    return {
        "files_scanned": len(tf_files),
        "total_findings": len(all_findings),
        "summary": summary,
        "findings": all_findings,
    }
|