exploitgraph 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- core/__init__.py +0 -0
- core/attack_graph.py +83 -0
- core/aws_client.py +284 -0
- core/config.py +83 -0
- core/console.py +469 -0
- core/context_engine.py +172 -0
- core/correlator.py +476 -0
- core/http_client.py +243 -0
- core/logger.py +97 -0
- core/module_loader.py +69 -0
- core/risk_engine.py +47 -0
- core/session_manager.py +254 -0
- exploitgraph-1.0.0.dist-info/METADATA +429 -0
- exploitgraph-1.0.0.dist-info/RECORD +42 -0
- exploitgraph-1.0.0.dist-info/WHEEL +5 -0
- exploitgraph-1.0.0.dist-info/entry_points.txt +2 -0
- exploitgraph-1.0.0.dist-info/licenses/LICENSE +21 -0
- exploitgraph-1.0.0.dist-info/top_level.txt +2 -0
- modules/__init__.py +0 -0
- modules/base.py +82 -0
- modules/cloud/__init__.py +0 -0
- modules/cloud/aws_credential_validator.py +340 -0
- modules/cloud/azure_enum.py +289 -0
- modules/cloud/cloudtrail_analyzer.py +494 -0
- modules/cloud/gcp_enum.py +272 -0
- modules/cloud/iam_enum.py +321 -0
- modules/cloud/iam_privilege_escalation.py +515 -0
- modules/cloud/metadata_check.py +315 -0
- modules/cloud/s3_enum.py +469 -0
- modules/discovery/__init__.py +0 -0
- modules/discovery/http_enum.py +235 -0
- modules/discovery/subdomain_enum.py +260 -0
- modules/exploitation/__init__.py +0 -0
- modules/exploitation/api_exploit.py +403 -0
- modules/exploitation/jwt_attack.py +346 -0
- modules/exploitation/ssrf_scanner.py +258 -0
- modules/reporting/__init__.py +0 -0
- modules/reporting/html_report.py +446 -0
- modules/reporting/json_export.py +107 -0
- modules/secrets/__init__.py +0 -0
- modules/secrets/file_secrets.py +358 -0
- modules/secrets/git_secrets.py +267 -0
modules/cloud/s3_enum.py
ADDED
|
@@ -0,0 +1,469 @@
|
|
|
1
|
+
"""
|
|
2
|
+
ExploitGraph Module: S3 Bucket Enumerator
|
|
3
|
+
Category: cloud
|
|
4
|
+
Detects public S3 buckets via HTTP and optional boto3 API calls.
|
|
5
|
+
Works WITHOUT AWS credentials (HTTP mode). Richer results WITH credentials (boto3 mode).
|
|
6
|
+
|
|
7
|
+
Real-world AWS misconfigs detected:
|
|
8
|
+
- Public-read ACL (no credentials needed to list objects)
|
|
9
|
+
- Missing Block Public Access settings
|
|
10
|
+
- Bucket policy allowing s3:GetObject to *
|
|
11
|
+
- Directory listing enabled
|
|
12
|
+
"""
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
import re
|
|
15
|
+
import io
|
|
16
|
+
import zipfile
|
|
17
|
+
from typing import TYPE_CHECKING
|
|
18
|
+
from pathlib import Path
|
|
19
|
+
|
|
20
|
+
import requests
|
|
21
|
+
from requests.packages.urllib3.exceptions import InsecureRequestWarning
|
|
22
|
+
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
|
|
23
|
+
|
|
24
|
+
from modules.base import BaseModule, ModuleResult
|
|
25
|
+
|
|
26
|
+
if TYPE_CHECKING:
|
|
27
|
+
from core.session_manager import Session
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class S3Enum(BaseModule):
    """Discover and audit public S3 buckets via unsigned HTTP requests,
    with an optional authenticated boto3 audit pass.
    """

    # Framework metadata consumed by the module loader / console.
    NAME = "s3_enum"
    DESCRIPTION = "Discover and audit public S3 buckets — HTTP-based + optional boto3 integration"
    AUTHOR = "ExploitGraph Team"
    VERSION = "1.2.0"
    CATEGORY = "cloud"
    SEVERITY = "CRITICAL"
    # MITRE ATT&CK technique IDs this module maps to.
    MITRE = ["T1530", "T1580"]
    # Equivalent AWS CLI workflow, displayed to the operator.
    AWS_PARALLEL = "aws s3 ls s3://bucket --no-sign-request | aws s3api get-bucket-acl"

    # User-configurable options; read at run time via self.get_option().
    OPTIONS = {
        "TARGET": {"default": "", "required": True, "description": "Target base URL or S3 bucket URL"},
        "BUCKET_NAME": {"default": "", "required": False, "description": "Specific bucket name to test"},
        "WORDLIST": {"default": "", "required": False, "description": "Bucket name wordlist path"},
        "AWS_PROFILE": {"default": "", "required": False, "description": "AWS CLI profile for boto3 mode"},
        "AWS_REGION": {"default": "us-east-1", "required": False, "description": "AWS region"},
        "DOWNLOAD_FILES": {"default": "true", "required": False, "description": "Download found files for analysis"},
        "TIMEOUT": {"default": "8", "required": False, "description": "Request timeout"},
    }

    # S3 URL patterns for detecting bucket references in responses.
    # Each pattern captures exactly one group (the bucket name), so
    # re.findall() yields plain strings.
    _S3_PATTERNS = [
        r's3\.amazonaws\.com/([a-z0-9][a-z0-9\-\.]{1,61}[a-z0-9])',
        r'([a-z0-9][a-z0-9\-\.]{1,61}[a-z0-9])\.s3\.amazonaws\.com',
        r'([a-z0-9][a-z0-9\-\.]{1,61}[a-z0-9])\.s3\.[a-z0-9-]+\.amazonaws\.com',
        r's3://([a-z0-9][a-z0-9\-\.]{1,61}[a-z0-9])',
    ]
|
|
58
|
+
|
|
59
|
+
def run(self, session: "Session") -> ModuleResult:
    """Entry point: enumerate S3 bucket candidates, probe each anonymously,
    check common storage paths on the target itself, and optionally run an
    authenticated boto3 audit.

    Args:
        session: Active engagement session (provides target, findings
            store, and attack-graph API).

    Returns:
        ModuleResult(True, {...}) with bucket/file counts and details.
    """
    from core.config import cfg
    from core.logger import log

    target = self.get_option("TARGET") or session.target
    timeout = int(self.get_option("TIMEOUT", "8"))
    download = self.get_option("DOWNLOAD_FILES", "true").lower() == "true"

    self._timer_start()
    log.section("S3 Bucket Enumeration & Audit")
    # Fixed banner lines — no interpolation needed (original used
    # pointless f-strings with no placeholders).
    log.info("MITRE: T1530 — Data from Cloud Storage Object")
    log.info("AWS: aws s3 ls s3://bucket --no-sign-request")

    found_buckets = []
    found_files = []

    # Step 1: Extract bucket names from target responses.
    bucket_names = self._discover_bucket_names(target, timeout, session)

    # Step 2: Test each bucket for public access.
    for name in bucket_names:
        result = self._test_bucket(name, timeout, download, session)
        if result:
            found_buckets.append(result)
            found_files.extend(result.get("files", []))

    # Step 3: Check target URL for exposed storage paths.
    self._check_storage_paths(target, timeout, download, session, found_files)

    # Step 4: Optional boto3 mode (authenticated, read-only).
    if self.get_option("AWS_PROFILE") or cfg.aws_enabled:
        self._boto3_audit(bucket_names, session)

    # Record exposure in the attack graph only when something was found.
    if found_buckets or found_files:
        session.add_graph_node("s3_exposure", f"Cloud Storage\nExposed ({len(found_files)} files)",
                               "exposure", "CRITICAL",
                               f"{len(found_buckets)} buckets, {len(found_files)} files")
        session.add_graph_edge("http_enum", "s3_exposure", "finds exposed storage", "T1530")

    elapsed = self._timer_stop()
    log.success(f"S3 audit done in {elapsed}s — {len(found_buckets)} buckets, {len(found_files)} files")

    return ModuleResult(True, {
        "buckets_found": len(found_buckets),
        "files_found": len(found_files),
        "buckets": found_buckets,
        "files": found_files,
    })
|
|
108
|
+
|
|
109
|
+
def _discover_bucket_names(self, target: str, timeout: int,
                           session: "Session") -> list[str]:
    """Collect candidate S3 bucket names from four sources: the explicit
    BUCKET_NAME option, the target hostname (plus common suffix
    permutations), S3 URL references mined from the target's HTTP
    response, and an optional operator-supplied wordlist.
    """
    from core.logger import log
    from urllib.parse import urlparse

    candidates: set[str] = set()

    # Explicit bucket supplied by the operator.
    explicit = self.get_option("BUCKET_NAME")
    if explicit:
        candidates.add(explicit)

    # Derive names from the target hostname.
    # flaws.cloud → try "flaws.cloud" AND "flaws" as bucket names.
    hostname = urlparse(target).hostname or ""
    base = hostname.split(".")[0]

    if hostname and hostname not in ("localhost", "127.0.0.1"):
        candidates.add(hostname)  # e.g. "flaws.cloud" is itself a valid bucket name
    if base and base not in ("localhost", "127", "www"):
        candidates.add(base)  # e.g. "flaws"
        suffixes = ("-backups", "-backup", "-assets", "-static", "-uploads",
                    "-prod", "-staging", "-dev", "-data", "-logs", "-config")
        candidates.update(base + tail for tail in suffixes)

    # Mine the target's response body for S3 URL references.
    log.step("Scanning target responses for S3 bucket references...")
    try:
        resp = requests.get(target, timeout=timeout, verify=False)
        for pattern in self._S3_PATTERNS:
            for hit in re.findall(pattern, resp.text, re.IGNORECASE):
                candidate = hit.strip().lower()
                if 2 < len(candidate) < 64:
                    candidates.add(candidate)
                    log.found(f"S3 bucket reference in response: {candidate}")
    except Exception:
        pass  # best-effort: the target may simply be unreachable

    # Optional wordlist; each entry is also permuted with the hostname base.
    wordlist = self.get_option("WORDLIST", "")
    if wordlist and Path(wordlist).exists():
        for raw in Path(wordlist).read_text().splitlines():
            entry = raw.strip()
            if entry and not entry.startswith("#"):
                candidates.add(entry)
                if base:
                    candidates.add(f"{base}-{entry}")

    return list(candidates)
|
|
156
|
+
|
|
157
|
+
def _test_bucket(self, name: str, timeout: int, download: bool,
                 session: "Session") -> dict | None:
    """Probe one bucket name for anonymous (unsigned) access.

    Tries virtual-hosted and path-style endpoints. On an open listing,
    records a CRITICAL finding and optionally downloads interesting
    objects.

    Returns:
        Dict with name/url/objects/files for a public bucket, else None.
    """
    from core.logger import log
    import xml.etree.ElementTree as ET

    # All URL patterns to try — including anonymous S3 API (flaws.cloud pattern)
    urls_to_try = [
        f"https://{name}.s3.amazonaws.com/",
        f"https://s3.amazonaws.com/{name}/",
        f"https://{name}.s3.amazonaws.com/?list-type=2",
        f"https://s3.amazonaws.com/{name}?list-type=2",
        f"https://{name}.s3.us-east-1.amazonaws.com/",
    ]

    headers = {
        "User-Agent": "ExploitGraph/1.0 (Security Research)",
        # No Authorization = anonymous/unsigned request
        # Equivalent to: aws s3 ls s3://bucket --no-sign-request
    }

    for url in urls_to_try:
        try:
            r = requests.get(url, headers=headers, timeout=timeout,
                             verify=False, allow_redirects=True)

            if r.status_code == 200 and "ListBucketResult" in r.text:
                log.critical(f"PUBLIC S3 BUCKET: s3://{name}")
                log.info(f"AWS cmd: aws s3 ls s3://{name} --no-sign-request")
                log.info(f"AWS cmd: aws s3 sync s3://{name} ./ --no-sign-request")

                # Parse XML properly; strip the default namespace first so
                # plain element names resolve (handles namespaces).
                files = []
                try:
                    xml_clean = re.sub(r' xmlns="[^"]+"', '', r.text)
                    root = ET.fromstring(xml_clean)
                    for content in root.findall('.//Contents'):
                        key_el = content.find('Key')
                        # FIX: skip empty <Key/> elements — a None entry
                        # would make the ', '.join(...) below raise and
                        # silently abort this whole branch. (Also removed
                        # the unused `size_el` local.)
                        if key_el is not None and key_el.text:
                            files.append(key_el.text)
                    truncated = root.find('.//IsTruncated')
                    if truncated is not None and truncated.text == 'true':
                        log.warning(" Results truncated — use AWS CLI for full listing")
                except Exception:
                    # Fallback regex for malformed XML.
                    files = re.findall(r"<Key>([^<]+)</Key>", r.text)

                log.success(f" Objects in bucket: {len(files)}")
                for f in files[:10]:
                    log.secret("Object", f)

                session.add_finding(
                    module="s3_enum",
                    title=f"Public S3 Bucket: {name}",
                    severity="CRITICAL",
                    description=(
                        f"S3 bucket '{name}' allows anonymous listing. "
                        f"{len(files)} objects are publicly accessible. "
                        "This is equivalent to the flaws.cloud attack pattern."
                    ),
                    evidence=(
                        f"URL: {url}\nHTTP 200 with XML listing\n"
                        f"Objects ({len(files)}): {', '.join(files[:5])}"
                        f"{'...' if len(files) > 5 else ''}\n"
                        f"AWS: aws s3 ls s3://{name} --no-sign-request"
                    ),
                    recommendation=(
                        "Enable S3 Block Public Access:\n"
                        f"aws s3api put-public-access-block --bucket {name} "
                        "--public-access-block-configuration "
                        "BlockPublicAcls=true,IgnorePublicAcls=true,"
                        "BlockPublicPolicy=true,RestrictPublicBuckets=true"
                    ),
                    cvss_score=9.8,
                    aws_parallel="s3:ListBucket granted to AllUsers — flaws.cloud challenge 1 pattern",
                    mitre_technique="T1530",
                )

                downloaded = []
                if download:
                    downloaded = self._download_files(name, files, timeout, session)

                return {
                    "name": name,
                    "url": url,
                    "objects": len(files),
                    "files": downloaded,
                }

            elif r.status_code == 403:
                log.step(f"[403] s3://{name} — bucket exists but requires credentials")
            elif r.status_code == 301:
                # NOTE(review): with allow_redirects=True above, requests
                # normally follows 301s itself, so this branch is largely
                # dead — it only fires if the redirect was not followed
                # (e.g. no Location header). Kept as a safety net; confirm.
                location = r.headers.get("Location", "")
                if location and name in location:
                    log.step(f"Redirecting to region endpoint: {location}")
                    try:
                        r2 = requests.get(location, headers=headers,
                                          timeout=timeout, verify=False)
                        if r2.status_code == 200 and "ListBucketResult" in r2.text:
                            files = re.findall(r"<Key>([^<]+)</Key>", r2.text)
                            log.critical(f"PUBLIC BUCKET (redirected region): s3://{name}")
                            return {"name": name, "url": location,
                                    "objects": len(files), "files": []}
                    except Exception:
                        pass
        except Exception:
            pass  # endpoint unreachable / timeout — try the next URL form

    return None
|
|
267
|
+
|
|
268
|
+
def _download_files(self, bucket: str, keys: list[str],
                    timeout: int, session: "Session") -> list[dict]:
    """Anonymously fetch up to 20 interesting-looking objects from a
    public bucket for secret analysis.

    ZIP archives are unpacked in memory and each member is pushed to
    session.exposed_files; other objects are stored as text.

    Returns:
        List of dicts: {key, url, size, content} (content is None for
        ZIP archives — their members go to session.exposed_files).
    """
    from core.logger import log
    from urllib.parse import quote

    downloaded = []
    # Substrings that mark an object key as worth pulling.
    INTERESTING = [".env", ".json", ".yaml", ".yml", ".conf", ".ini",
                   ".key", ".pem", ".zip", ".tar", ".gz", ".sql",
                   "config", "secret", "credential", "backup", "password"]

    for key in keys[:20]:
        if any(marker in key.lower() for marker in INTERESTING):
            # FIX: percent-encode the key so spaces and special characters
            # produce a valid URL; keep "/" so prefixes stay path segments.
            url = f"https://{bucket}.s3.amazonaws.com/{quote(key, safe='/')}"
            try:
                r = requests.get(url, timeout=timeout, verify=False)
                if r.status_code == 200:
                    content = None
                    log.found(f"Downloaded: {key} ({len(r.content)} bytes)")

                    if key.endswith(".zip"):
                        try:
                            with zipfile.ZipFile(io.BytesIO(r.content)) as zf:
                                members = zf.namelist()
                                log.step(f"Archive members: {members}")
                                for member in members:
                                    try:
                                        text = zf.read(member).decode("utf-8", errors="ignore")
                                        session.exposed_files.append({
                                            "url": f"{url}!/{member}",
                                            "path": f"zip:{member}",
                                            "content": text,
                                            "source": "s3_enum",
                                        })
                                    except Exception:
                                        pass  # unreadable member — skip it
                        except Exception:
                            pass  # not a valid ZIP — ignore
                    else:
                        content = r.text
                        session.exposed_files.append({
                            "url": url,
                            "path": key,
                            "content": content,
                            "source": "s3_enum",
                        })

                    downloaded.append({"key": key, "url": url,
                                       "size": len(r.content), "content": content})
            except Exception:
                pass  # best-effort download
    return downloaded
|
|
317
|
+
|
|
318
|
+
def _check_storage_paths(self, target: str, timeout: int, download: bool,
                         session: "Session", found_files: list):
    """Check for storage-like paths on the target web server.

    Uses the "backup_files" wordlist when available, else a built-in
    fallback list. Hits are appended to session.exposed_files and
    *found_files*, a finding is recorded per hit, and ZIP archives are
    unpacked in memory when *download* is true.
    """
    from core.config import cfg
    from core.logger import log

    storage_paths = []
    wl = cfg.wordlist_path("backup_files")
    if wl.exists():
        storage_paths = [l.strip() for l in wl.read_text().splitlines()
                         if l.strip() and not l.startswith("#")]

    if not storage_paths:
        storage_paths = [
            "/static/backups/", "/backups/", "/backup/", "/uploads/",
            "/.env", "/.env.backup", "/.env.production",
            "/config.json", "/config.yaml", "/app.yaml",
        ]

    # FIX: the naive f"{target}{path}" join produced "//" when the target
    # had a trailing slash, and glued words together when a wordlist entry
    # lacked a leading slash. Normalize to exactly one separator.
    base_url = target.rstrip("/")

    for path in storage_paths:
        url = base_url + (path if path.startswith("/") else "/" + path)
        try:
            r = requests.get(url, timeout=timeout, verify=False)
            # >10 bytes filters out trivially-empty 200 responses.
            if r.status_code == 200 and len(r.content) > 10:
                log.found(f"[{r.status_code}] Exposed: {url} ({len(r.content)} bytes)")
                file_info = {
                    "url": url,
                    "path": path,
                    "content": r.text if not url.endswith(".zip") else None,
                    "size": len(r.content),
                    "source": "s3_enum",
                }
                session.exposed_files.append(file_info)
                found_files.append(file_info)

                # Handle ZIP archives: surface each member for analysis.
                if url.endswith(".zip") and download:
                    try:
                        with zipfile.ZipFile(io.BytesIO(r.content)) as zf:
                            for member in zf.namelist():
                                text = zf.read(member).decode("utf-8", errors="ignore")
                                session.exposed_files.append({
                                    "url": f"{url}!/{member}",
                                    "path": f"zip:{member}",
                                    "content": text,
                                    "source": "s3_enum",
                                })
                                log.secret("Archive file", member)
                    except Exception:
                        pass  # corrupt/non-zip body — ignore

                session.add_finding(
                    module="s3_enum",
                    title=f"Exposed Storage File: {path}",
                    severity="CRITICAL" if any(x in path for x in [".env", "backup", "config"]) else "HIGH",
                    description=f"File accessible without authentication: {url}",
                    evidence=f"HTTP 200 OK | Size: {len(r.content)} bytes",
                    recommendation="Restrict access via authentication middleware. Never expose backup files publicly.",
                    cvss_score=9.0,
                    aws_parallel="S3 object with public-read ACL and no pre-signed URL requirement",
                    mitre_technique="T1530",
                )
        except Exception:
            pass  # unreachable path — continue with the next one
|
|
382
|
+
|
|
383
|
+
def _boto3_audit(self, bucket_names: list[str], session: "Session"):
    """Use boto3 for deeper bucket analysis (read-only operations only).

    NOTE(review): *bucket_names* is currently unused — the audit walks
    every bucket the credentials can list instead. Confirm intent.
    """
    from core.logger import log

    try:
        import boto3
        from botocore.exceptions import ClientError, NoCredentialsError

        profile = self.get_option("AWS_PROFILE", "")
        region = self.get_option("AWS_REGION", "us-east-1")

        try:
            # profile="" falls back to the default credential chain.
            aws = boto3.Session(profile_name=profile or None, region_name=region)
            client = aws.client("s3")

            log.info("boto3 mode: running authenticated S3 audit...")

            try:
                # Enumerate every bucket visible to these credentials and
                # audit each one individually.
                listing = client.list_buckets()
                owned = [entry["Name"] for entry in listing.get("Buckets", [])]
                log.found(f"boto3: Found {len(owned)} accessible buckets")

                for owned_bucket in owned:
                    self._boto3_check_bucket(client, owned_bucket, session)

            except NoCredentialsError:
                log.warning("boto3: No AWS credentials found. Set AWS_PROFILE or configure ~/.aws/credentials")
            except ClientError as err:
                log.warning(f"boto3: {err.response['Error']['Message']}")

        except Exception as err:
            log.warning(f"boto3 session error: {err}")

    except ImportError:
        log.step("boto3 not installed — HTTP-only mode. Install: pip install boto3")
|
|
418
|
+
|
|
419
|
+
def _boto3_check_bucket(self, s3, bucket_name: str, session: "Session"):
    """Run read-only misconfiguration checks on one bucket.

    Checks the Block Public Access configuration and the bucket ACL for
    grants to the global AllUsers / AuthenticatedUsers groups, recording
    a finding for each issue.

    Args:
        s3: boto3 S3 client.
        bucket_name: Name of the bucket to inspect.
        session: Session receiving findings.
    """
    from core.logger import log
    from botocore.exceptions import ClientError
    try:
        # Check Block Public Access
        try:
            bpa = s3.get_public_access_block(Bucket=bucket_name)
            config = bpa["PublicAccessBlockConfiguration"]
            if not all(config.values()):
                log.warning(f"[boto3] {bucket_name}: Block Public Access NOT fully enabled")
                session.add_finding(
                    module="s3_enum",
                    title=f"S3 Block Public Access Disabled: {bucket_name}",
                    severity="HIGH",
                    description="Block Public Access is not fully enabled on this S3 bucket.",
                    evidence=f"Bucket: {bucket_name}\nConfig: {config}",
                    # BUG FIX: was a plain string containing the literal
                    # text "{name}" — now interpolates the bucket name.
                    recommendation=f"Run: aws s3api put-public-access-block --bucket {bucket_name} --public-access-block-configuration BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true",
                    cvss_score=7.5,
                    aws_parallel="s3:PutPublicAccessBlock remediation required",
                    mitre_technique="T1530",
                )
        except ClientError:
            # NOTE(review): a bucket with NO public-access-block config
            # raises ClientError and is silently skipped here — arguably
            # that state should be flagged too; confirm intent.
            pass

        # Check ACL
        try:
            acl = s3.get_bucket_acl(Bucket=bucket_name)
            for grant in acl.get("Grants", []):
                grantee = grant.get("Grantee", {})
                if grantee.get("URI") in [
                    "http://acs.amazonaws.com/groups/global/AllUsers",
                    "http://acs.amazonaws.com/groups/global/AuthenticatedUsers",
                ]:
                    perm = grant.get("Permission")
                    log.critical(f"[boto3] {bucket_name}: Public ACL — {grantee['URI'].split('/')[-1]} has {perm}")
                    session.add_finding(
                        module="s3_enum",
                        title=f"S3 Public ACL: {bucket_name}",
                        severity="CRITICAL",
                        description=f"Bucket {bucket_name} has a public ACL granting {perm} to {grantee['URI'].split('/')[-1]}",
                        evidence=f"ACL Grant: {grant}",
                        # BUG FIX: same missing f-prefix as above — the
                        # finding previously printed a literal "{name}".
                        recommendation=f"Remove public ACL: aws s3api put-bucket-acl --bucket {bucket_name} --acl private",
                        cvss_score=9.8,
                        aws_parallel="s3:PutBucketAcl with 'private' ACL",
                        mitre_technique="T1530",
                    )
        except ClientError:
            pass  # no ACL access with these credentials — skip

    except Exception as e:
        log.step(f"[boto3] Error checking {bucket_name}: {e}")
|
|
File without changes
|