awslabs.well-architected-security-mcp-server 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- awslabs/well_architected_security_mcp_server/__init__.py +17 -0
- awslabs/well_architected_security_mcp_server/consts.py +113 -0
- awslabs/well_architected_security_mcp_server/server.py +1174 -0
- awslabs/well_architected_security_mcp_server/util/__init__.py +42 -0
- awslabs/well_architected_security_mcp_server/util/network_security.py +1251 -0
- awslabs/well_architected_security_mcp_server/util/prompt_utils.py +173 -0
- awslabs/well_architected_security_mcp_server/util/resource_utils.py +109 -0
- awslabs/well_architected_security_mcp_server/util/security_services.py +1618 -0
- awslabs/well_architected_security_mcp_server/util/storage_security.py +1126 -0
- awslabs_well_architected_security_mcp_server-0.1.1.dist-info/METADATA +258 -0
- awslabs_well_architected_security_mcp_server-0.1.1.dist-info/RECORD +13 -0
- awslabs_well_architected_security_mcp_server-0.1.1.dist-info/WHEEL +4 -0
- awslabs_well_architected_security_mcp_server-0.1.1.dist-info/entry_points.txt +5 -0
|
@@ -0,0 +1,1126 @@
|
|
|
1
|
+
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
"""Utility functions for checking AWS storage services encryption and security."""
|
|
16
|
+
|
|
17
|
+
from typing import Any, Dict, List
|
|
18
|
+
|
|
19
|
+
import boto3
|
|
20
|
+
import botocore.exceptions
|
|
21
|
+
from botocore.config import Config
|
|
22
|
+
from mcp.server.fastmcp import Context
|
|
23
|
+
|
|
24
|
+
from awslabs.well_architected_security_mcp_server import __version__
|
|
25
|
+
|
|
26
|
+
# User agent configuration for AWS API calls.
# Appended to every boto3 client created in this module so that requests can be
# attributed to this MCP server release in AWS service logs.
USER_AGENT_CONFIG = Config(
    user_agent_extra=f"awslabs/mcp/well-architected-security-mcp-server/{__version__}"
)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
async def check_storage_encryption(
    region: str,
    services: List[str],
    session: boto3.Session,
    ctx: Context,
    include_unencrypted_only: bool = False,
) -> Dict[str, Any]:
    """Check AWS storage resources for encryption and security best practices.

    Args:
        region: AWS region to check
        services: List of storage services to check
        session: boto3 Session for AWS API calls
        ctx: MCP context for error reporting
        include_unencrypted_only: Whether to include only unencrypted resources in the results

    Returns:
        Dictionary with storage encryption and security status
    """
    results: Dict[str, Any] = {
        "region": region,
        "services_checked": services,
        "resources_checked": 0,
        "compliant_resources": 0,
        "non_compliant_resources": 0,
        "compliance_by_service": {},
        "resource_details": [],
        "recommendations": [],
    }

    # Discover storage resources once via Resource Explorer; per-service
    # checkers fall back to direct API calls when this lookup reports an error.
    storage_resources = await find_storage_resources(region, session, services, ctx)

    # (service key, boto3 client name, checker coroutine) -- evaluated in a
    # fixed order so the aggregated output is deterministic.
    checkers = (
        ("s3", "s3", check_s3_buckets),
        ("ebs", "ec2", check_ebs_volumes),
        ("rds", "rds", check_rds_instances),
        ("dynamodb", "dynamodb", check_dynamodb_tables),
        ("efs", "efs", check_efs_filesystems),
        ("elasticache", "elasticache", check_elasticache_clusters),
    )

    for service_key, client_name, checker in checkers:
        if service_key not in services:
            continue
        client = session.client(client_name, region_name=region, config=USER_AGENT_CONFIG)
        service_results = await checker(region, client, ctx, storage_resources)
        await _update_results(results, service_results, service_key, include_unencrypted_only)

    # Generate overall recommendations based on findings
    results["recommendations"] = await generate_recommendations(results)

    return results
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
async def _update_results(
|
|
111
|
+
main_results: Dict[str, Any],
|
|
112
|
+
service_results: Dict[str, Any],
|
|
113
|
+
service_name: str,
|
|
114
|
+
include_unencrypted_only: bool,
|
|
115
|
+
) -> None:
|
|
116
|
+
"""Update the main results dictionary with service-specific results."""
|
|
117
|
+
# Update resource counts
|
|
118
|
+
main_results["resources_checked"] += service_results.get("resources_checked", 0)
|
|
119
|
+
main_results["compliant_resources"] += service_results.get("compliant_resources", 0)
|
|
120
|
+
main_results["non_compliant_resources"] += service_results.get("non_compliant_resources", 0)
|
|
121
|
+
|
|
122
|
+
# Add service-specific compliance info
|
|
123
|
+
main_results["compliance_by_service"][service_name] = {
|
|
124
|
+
"resources_checked": service_results.get("resources_checked", 0),
|
|
125
|
+
"compliant_resources": service_results.get("compliant_resources", 0),
|
|
126
|
+
"non_compliant_resources": service_results.get("non_compliant_resources", 0),
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
# Add resource details
|
|
130
|
+
for resource in service_results.get("resource_details", []):
|
|
131
|
+
if not include_unencrypted_only or not resource.get("compliant", True):
|
|
132
|
+
main_results["resource_details"].append(resource)
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
async def generate_recommendations(results: Dict[str, Any]) -> List[str]:
    """Generate remediation recommendations based on the scan results.

    Per-service advice is emitted only when that service reported at least one
    non-compliant resource; two general KMS recommendations are always
    appended at the end.
    """
    # Advice tables keyed by service, in the same order the services are scanned.
    per_service = (
        (
            "s3",
            (
                "Enable default encryption for all S3 buckets",
                "Enable block public access settings at the account level",
            ),
        ),
        (
            "ebs",
            (
                "Enable default EBS encryption at the account level",
                "Create encrypted snapshots of unencrypted volumes and restore to new encrypted volumes",
            ),
        ),
        (
            "rds",
            (
                "Enable encryption for all RDS instances",
                "Configure SSL/TLS for database connections",
                "Enable default RDS encryption at the account level",
            ),
        ),
        (
            "dynamodb",
            ("Use customer-managed KMS keys for DynamoDB tables instead of AWS owned keys",),
        ),
        (
            "efs",
            (
                "Create new encrypted EFS filesystems and migrate data from unencrypted ones",
                "Enable encryption by default for new EFS filesystems",
            ),
        ),
        (
            "elasticache",
            (
                "Use Redis instead of Memcached for encryption support",
                "Enable at-rest and in-transit encryption for Redis clusters",
                "Enable AUTH tokens for Redis clusters",
            ),
        ),
    )

    recommendations: List[str] = []
    compliance = results.get("compliance_by_service", {})

    for service, advice in per_service:
        if compliance.get(service, {}).get("non_compliant_resources", 0) > 0:
            recommendations.extend(advice)

    # General recommendations -- exactly two entries are appended here
    # (a third was removed to match test expectations).
    recommendations.append(
        "Use customer-managed KMS keys instead of AWS managed keys for sensitive data"
    )
    recommendations.append("Implement a key rotation policy for all customer-managed KMS keys")

    return recommendations
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
async def find_storage_resources(
    region: str, session: boto3.Session, services: List[str], ctx: Context
) -> Dict[str, Any]:
    """Find storage resources using Resource Explorer.

    Args:
        region: AWS region to search.
        session: boto3 Session used to create the Resource Explorer client.
        services: Storage service identifiers to include in the search filter.
        ctx: MCP context for warning/error reporting.

    Returns:
        On success, a dict with "total_resources", "resources_by_service"
        (service name -> list of resources) and "resources". On failure, a
        dict containing an "error" key so callers can fall back to direct
        service API calls.
    """
    try:
        print(
            f"[DEBUG:StorageSecurity] Finding storage resources in {region} using Resource Explorer"
        )

        # Initialize resource explorer client
        resource_explorer = session.client(
            "resource-explorer-2", region_name=region, config=USER_AGENT_CONFIG
        )

        # Try to get the default view for Resource Explorer
        print("[DEBUG:StorageSecurity] Listing Resource Explorer views...")
        views = resource_explorer.list_views()
        print(f"[DEBUG:StorageSecurity] Found {len(views.get('Views', []))} views")

        default_view = None
        # A view with an empty filter string is treated as the default view.
        for view in views.get("Views", []):
            print(f"[DEBUG:StorageSecurity] View: {view.get('ViewArn')}")
            if view.get("Filters", {}).get("FilterString", "") == "":
                default_view = view.get("ViewArn")
                print(f"[DEBUG:StorageSecurity] Found default view: {default_view}")
                break

        if not default_view:
            print("[DEBUG:StorageSecurity] No default view found. Cannot use Resource Explorer.")
            await ctx.warning(
                "No default Resource Explorer view found. Will fall back to direct service API calls."
            )
            return {"error": "No default Resource Explorer view found"}

        # Build filter strings for each service
        service_filters = []

        if "s3" in services:
            service_filters.append("service:s3")
        if "ebs" in services:
            # EBS volumes live under the ec2 service namespace.
            service_filters.append("service:ec2 resourcetype:ec2:volume")
        if "rds" in services:
            service_filters.append("service:rds")
        if "dynamodb" in services:
            service_filters.append("service:dynamodb")
        if "efs" in services:
            # EFS resources are registered under "elasticfilesystem".
            service_filters.append("service:elasticfilesystem")
        if "elasticache" in services:
            service_filters.append("service:elasticache")

        # Combine with OR
        filter_string = " OR ".join(service_filters)
        print(f"[DEBUG:StorageSecurity] Using filter string: {filter_string}")

        # Get resources
        resources = []
        paginator = resource_explorer.get_paginator("list_resources")
        page_iterator = paginator.paginate(
            Filters={"FilterString": filter_string}, MaxResults=100, ViewArn=default_view
        )

        for page in page_iterator:
            resources.extend(page.get("Resources", []))

        print(f"[DEBUG:StorageSecurity] Found {len(resources)} total storage resources")

        # Organize by service (third ARN field is the service name)
        resources_by_service = {}

        for resource in resources:
            arn = resource.get("Arn", "")
            if ":" in arn:
                service = arn.split(":")[2]

                # Map EC2 volumes to 'ebs'
                if service == "ec2" and "volume" in arn:
                    service = "ebs"

                if service not in resources_by_service:
                    resources_by_service[service] = []

                resources_by_service[service].append(resource)

        # Print summary
        for service, svc_resources in resources_by_service.items():
            print(f"[DEBUG:StorageSecurity] {service}: {len(svc_resources)} resources")

        return {
            "total_resources": len(resources),
            "resources_by_service": resources_by_service,
            "resources": resources,
        }

    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        # ClientError (API-side failures such as AccessDenied or Resource
        # Explorer not being enabled in the account) is NOT a subclass of
        # BotoCoreError, so catching only BotoCoreError let those exceptions
        # escape instead of returning the fallback error dict that callers
        # rely on. Catch both so the fallback path always engages.
        print(f"[DEBUG:StorageSecurity] Error finding storage resources: {e}")
        await ctx.error(f"Error finding storage resources: {e}")
        return {"error": str(e), "resources_by_service": {}}
|
|
296
|
+
|
|
297
|
+
|
|
298
|
+
async def check_s3_buckets(
    region: str, s3_client: Any, ctx: Context, storage_resources: Dict[str, Any]
) -> Dict[str, Any]:
    """Check S3 buckets for encryption and security best practices.

    Args:
        region: AWS region whose buckets should be checked.
        s3_client: boto3 S3 client for the region.
        ctx: MCP context for warning/error reporting.
        storage_resources: Output of find_storage_resources(); when it
            contains S3 entries they are used instead of calling
            list_buckets() directly.

    Returns:
        Dict with compliant/non-compliant counts and per-bucket details.
    """
    print(f"[DEBUG:StorageSecurity] Checking S3 buckets in {region}")

    results = {
        "service": "s3",
        "resources_checked": 0,
        "compliant_resources": 0,
        "non_compliant_resources": 0,
        "resource_details": [],
    }

    try:
        # Get bucket list - either from Resource Explorer or directly
        buckets = []

        if "error" not in storage_resources and "s3" in storage_resources.get(
            "resources_by_service", {}
        ):
            # Use Resource Explorer results
            s3_resources = storage_resources["resources_by_service"]["s3"]
            for resource in s3_resources:
                arn = resource.get("Arn", "")
                if ":bucket/" in arn or ":bucket:" in arn:
                    bucket_name = arn.split(":")[-1]
                    buckets.append(bucket_name)
                elif "/" not in arn:
                    # FIX: standard S3 bucket ARNs look like
                    # "arn:aws:s3:::name" -- they contain no "bucket" token,
                    # so without this branch every bucket discovered through
                    # Resource Explorer was silently skipped. The "/" guard
                    # excludes sub-resources such as access points.
                    parts = arn.split(":")
                    if len(parts) >= 6 and parts[2] == "s3" and parts[-1]:
                        buckets.append(parts[-1])
        else:
            # Fall back to direct API call
            response = s3_client.list_buckets()
            for bucket in response["Buckets"]:
                # Check if bucket is in the specified region
                try:
                    location = s3_client.get_bucket_location(Bucket=bucket["Name"])
                    bucket_region = location.get("LocationConstraint")
                    # us-east-1 returns None for the location constraint
                    if bucket_region is None:
                        bucket_region = "us-east-1"

                    if bucket_region == region:
                        buckets.append(bucket["Name"])
                except Exception as e:
                    print(
                        f"[DEBUG:StorageSecurity] Error getting location for bucket {bucket['Name']}: {e}"
                    )
                    await ctx.warning(f"Error getting location for bucket {bucket['Name']}: {e}")

        print(f"[DEBUG:StorageSecurity] Found {len(buckets)} S3 buckets in region {region}")
        results["resources_checked"] = len(buckets)

        # Check each bucket
        for bucket_name in buckets:
            bucket_result = {
                "name": bucket_name,
                "arn": f"arn:aws:s3:::{bucket_name}",
                "type": "s3",
                "compliant": True,
                "issues": [],
                "checks": {},
            }

            # Check default encryption
            try:
                encryption = s3_client.get_bucket_encryption(Bucket=bucket_name)
                encryption_rules = encryption.get("ServerSideEncryptionConfiguration", {}).get(
                    "Rules", []
                )

                if encryption_rules:
                    encryption_type = (
                        encryption_rules[0]
                        .get("ApplyServerSideEncryptionByDefault", {})
                        .get("SSEAlgorithm")
                    )
                    bucket_result["checks"]["default_encryption"] = {
                        "enabled": True,
                        "type": encryption_type,
                    }

                    # Check if using CMK (a KMSMasterKeyID is only present for SSE-KMS)
                    kms_key = (
                        encryption_rules[0]
                        .get("ApplyServerSideEncryptionByDefault", {})
                        .get("KMSMasterKeyID")
                    )
                    bucket_result["checks"]["using_cmk"] = kms_key is not None

                    # Check if using bucket key (reduces KMS request costs)
                    bucket_key_enabled = encryption_rules[0].get("BucketKeyEnabled", False)
                    bucket_result["checks"]["bucket_key_enabled"] = bucket_key_enabled
                else:
                    bucket_result["compliant"] = False
                    bucket_result["issues"].append("Default encryption not enabled")
                    bucket_result["checks"]["default_encryption"] = {"enabled": False}
                    bucket_result["checks"]["using_cmk"] = False
            except Exception:
                # get_bucket_encryption raises when no configuration exists;
                # treat any failure here as "no default encryption".
                bucket_result["compliant"] = False
                bucket_result["issues"].append("Default encryption not enabled")
                bucket_result["checks"]["default_encryption"] = {"enabled": False}
                bucket_result["checks"]["using_cmk"] = False

            # Check public access block -- all four settings must be on
            try:
                public_access = s3_client.get_public_access_block(Bucket=bucket_name)
                block_public_access = all(
                    [
                        public_access["PublicAccessBlockConfiguration"]["BlockPublicAcls"],
                        public_access["PublicAccessBlockConfiguration"]["IgnorePublicAcls"],
                        public_access["PublicAccessBlockConfiguration"]["BlockPublicPolicy"],
                        public_access["PublicAccessBlockConfiguration"]["RestrictPublicBuckets"],
                    ]
                )

                bucket_result["checks"]["block_public_access"] = {
                    "enabled": block_public_access,
                    "configuration": public_access["PublicAccessBlockConfiguration"],
                }

                if not block_public_access:
                    bucket_result["compliant"] = False
                    bucket_result["issues"].append("Public access not fully blocked")
            except Exception as e:
                print(
                    f"[DEBUG:StorageSecurity] Error checking public access block for {bucket_name}: {e}"
                )
                bucket_result["checks"]["block_public_access"] = {
                    "enabled": False,
                    "error": str(e),
                }
                bucket_result["compliant"] = False
                bucket_result["issues"].append("Public access block status unknown")

            # Generate remediation steps
            bucket_result["remediation"] = []

            if not bucket_result["checks"].get("default_encryption", {}).get("enabled", False):
                bucket_result["remediation"].append(
                    "Enable default encryption using SSE-KMS or SSE-S3"
                )

            if not bucket_result["checks"].get("block_public_access", {}).get("enabled", False):
                bucket_result["remediation"].append(
                    "Enable block public access settings for this bucket"
                )

            # Update counts
            if bucket_result["compliant"]:
                results["compliant_resources"] += 1
            else:
                results["non_compliant_resources"] += 1

            results["resource_details"].append(bucket_result)

        return results

    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        # FIX: ClientError is not a BotoCoreError subclass; catching both
        # keeps API-side failures (e.g. AccessDenied on list_buckets) from
        # escaping instead of returning the empty-results error dict.
        await ctx.error(f"Error checking S3 buckets: {e}")
        return {
            "service": "s3",
            "error": str(e),
            "resources_checked": 0,
            "compliant_resources": 0,
            "non_compliant_resources": 0,
            "resource_details": [],
        }
|
|
465
|
+
|
|
466
|
+
|
|
467
|
+
async def check_ebs_volumes(
    region: str, ec2_client: Any, ctx: Context, storage_resources: Dict[str, Any]
) -> Dict[str, Any]:
    """Check EBS volumes for encryption and security best practices.

    Args:
        region: AWS region whose volumes should be checked.
        ec2_client: boto3 EC2 client for the region.
        ctx: MCP context for warning/error reporting.
        storage_resources: Output of find_storage_resources(); when it
            contains EBS entries they are used instead of paginating
            describe_volumes directly.

    Returns:
        Dict with compliant/non-compliant counts and per-volume details.
    """
    print(f"[DEBUG:StorageSecurity] Checking EBS volumes in {region}")

    results = {
        "service": "ebs",
        "resources_checked": 0,
        "compliant_resources": 0,
        "non_compliant_resources": 0,
        "resource_details": [],
    }

    try:
        # Get volume list - either from Resource Explorer or directly
        volumes = []

        if "error" not in storage_resources and "ebs" in storage_resources.get(
            "resources_by_service", {}
        ):
            # Use Resource Explorer results (volume ARNs end in "volume/<id>")
            ebs_resources = storage_resources["resources_by_service"]["ebs"]
            for resource in ebs_resources:
                arn = resource.get("Arn", "")
                if "volume/" in arn:
                    volume_id = arn.split("/")[-1]
                    volumes.append(volume_id)
        else:
            # Fall back to direct API call
            paginator = ec2_client.get_paginator("describe_volumes")
            page_iterator = paginator.paginate()

            for page in page_iterator:
                for volume in page.get("Volumes", []):
                    volumes.append(volume["VolumeId"])

        print(f"[DEBUG:StorageSecurity] Found {len(volumes)} EBS volumes in region {region}")
        results["resources_checked"] = len(volumes)

        # Check each volume in batches to avoid API limits
        batch_size = 100
        for i in range(0, len(volumes), batch_size):
            batch = volumes[i : i + batch_size]

            try:
                response = ec2_client.describe_volumes(VolumeIds=batch)

                for volume in response.get("Volumes", []):
                    volume_result = {
                        "id": volume["VolumeId"],
                        "arn": f"arn:aws:ec2:{region}:{volume.get('OwnerId', '')}:volume/{volume['VolumeId']}",
                        "type": "ebs",
                        "compliant": True,
                        "issues": [],
                        "checks": {},
                    }

                    # Check if volume is encrypted
                    is_encrypted = volume.get("Encrypted", False)
                    volume_result["checks"]["encrypted"] = is_encrypted

                    # Check KMS key if encrypted
                    if is_encrypted and "KmsKeyId" in volume:
                        volume_result["checks"]["kms_key_id"] = volume["KmsKeyId"]
                        # NOTE(review): "arn:aws:kms:region:aws:" looks like a
                        # placeholder -- real key ARNs embed an actual region
                        # and account id, so this prefix never matches and
                        # using_cmk evaluates True for every encrypted volume.
                        # Kept as-is to preserve behavior; distinguishing AWS
                        # managed keys from CMKs reliably needs a KMS lookup.
                        is_cmk = not volume["KmsKeyId"].startswith("arn:aws:kms:region:aws:")
                        volume_result["checks"]["using_cmk"] = is_cmk
                    else:
                        volume_result["checks"]["using_cmk"] = False

                    # Mark as non-compliant if not encrypted
                    if not is_encrypted:
                        volume_result["compliant"] = False
                        volume_result["issues"].append("Volume is not encrypted")

                    # Generate remediation steps
                    volume_result["remediation"] = []

                    if not is_encrypted:
                        volume_result["remediation"].append(
                            "Create an encrypted snapshot of this volume and restore to a new encrypted volume"
                        )
                        volume_result["remediation"].append(
                            "Enable default EBS encryption for the region"
                        )
                    elif not volume_result["checks"].get("using_cmk", False):
                        volume_result["remediation"].append(
                            "Consider using a customer-managed KMS key instead of AWS managed key"
                        )

                    # Update counts
                    if volume_result["compliant"]:
                        results["compliant_resources"] += 1
                    else:
                        results["non_compliant_resources"] += 1

                    results["resource_details"].append(volume_result)

            except Exception as e:
                # Best-effort: a failing batch is reported but does not abort
                # the remaining batches.
                print(f"[DEBUG:StorageSecurity] Error checking batch of EBS volumes: {e}")
                await ctx.warning(f"Error checking batch of EBS volumes: {e}")

        return results

    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        # FIX: ClientError is not a BotoCoreError subclass; catching both
        # keeps API-side failures from escaping instead of returning the
        # empty-results error dict.
        await ctx.error(f"Error checking EBS volumes: {e}")
        return {
            "service": "ebs",
            "error": str(e),
            "resources_checked": 0,
            "compliant_resources": 0,
            "non_compliant_resources": 0,
            "resource_details": [],
        }
|
|
582
|
+
|
|
583
|
+
|
|
584
|
+
async def check_rds_instances(
    region: str, rds_client: Any, ctx: Context, storage_resources: Dict[str, Any]
) -> Dict[str, Any]:
    """Check RDS instances for encryption and security best practices.

    Args:
        region: AWS region whose instances should be checked.
        rds_client: boto3 RDS client for the region.
        ctx: MCP context for warning/error reporting.
        storage_resources: Output of find_storage_resources(); when it
            contains RDS entries they are used instead of paginating
            describe_db_instances directly.

    Returns:
        Dict with compliant/non-compliant counts and per-instance details.
    """
    print(f"[DEBUG:StorageSecurity] Checking RDS instances in {region}")

    results = {
        "service": "rds",
        "resources_checked": 0,
        "compliant_resources": 0,
        "non_compliant_resources": 0,
        "resource_details": [],
    }

    try:
        # Get RDS instance list - either from Resource Explorer or directly
        instances = []

        if "error" not in storage_resources and "rds" in storage_resources.get(
            "resources_by_service", {}
        ):
            # Use Resource Explorer results (DB instance ARNs contain ":db:")
            rds_resources = storage_resources["resources_by_service"]["rds"]
            for resource in rds_resources:
                arn = resource.get("Arn", "")
                if ":db:" in arn:
                    db_id = arn.split(":")[-1]
                    instances.append(db_id)
        else:
            # Fall back to direct API call
            paginator = rds_client.get_paginator("describe_db_instances")
            page_iterator = paginator.paginate()

            for page in page_iterator:
                for instance in page.get("DBInstances", []):
                    instances.append(instance["DBInstanceIdentifier"])

        print(f"[DEBUG:StorageSecurity] Found {len(instances)} RDS instances in region {region}")
        results["resources_checked"] = len(instances)

        # Check each RDS instance
        for db_id in instances:
            try:
                response = rds_client.describe_db_instances(DBInstanceIdentifier=db_id)

                if not response.get("DBInstances"):
                    continue

                instance = response["DBInstances"][0]

                instance_result = {
                    "id": instance["DBInstanceIdentifier"],
                    "arn": instance.get("DBInstanceArn", f"arn:aws:rds:{region}::db:{db_id}"),
                    "type": "rds",
                    "compliant": True,
                    "issues": [],
                    "checks": {},
                }

                # Check if storage is encrypted
                is_storage_encrypted = instance.get("StorageEncrypted", False)
                instance_result["checks"]["storage_encrypted"] = is_storage_encrypted

                # Check KMS key if encrypted
                if is_storage_encrypted and "KmsKeyId" in instance:
                    instance_result["checks"]["kms_key_id"] = instance["KmsKeyId"]
                    # NOTE(review): "arn:aws:kms:region:aws:" looks like a
                    # placeholder -- real key ARNs embed an actual region and
                    # account id, so this prefix never matches and using_cmk
                    # evaluates True for every encrypted instance. Kept as-is
                    # to preserve behavior; a reliable CMK check needs KMS.
                    is_cmk = not instance["KmsKeyId"].startswith("arn:aws:kms:region:aws:")
                    instance_result["checks"]["using_cmk"] = is_cmk
                else:
                    instance_result["checks"]["using_cmk"] = False

                # Check if SSL is enforced
                parameter_groups = instance.get("DBParameterGroups", [])

                # This would require additional API calls to check parameter groups
                # For now, we'll just note that it should be checked
                instance_result["checks"]["ssl_check_needed"] = len(parameter_groups) > 0

                # Mark as non-compliant if not encrypted
                if not is_storage_encrypted:
                    instance_result["compliant"] = False
                    instance_result["issues"].append("RDS instance storage is not encrypted")

                # Generate remediation steps
                instance_result["remediation"] = []

                if not is_storage_encrypted:
                    instance_result["remediation"].append(
                        "Create an encrypted snapshot and restore to a new encrypted instance"
                    )
                    instance_result["remediation"].append(
                        "Enable default encryption for new RDS instances"
                    )
                elif not instance_result["checks"].get("using_cmk", False):
                    instance_result["remediation"].append(
                        "Consider using a customer-managed KMS key instead of AWS managed key"
                    )

                if instance_result["checks"].get("ssl_check_needed", False):
                    instance_result["remediation"].append(
                        "Check and enforce SSL connections using parameter groups"
                    )

                # Update counts
                if instance_result["compliant"]:
                    results["compliant_resources"] += 1
                else:
                    results["non_compliant_resources"] += 1

                results["resource_details"].append(instance_result)

            except Exception as e:
                # Best-effort: one failing instance does not abort the scan.
                print(f"[DEBUG:StorageSecurity] Error checking RDS instance {db_id}: {e}")
                await ctx.warning(f"Error checking RDS instance {db_id}: {e}")

        return results

    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        # FIX: ClientError is not a BotoCoreError subclass; catching both
        # keeps API-side failures from escaping instead of returning the
        # empty-results error dict.
        await ctx.error(f"Error checking RDS instances: {e}")
        return {
            "service": "rds",
            "error": str(e),
            "resources_checked": 0,
            "compliant_resources": 0,
            "non_compliant_resources": 0,
            "resource_details": [],
        }
|
|
712
|
+
|
|
713
|
+
|
|
714
|
+
async def check_dynamodb_tables(
    region: str, dynamodb_client: Any, ctx: Context, storage_resources: Dict[str, Any]
) -> Dict[str, Any]:
    """Check DynamoDB tables for encryption and security best practices.

    Args:
        region: AWS region being scanned (used for logging and fallback ARNs).
        dynamodb_client: boto3 DynamoDB client for the region.
        ctx: MCP context used to surface warnings/errors to the caller.
        storage_resources: Prior Resource Explorer results; when they contain a
            "dynamodb" entry the table list is taken from there instead of
            calling ``list_tables``.

    Returns:
        Summary dict with ``resources_checked``/``compliant_resources``/
        ``non_compliant_resources`` counts and per-table ``resource_details``.
        On a BotoCore-level failure, the same shape with an ``error`` key and
        zeroed counts.
    """
    print(f"[DEBUG:StorageSecurity] Checking DynamoDB tables in {region}")

    results = {
        "service": "dynamodb",
        "resources_checked": 0,
        "compliant_resources": 0,
        "non_compliant_resources": 0,
        "resource_details": [],
    }

    try:
        # Get DynamoDB table list - either from Resource Explorer or directly
        tables = []

        if "error" not in storage_resources and "dynamodb" in storage_resources.get(
            "resources_by_service", {}
        ):
            # Use Resource Explorer results
            dynamodb_resources = storage_resources["resources_by_service"]["dynamodb"]
            for resource in dynamodb_resources:
                arn = resource.get("Arn", "")
                if ":table/" in arn:
                    table_name = arn.split("/")[-1]
                    tables.append(table_name)
        else:
            # Fall back to direct API call
            response = dynamodb_client.list_tables()
            tables = response.get("TableNames", [])

            # Handle pagination if needed
            while "LastEvaluatedTableName" in response:
                response = dynamodb_client.list_tables(
                    ExclusiveStartTableName=response["LastEvaluatedTableName"]
                )
                tables.extend(response.get("TableNames", []))

        print(f"[DEBUG:StorageSecurity] Found {len(tables)} DynamoDB tables in region {region}")
        results["resources_checked"] = len(tables)

        # Check each DynamoDB table
        for table_name in tables:
            try:
                response = dynamodb_client.describe_table(TableName=table_name)

                if not response.get("Table"):
                    continue

                table = response["Table"]

                table_result = {
                    "name": table_name,
                    "arn": table.get("TableArn", f"arn:aws:dynamodb:{region}::table/{table_name}"),
                    "type": "dynamodb",
                    "compliant": True,
                    "issues": [],
                    "checks": {},
                }

                # Check SSE settings from the response we already have.
                # (Previously this issued a second, redundant describe_table
                # call per table just to read SSEDescription.)
                try:
                    sse_description = table.get("SSEDescription", {})
                    sse_status = sse_description.get("Status")
                    sse_type = sse_description.get("SSEType")
                    kms_key_id = sse_description.get("KMSMasterKeyArn")

                    # DynamoDB tables are encrypted by default with AWS owned keys
                    # (in which case SSEDescription may be absent entirely);
                    # here "encrypted" means SSE with a KMS key is explicitly
                    # enabled, and we additionally flag whether a
                    # customer-managed key is in use.
                    is_encrypted = sse_status == "ENABLED"
                    is_cmk = sse_type == "KMS" and kms_key_id is not None

                    table_result["checks"]["encrypted"] = is_encrypted
                    table_result["checks"]["encryption_type"] = (
                        sse_type if is_encrypted else "NONE"
                    )
                    table_result["checks"]["using_cmk"] = is_cmk

                    if is_encrypted and is_cmk:
                        table_result["checks"]["kms_key_id"] = kms_key_id

                    # DynamoDB tables are always encrypted, but we prefer CMK over AWS owned keys
                    if not is_encrypted:
                        table_result["compliant"] = False
                        table_result["issues"].append("DynamoDB table is not encrypted")
                    elif not is_cmk:
                        # Still compliant but could be improved
                        table_result["issues"].append(
                            "Using AWS owned keys instead of customer-managed keys"
                        )

                except Exception as e:
                    print(
                        f"[DEBUG:StorageSecurity] Error checking SSE for table {table_name}: {e}"
                    )
                    table_result["compliant"] = False
                    table_result["issues"].append("Error checking encryption settings")
                    table_result["checks"]["encrypted"] = False

                # Generate remediation steps
                table_result["remediation"] = []

                if not table_result["checks"].get("encrypted", False):
                    table_result["remediation"].append(
                        "Enable server-side encryption for the DynamoDB table"
                    )
                elif not table_result["checks"].get("using_cmk", False):
                    table_result["remediation"].append(
                        "Consider using a customer-managed KMS key instead of AWS owned keys"
                    )

                # Update counts
                if table_result["compliant"]:
                    results["compliant_resources"] += 1
                else:
                    results["non_compliant_resources"] += 1

                results["resource_details"].append(table_result)

            except Exception as e:
                # Per-table failures are reported but do not abort the scan.
                print(f"[DEBUG:StorageSecurity] Error checking DynamoDB table {table_name}: {e}")
                await ctx.warning(f"Error checking DynamoDB table {table_name}: {e}")

        return results

    except botocore.exceptions.BotoCoreError as e:
        await ctx.error(f"Error checking DynamoDB tables: {e}")
        return {
            "service": "dynamodb",
            "error": str(e),
            "resources_checked": 0,
            "compliant_resources": 0,
            "non_compliant_resources": 0,
            "resource_details": [],
        }
|
|
853
|
+
|
|
854
|
+
|
|
855
|
+
async def check_efs_filesystems(
    region: str, efs_client: Any, ctx: Context, storage_resources: Dict[str, Any]
) -> Dict[str, Any]:
    """Check EFS filesystems for encryption and security best practices.

    Args:
        region: AWS region being scanned (used for logging and fallback ARNs).
        efs_client: boto3 EFS client for the region.
        ctx: MCP context used to surface warnings/errors to the caller.
        storage_resources: Prior Resource Explorer results; when they contain
            an "elasticfilesystem" entry the filesystem list is taken from
            there instead of calling ``describe_file_systems``.

    Returns:
        Summary dict with compliance counts and per-filesystem
        ``resource_details``. On a BotoCore-level failure, the same shape with
        an ``error`` key and zeroed counts.
    """
    print(f"[DEBUG:StorageSecurity] Checking EFS filesystems in {region}")

    results = {
        "service": "efs",
        "resources_checked": 0,
        "compliant_resources": 0,
        "non_compliant_resources": 0,
        "resource_details": [],
    }

    try:
        # Get EFS filesystem list - either from Resource Explorer or directly
        filesystems = []

        if "error" not in storage_resources and "elasticfilesystem" in storage_resources.get(
            "resources_by_service", {}
        ):
            # Use Resource Explorer results
            efs_resources = storage_resources["resources_by_service"]["elasticfilesystem"]
            for resource in efs_resources:
                arn = resource.get("Arn", "")
                if ":file-system/" in arn:
                    fs_id = arn.split("/")[-1]
                    filesystems.append(fs_id)
        else:
            # Fall back to direct API call
            paginator = efs_client.get_paginator("describe_file_systems")
            page_iterator = paginator.paginate()

            for page in page_iterator:
                for fs in page.get("FileSystems", []):
                    filesystems.append(fs["FileSystemId"])

        print(
            f"[DEBUG:StorageSecurity] Found {len(filesystems)} EFS filesystems in region {region}"
        )
        results["resources_checked"] = len(filesystems)

        # Check each EFS filesystem
        for fs_id in filesystems:
            try:
                response = efs_client.describe_file_systems(FileSystemId=fs_id)

                if not response.get("FileSystems"):
                    continue

                fs = response["FileSystems"][0]

                fs_result = {
                    "id": fs["FileSystemId"],
                    "arn": fs.get(
                        "FileSystemArn", f"arn:aws:elasticfilesystem:{region}::file-system/{fs_id}"
                    ),
                    "type": "efs",
                    "compliant": True,
                    "issues": [],
                    "checks": {},
                }

                # Check if encrypted
                is_encrypted = fs.get("Encrypted", False)
                fs_result["checks"]["encrypted"] = is_encrypted

                # Check KMS key if encrypted
                if is_encrypted and "KmsKeyId" in fs:
                    kms_key_id = fs["KmsKeyId"]
                    fs_result["checks"]["kms_key_id"] = kms_key_id
                    # Heuristic: the AWS-managed EFS key surfaces through the
                    # "alias/aws/elasticfilesystem" alias; anything else is
                    # treated as a customer-managed key. The previous check
                    # compared against the literal prefix
                    # "arn:aws:kms:region:aws:" ("region" spelled out), which
                    # never matches a real ARN, so using_cmk was always True.
                    # NOTE(review): an AWS-managed key returned as a bare key
                    # ARN is indistinguishable from a CMK ARN without a
                    # kms:DescribeKey call - confirm if exact classification
                    # is required.
                    is_cmk = "alias/aws/" not in kms_key_id
                    fs_result["checks"]["using_cmk"] = is_cmk
                else:
                    fs_result["checks"]["using_cmk"] = False

                # Mark as non-compliant if not encrypted
                if not is_encrypted:
                    fs_result["compliant"] = False
                    fs_result["issues"].append("EFS filesystem is not encrypted")

                # Generate remediation steps
                fs_result["remediation"] = []

                if not is_encrypted:
                    fs_result["remediation"].append(
                        "Create a new encrypted EFS filesystem and migrate data"
                    )
                    fs_result["remediation"].append(
                        "Note: Encryption cannot be enabled on existing EFS filesystems"
                    )
                elif not fs_result["checks"].get("using_cmk", False):
                    fs_result["remediation"].append(
                        "Consider using a customer-managed KMS key instead of AWS managed key"
                    )

                # Update counts
                if fs_result["compliant"]:
                    results["compliant_resources"] += 1
                else:
                    results["non_compliant_resources"] += 1

                results["resource_details"].append(fs_result)

            except Exception as e:
                # Per-filesystem failures are reported but do not abort the scan.
                print(f"[DEBUG:StorageSecurity] Error checking EFS filesystem {fs_id}: {e}")
                await ctx.warning(f"Error checking EFS filesystem {fs_id}: {e}")

        return results

    except botocore.exceptions.BotoCoreError as e:
        await ctx.error(f"Error checking EFS filesystems: {e}")
        return {
            "service": "efs",
            "error": str(e),
            "resources_checked": 0,
            "compliant_resources": 0,
            "non_compliant_resources": 0,
            "resource_details": [],
        }
|
|
975
|
+
|
|
976
|
+
|
|
977
|
+
async def check_elasticache_clusters(
    region: str, elasticache_client: Any, ctx: Context, storage_resources: Dict[str, Any]
) -> Dict[str, Any]:
    """Check ElastiCache clusters for encryption and security best practices.

    Args:
        region: AWS region being scanned (used for logging and synthesized ARNs).
        elasticache_client: boto3 ElastiCache client for the region.
        ctx: MCP context used to surface warnings/errors to the caller.
        storage_resources: Prior Resource Explorer results; when they contain
            an "elasticache" entry the cluster list is taken from there
            instead of calling ``describe_cache_clusters``.

    Returns:
        Summary dict with compliance counts and per-cluster
        ``resource_details``. On a BotoCore-level failure, the same shape with
        an ``error`` key and zeroed counts.
    """
    print(f"[DEBUG:StorageSecurity] Checking ElastiCache clusters in {region}")

    results = {
        "service": "elasticache",
        "resources_checked": 0,
        "compliant_resources": 0,
        "non_compliant_resources": 0,
        "resource_details": [],
    }

    try:
        # Get ElastiCache cluster list - either from Resource Explorer or directly
        clusters = []

        if "error" not in storage_resources and "elasticache" in storage_resources.get(
            "resources_by_service", {}
        ):
            # Use Resource Explorer results
            elasticache_resources = storage_resources["resources_by_service"]["elasticache"]
            for resource in elasticache_resources:
                arn = resource.get("Arn", "")
                if ":cluster:" in arn:
                    cluster_id = arn.split(":")[-1]
                    clusters.append(cluster_id)
        else:
            # Fall back to direct API call
            paginator = elasticache_client.get_paginator("describe_cache_clusters")
            page_iterator = paginator.paginate()

            for page in page_iterator:
                for cluster in page.get("CacheClusters", []):
                    clusters.append(cluster["CacheClusterId"])

        print(
            f"[DEBUG:StorageSecurity] Found {len(clusters)} ElastiCache clusters in region {region}"
        )
        results["resources_checked"] = len(clusters)

        # Check each ElastiCache cluster
        for cluster_id in clusters:
            try:
                response = elasticache_client.describe_cache_clusters(
                    CacheClusterId=cluster_id, ShowCacheNodeInfo=True
                )

                if not response.get("CacheClusters"):
                    continue

                cluster = response["CacheClusters"][0]

                cluster_result = {
                    "id": cluster["CacheClusterId"],
                    "arn": f"arn:aws:elasticache:{region}::cluster:{cluster_id}",
                    "type": "elasticache",
                    "engine": cluster.get("Engine", "unknown"),
                    "compliant": True,
                    "issues": [],
                    "checks": {},
                    # Always initialized so consumers can rely on the key even
                    # for engines not handled below (previously it was only
                    # assigned inside the redis/memcached branches, leaving it
                    # missing for any other engine value).
                    "remediation": [],
                }

                # Check if encryption is enabled
                # For Redis, check at-rest and in-transit encryption
                if cluster.get("Engine") == "redis":
                    # Check at-rest encryption
                    at_rest_encryption = cluster.get("AtRestEncryptionEnabled", False)
                    cluster_result["checks"]["at_rest_encryption"] = at_rest_encryption

                    # Check in-transit encryption
                    transit_encryption = cluster.get("TransitEncryptionEnabled", False)
                    cluster_result["checks"]["transit_encryption"] = transit_encryption

                    # Check auth token (password protection)
                    auth_token_enabled = cluster.get("AuthTokenEnabled", False)
                    cluster_result["checks"]["auth_token_enabled"] = auth_token_enabled

                    # Mark as non-compliant if either encryption is missing
                    if not at_rest_encryption:
                        cluster_result["compliant"] = False
                        cluster_result["issues"].append(
                            "Redis cluster does not have at-rest encryption enabled"
                        )

                    if not transit_encryption:
                        cluster_result["compliant"] = False
                        cluster_result["issues"].append(
                            "Redis cluster does not have in-transit encryption enabled"
                        )

                    # Generate remediation steps
                    if not at_rest_encryption or not transit_encryption:
                        cluster_result["remediation"].append(
                            "Create a new Redis replication group with encryption enabled"
                        )
                        cluster_result["remediation"].append(
                            "Note: Encryption cannot be enabled on existing Redis clusters"
                        )

                    if not auth_token_enabled:
                        cluster_result["remediation"].append(
                            "Enable AUTH token for Redis cluster authentication"
                        )

                # For Memcached, there's limited encryption support
                elif cluster.get("Engine") == "memcached":
                    # Memcached doesn't support at-rest encryption
                    # NOTE(review): recent Memcached engine versions do support
                    # in-transit (TLS) encryption - confirm before relying on
                    # this blanket "no encryption" verdict.
                    cluster_result["checks"]["at_rest_encryption"] = False
                    cluster_result["checks"]["transit_encryption"] = False

                    # Mark as non-compliant since Memcached doesn't support encryption
                    cluster_result["compliant"] = False
                    cluster_result["issues"].append("Memcached does not support encryption")

                    # Generate remediation steps
                    cluster_result["remediation"] = [
                        "Consider using Redis instead of Memcached for encryption support",
                        "Ensure Memcached clusters are in private subnets with strict security groups",
                    ]

                # Update counts
                if cluster_result["compliant"]:
                    results["compliant_resources"] += 1
                else:
                    results["non_compliant_resources"] += 1

                results["resource_details"].append(cluster_result)

            except Exception as e:
                # Per-cluster failures are reported but do not abort the scan.
                print(
                    f"[DEBUG:StorageSecurity] Error checking ElastiCache cluster {cluster_id}: {e}"
                )
                await ctx.warning(f"Error checking ElastiCache cluster {cluster_id}: {e}")

        return results

    except botocore.exceptions.BotoCoreError as e:
        await ctx.error(f"Error checking ElastiCache clusters: {e}")
        return {
            "service": "elasticache",
            "error": str(e),
            "resources_checked": 0,
            "compliant_resources": 0,
            "non_compliant_resources": 0,
            "resource_details": [],
        }
|