aws-inventory-manager 0.17.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aws_inventory_manager-0.17.12.dist-info/LICENSE +21 -0
- aws_inventory_manager-0.17.12.dist-info/METADATA +1292 -0
- aws_inventory_manager-0.17.12.dist-info/RECORD +152 -0
- aws_inventory_manager-0.17.12.dist-info/WHEEL +5 -0
- aws_inventory_manager-0.17.12.dist-info/entry_points.txt +2 -0
- aws_inventory_manager-0.17.12.dist-info/top_level.txt +1 -0
- src/__init__.py +3 -0
- src/aws/__init__.py +11 -0
- src/aws/client.py +128 -0
- src/aws/credentials.py +191 -0
- src/aws/rate_limiter.py +177 -0
- src/cli/__init__.py +12 -0
- src/cli/config.py +130 -0
- src/cli/main.py +4046 -0
- src/cloudtrail/__init__.py +5 -0
- src/cloudtrail/query.py +642 -0
- src/config_service/__init__.py +21 -0
- src/config_service/collector.py +346 -0
- src/config_service/detector.py +256 -0
- src/config_service/resource_type_mapping.py +328 -0
- src/cost/__init__.py +5 -0
- src/cost/analyzer.py +226 -0
- src/cost/explorer.py +209 -0
- src/cost/reporter.py +237 -0
- src/delta/__init__.py +5 -0
- src/delta/calculator.py +206 -0
- src/delta/differ.py +185 -0
- src/delta/formatters.py +272 -0
- src/delta/models.py +154 -0
- src/delta/reporter.py +234 -0
- src/matching/__init__.py +6 -0
- src/matching/config.py +52 -0
- src/matching/normalizer.py +450 -0
- src/matching/prompts.py +33 -0
- src/models/__init__.py +21 -0
- src/models/config_diff.py +135 -0
- src/models/cost_report.py +87 -0
- src/models/deletion_operation.py +104 -0
- src/models/deletion_record.py +97 -0
- src/models/delta_report.py +122 -0
- src/models/efs_resource.py +80 -0
- src/models/elasticache_resource.py +90 -0
- src/models/group.py +318 -0
- src/models/inventory.py +133 -0
- src/models/protection_rule.py +123 -0
- src/models/report.py +288 -0
- src/models/resource.py +111 -0
- src/models/security_finding.py +102 -0
- src/models/snapshot.py +122 -0
- src/restore/__init__.py +20 -0
- src/restore/audit.py +175 -0
- src/restore/cleaner.py +461 -0
- src/restore/config.py +209 -0
- src/restore/deleter.py +976 -0
- src/restore/dependency.py +254 -0
- src/restore/safety.py +115 -0
- src/security/__init__.py +0 -0
- src/security/checks/__init__.py +0 -0
- src/security/checks/base.py +56 -0
- src/security/checks/ec2_checks.py +88 -0
- src/security/checks/elasticache_checks.py +149 -0
- src/security/checks/iam_checks.py +102 -0
- src/security/checks/rds_checks.py +140 -0
- src/security/checks/s3_checks.py +95 -0
- src/security/checks/secrets_checks.py +96 -0
- src/security/checks/sg_checks.py +142 -0
- src/security/cis_mapper.py +97 -0
- src/security/models.py +53 -0
- src/security/reporter.py +174 -0
- src/security/scanner.py +87 -0
- src/snapshot/__init__.py +6 -0
- src/snapshot/capturer.py +453 -0
- src/snapshot/filter.py +259 -0
- src/snapshot/inventory_storage.py +236 -0
- src/snapshot/report_formatter.py +250 -0
- src/snapshot/reporter.py +189 -0
- src/snapshot/resource_collectors/__init__.py +5 -0
- src/snapshot/resource_collectors/apigateway.py +140 -0
- src/snapshot/resource_collectors/backup.py +136 -0
- src/snapshot/resource_collectors/base.py +81 -0
- src/snapshot/resource_collectors/cloudformation.py +55 -0
- src/snapshot/resource_collectors/cloudwatch.py +109 -0
- src/snapshot/resource_collectors/codebuild.py +69 -0
- src/snapshot/resource_collectors/codepipeline.py +82 -0
- src/snapshot/resource_collectors/dynamodb.py +65 -0
- src/snapshot/resource_collectors/ec2.py +240 -0
- src/snapshot/resource_collectors/ecs.py +215 -0
- src/snapshot/resource_collectors/efs_collector.py +102 -0
- src/snapshot/resource_collectors/eks.py +200 -0
- src/snapshot/resource_collectors/elasticache_collector.py +79 -0
- src/snapshot/resource_collectors/elb.py +126 -0
- src/snapshot/resource_collectors/eventbridge.py +156 -0
- src/snapshot/resource_collectors/glue.py +199 -0
- src/snapshot/resource_collectors/iam.py +188 -0
- src/snapshot/resource_collectors/kms.py +111 -0
- src/snapshot/resource_collectors/lambda_func.py +139 -0
- src/snapshot/resource_collectors/rds.py +109 -0
- src/snapshot/resource_collectors/route53.py +86 -0
- src/snapshot/resource_collectors/s3.py +105 -0
- src/snapshot/resource_collectors/secretsmanager.py +70 -0
- src/snapshot/resource_collectors/sns.py +68 -0
- src/snapshot/resource_collectors/sqs.py +82 -0
- src/snapshot/resource_collectors/ssm.py +160 -0
- src/snapshot/resource_collectors/stepfunctions.py +74 -0
- src/snapshot/resource_collectors/vpcendpoints.py +79 -0
- src/snapshot/resource_collectors/waf.py +159 -0
- src/snapshot/storage.py +351 -0
- src/storage/__init__.py +21 -0
- src/storage/audit_store.py +419 -0
- src/storage/database.py +294 -0
- src/storage/group_store.py +763 -0
- src/storage/inventory_store.py +320 -0
- src/storage/resource_store.py +416 -0
- src/storage/schema.py +339 -0
- src/storage/snapshot_store.py +363 -0
- src/utils/__init__.py +12 -0
- src/utils/export.py +305 -0
- src/utils/hash.py +60 -0
- src/utils/logging.py +63 -0
- src/utils/pagination.py +41 -0
- src/utils/paths.py +51 -0
- src/utils/progress.py +41 -0
- src/utils/unsupported_resources.py +306 -0
- src/web/__init__.py +5 -0
- src/web/app.py +97 -0
- src/web/dependencies.py +69 -0
- src/web/routes/__init__.py +1 -0
- src/web/routes/api/__init__.py +18 -0
- src/web/routes/api/charts.py +156 -0
- src/web/routes/api/cleanup.py +186 -0
- src/web/routes/api/filters.py +253 -0
- src/web/routes/api/groups.py +305 -0
- src/web/routes/api/inventories.py +80 -0
- src/web/routes/api/queries.py +202 -0
- src/web/routes/api/resources.py +393 -0
- src/web/routes/api/snapshots.py +314 -0
- src/web/routes/api/views.py +260 -0
- src/web/routes/pages.py +198 -0
- src/web/services/__init__.py +1 -0
- src/web/templates/base.html +955 -0
- src/web/templates/components/navbar.html +31 -0
- src/web/templates/components/sidebar.html +104 -0
- src/web/templates/pages/audit_logs.html +86 -0
- src/web/templates/pages/cleanup.html +279 -0
- src/web/templates/pages/dashboard.html +227 -0
- src/web/templates/pages/diff.html +175 -0
- src/web/templates/pages/error.html +30 -0
- src/web/templates/pages/groups.html +721 -0
- src/web/templates/pages/queries.html +246 -0
- src/web/templates/pages/resources.html +2429 -0
- src/web/templates/pages/snapshot_detail.html +271 -0
- src/web/templates/pages/snapshots.html +429 -0
|
@@ -0,0 +1,254 @@
|
|
|
1
|
+
"""Dependency graph analysis for resource deletion ordering.
|
|
2
|
+
|
|
3
|
+
Builds dependency graph from resource metadata and computes deletion order
|
|
4
|
+
using Kahn's topological sort algorithm.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from collections import defaultdict, deque
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class DependencyResolver:
    """Dependency resolver for resource deletion ordering.

    Builds a dependency graph from resource metadata and computes a safe
    deletion order using Kahn's topological sort algorithm. Detects circular
    dependencies and assigns resources to deletion tiers.

    Attributes:
        graph: Dependency graph where graph[child] = [parent1, parent2, ...].
            Children must be deleted before parents.
    """

    # Dependency field mappings for common AWS resource types.
    # Dotted entries (e.g. "VpcConfig.SubnetIds") are nested metadata paths.
    DEPENDENCY_FIELDS = {
        "AWS::EC2::Instance": ["VpcId", "SubnetId", "SecurityGroupIds"],
        "AWS::EC2::Subnet": ["VpcId"],
        "AWS::EC2::SecurityGroup": ["VpcId"],
        "AWS::EC2::VPC": [],  # VPCs have no dependencies
        "AWS::RDS::DBInstance": ["DBSubnetGroupName", "VpcSecurityGroupIds"],
        "AWS::Lambda::Function": ["VpcConfig.SubnetIds", "Role"],
        "AWS::ECS::Service": ["Cluster", "LoadBalancers.TargetGroupArn"],
    }

    def __init__(self) -> None:
        """Initialize dependency resolver with empty graph."""
        self.graph: dict[str, list[str]] = {}

    def add_dependency(self, parent: str, child: str) -> None:
        """Add dependency: child must be deleted before parent.

        Args:
            parent: Resource ID that depends on child
            child: Resource ID that parent depends on
        """
        if child not in self.graph:
            self.graph[child] = []

        if parent not in self.graph[child]:
            self.graph[child].append(parent)

        # Ensure parent exists in graph even if it has no dependencies
        if parent not in self.graph:
            self.graph[parent] = []

    def build_graph_from_resources(self, resources: list[dict]) -> None:
        """Build dependency graph from resource metadata.

        Automatically detects dependencies based on resource type and metadata
        fields (VpcId, SubnetId, etc.). Only references to resource IDs that
        are present in *resources* are recorded as edges.

        Args:
            resources: List of resource dictionaries with metadata
        """
        # Index of known resource IDs; references to unknown IDs are ignored.
        resource_index = {r["resource_id"]: r for r in resources}

        for resource in resources:
            resource_id = resource["resource_id"]
            resource_type = resource.get("resource_type", "")
            metadata = resource.get("metadata", {})

            # Fields that may reference parent resources for this type
            dep_fields = self.DEPENDENCY_FIELDS.get(resource_type, [])

            for field in dep_fields:
                # Handle nested fields (e.g., "VpcConfig.SubnetIds")
                field_value = self._get_nested_field(metadata, field)
                if not field_value:
                    continue

                # Normalize so scalar IDs and ID lists share one code path
                dep_ids = field_value if isinstance(field_value, list) else [field_value]
                for dep_id in dep_ids:
                    if dep_id in resource_index:
                        # resource_id depends on dep_id (the parent), so
                        # resource_id must be deleted before dep_id
                        self.add_dependency(parent=dep_id, child=resource_id)

    def compute_deletion_order(self, resources: list[str]) -> list[str]:
        """Compute deletion order using Kahn's topological sort algorithm.

        Args:
            resources: List of resource IDs to order

        Returns:
            List of resource IDs in deletion order (children before parents)

        Raises:
            ValueError: If circular dependency detected
        """
        if self.has_cycle():
            raise ValueError("Circular dependency detected - cannot compute deletion order")

        # In-degree of a node = number of children that must be deleted first
        in_degree = {resource: 0 for resource in resources}

        for resource in resources:
            for parent in self.graph.get(resource, []):
                if parent in in_degree:
                    in_degree[parent] += 1

        # Start with resources that have no dependencies (in-degree = 0)
        queue = deque(resource for resource, degree in in_degree.items() if degree == 0)
        result: list[str] = []

        while queue:
            # Remove a resource whose children are all already scheduled
            current = queue.popleft()
            result.append(current)

            # Deleting `current` unblocks its parents
            for parent in self.graph.get(current, []):
                if parent in in_degree:
                    in_degree[parent] -= 1

                    # Parent is ready once all of its children are scheduled
                    if in_degree[parent] == 0:
                        queue.append(parent)

        # If result doesn't contain all resources, there's a cycle
        if len(result) != len(resources):
            raise ValueError("Circular dependency detected - some resources not reachable")

        return result

    def has_cycle(self) -> bool:
        """Detect if dependency graph contains cycles.

        Uses an iterative DFS with three-color marking (white=unvisited,
        gray=visiting, black=done) so very deep dependency chains cannot
        overflow the Python recursion limit.

        Returns:
            True if circular dependency exists, False otherwise
        """
        WHITE, GRAY, BLACK = 0, 1, 2
        color = {node: WHITE for node in self.graph}

        for start in self.graph:
            if color[start] != WHITE:
                continue

            # Explicit stack of (node, iterator over its parents) frames
            # emulates the recursive DFS without using the call stack.
            color[start] = GRAY
            stack = [(start, iter(self.graph.get(start, [])))]

            while stack:
                node, parents = stack[-1]
                advanced = False
                for parent in parents:
                    state = color.get(parent, WHITE)
                    if state == GRAY:
                        # Back edge found - cycle exists
                        return True
                    if state == WHITE:
                        color[parent] = GRAY
                        stack.append((parent, iter(self.graph.get(parent, []))))
                        advanced = True
                        break
                if not advanced:
                    # All parents explored - mark node finished and pop frame
                    color[node] = BLACK
                    stack.pop()

        return False

    def get_deletion_tiers(self, resources: list[str]) -> dict[int, list[str]]:
        """Assign resources to deletion tiers based on dependency depth.

        Tier 1: Resources with no dependencies (delete first)
        Tier 2: Resources depending only on tier 1
        Tier 3: Resources depending on tier 2, etc.

        Resources involved in a cycle never reach in-degree 0 and are
        omitted from the result.

        Args:
            resources: List of resource IDs

        Returns:
            Dictionary mapping tier number to list of resource IDs
        """
        tiers: dict[int, list[str]] = defaultdict(list)

        # In-degree of a node = number of children that must be deleted first
        in_degree = {resource: 0 for resource in resources}
        for resource in resources:
            for parent in self.graph.get(resource, []):
                if parent in in_degree:
                    in_degree[parent] += 1

        # Start with tier 1 (resources with no dependencies)
        queue = deque((resource, 1) for resource, degree in in_degree.items() if degree == 0)

        while queue:
            current, tier = queue.popleft()
            tiers[tier].append(current)

            # Process parents (resources that depend on current)
            for parent in self.graph.get(current, []):
                if parent in in_degree:
                    in_degree[parent] -= 1

                    if in_degree[parent] == 0:
                        # BFS processes tiers in nondecreasing order, so the
                        # last child to unblock a parent has the deepest tier;
                        # the parent lands one tier after its deepest child.
                        queue.append((parent, tier + 1))

        return dict(tiers)

    def _get_nested_field(self, metadata: dict, field_path: str) -> Any:
        """Get nested field value from metadata.

        Args:
            metadata: Resource metadata dictionary
            field_path: Field path (e.g., "VpcConfig.SubnetIds")

        Returns:
            Field value if found, None otherwise
        """
        value: Any = metadata
        for part in field_path.split("."):
            if not isinstance(value, dict):
                return None
            value = value.get(part)
            if value is None:
                return None
        return value
|
src/restore/safety.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
"""Safety checks and protection rule evaluation.
|
|
2
|
+
|
|
3
|
+
Evaluates resources against protection rules to prevent accidental deletion.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from typing import Optional
|
|
9
|
+
|
|
10
|
+
from src.models.protection_rule import ProtectionRule
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class SafetyChecker:
    """Safety checker for resource protection evaluation.

    Evaluates resources against configured protection rules to decide whether
    they must be protected from deletion. Supports multiple rule types
    (tag, type, age, cost, native) evaluated in priority order.

    Attributes:
        rules: List of protection rules sorted by priority
    """

    def __init__(self, rules: list[ProtectionRule]) -> None:
        """Initialize safety checker.

        Args:
            rules: List of protection rules (sorted by priority, 1=highest)
        """
        self.rules = sorted(rules, key=lambda rule: rule.priority)

    def is_protected(self, resource: dict) -> tuple[bool, Optional[str]]:
        """Check if resource is protected by any rule.

        Walks the enabled protection rules in priority order and stops at the
        first match (highest priority wins).

        Args:
            resource: Resource metadata dictionary

        Returns:
            Tuple of (is_protected, reason)
            is_protected: True if resource matches any protection rule
            reason: Human-readable reason for protection, None if not protected
        """
        for candidate in self.rules:
            if candidate.enabled and candidate.matches(resource):
                return True, self._get_protection_reason(candidate, resource)
        return False, None

    def check_all_protections(self, resource: dict) -> list[ProtectionRule]:
        """Check which protection rules match a resource.

        Unlike is_protected(), every matching rule is returned rather than
        only the highest-priority one. Useful for detailed protection analysis.

        Args:
            resource: Resource metadata dictionary

        Returns:
            List of all matching protection rules
        """
        return [rule for rule in self.rules if rule.enabled and rule.matches(resource)]

    def _get_protection_reason(self, rule: ProtectionRule, resource: dict) -> str:
        """Generate human-readable protection reason.

        Uses the rule's own description when present, otherwise builds a
        default message from the rule type and the resource's metadata.

        Args:
            rule: Protection rule that matched
            resource: Resource metadata

        Returns:
            Human-readable protection reason string
        """
        if rule.description:
            return f"{rule.description} (rule: {rule.rule_id})"

        kind = rule.rule_type.value

        if kind == "tag":
            tag_key = rule.patterns.get("tag_key", "")
            resource_tag_value = resource.get("tags", {}).get(tag_key, "")
            return f"Tag {tag_key}={resource_tag_value} (rule: {rule.rule_id})"

        if kind == "type":
            resource_type = resource.get("resource_type", "")
            return f"Resource type {resource_type} protected (rule: {rule.rule_id})"

        if kind == "age":
            age_days = resource.get("age_days", 0)
            threshold = rule.threshold_value
            return f"Resource age {age_days} days < {threshold} days threshold (rule: {rule.rule_id})"

        if kind == "cost":
            cost = resource.get("estimated_monthly_cost", 0)
            threshold = rule.threshold_value
            return f"Resource cost ${cost}/month >= ${threshold} threshold (rule: {rule.rule_id})"

        if kind == "native":
            return f"Native protection enabled (rule: {rule.rule_id})"

        return f"Protected by rule {rule.rule_id}"
|
src/security/__init__.py
ADDED
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
"""Base class for security checks."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from abc import ABC, abstractmethod
|
|
6
|
+
from typing import List
|
|
7
|
+
|
|
8
|
+
from ...models.security_finding import SecurityFinding, Severity
|
|
9
|
+
from ...models.snapshot import Snapshot
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class SecurityCheck(ABC):
    """Abstract base class for all security checks.

    Each security check should:
    1. Have a unique check_id
    2. Have a defined severity level
    3. Implement the execute method to scan a snapshot
    4. Return a list of SecurityFinding objects
    """

    @property
    @abstractmethod
    def check_id(self) -> str:
        """Unique identifier for this check.

        Returns:
            String identifier (e.g., "s3_public_bucket")
        """

    @property
    @abstractmethod
    def severity(self) -> Severity:
        """Severity level for findings from this check.

        Returns:
            Severity enum value
        """

    @abstractmethod
    def execute(self, snapshot: Snapshot) -> List[SecurityFinding]:
        """Execute the security check on a snapshot.

        Args:
            snapshot: Snapshot to scan for security issues

        Returns:
            List of SecurityFinding objects (empty list if no issues found)
        """
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
"""EC2 security checks."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Any, Dict, List
|
|
6
|
+
|
|
7
|
+
from ...models.security_finding import SecurityFinding, Severity
|
|
8
|
+
from ...models.snapshot import Snapshot
|
|
9
|
+
from .base import SecurityCheck
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class EC2IMDSv1Check(SecurityCheck):
    """Flag EC2 instances that still allow IMDSv1.

    EC2 instances should require IMDSv2 (Instance Metadata Service Version 2)
    for better security; IMDSv1 is vulnerable to SSRF attacks.

    Severity: MEDIUM
    """

    @property
    def check_id(self) -> str:
        """Return check identifier."""
        return "ec2_imdsv1_enabled"

    @property
    def severity(self) -> Severity:
        """Return check severity."""
        return Severity.MEDIUM

    def execute(self, snapshot: Snapshot) -> List[SecurityFinding]:
        """Execute EC2 IMDSv1 check.

        Flags EC2 instances whose MetadataOptions.HttpTokens is "optional",
        meaning IMDSv1 is still permitted.

        Args:
            snapshot: Snapshot to scan

        Returns:
            List of findings for EC2 instances with IMDSv1 enabled
        """
        findings: List[SecurityFinding] = []

        for resource in snapshot.resources:
            # Skip anything that is not an EC2 instance with config data
            if not resource.resource_type.startswith("ec2:instance"):
                continue
            if resource.raw_config is None:
                continue
            if not self._is_imdsv1_enabled(resource.raw_config):
                continue

            instance_id = resource.raw_config.get("InstanceId", "unknown")
            description = (
                f"EC2 instance '{instance_id}' has IMDSv1 enabled. "
                f"The instance allows optional use of session tokens for metadata access, "
                f"making it vulnerable to SSRF attacks."
            )
            remediation = (
                "Require IMDSv2 for this EC2 instance. "
                "Use the AWS CLI command: "
                f"'aws ec2 modify-instance-metadata-options --instance-id {instance_id} "
                "--http-tokens required --http-endpoint enabled' "
                "or update the instance's metadata options in the EC2 console to require IMDSv2."
            )
            findings.append(
                SecurityFinding(
                    resource_arn=resource.arn,
                    finding_type=self.check_id,
                    severity=self.severity,
                    description=description,
                    remediation=remediation,
                    metadata={"instance_id": instance_id, "region": resource.region},
                )
            )

        return findings

    def _is_imdsv1_enabled(self, config: Dict[str, Any]) -> bool:
        """Check if instance has IMDSv1 enabled.

        Args:
            config: Instance raw configuration

        Returns:
            True if IMDSv1 is enabled (HttpTokens is "optional")
        """
        # "optional" means IMDSv1 is allowed (BAD);
        # "required" means IMDSv2 is enforced (GOOD).
        return config.get("MetadataOptions", {}).get("HttpTokens", "optional") == "optional"
|
|
@@ -0,0 +1,149 @@
|
|
|
1
|
+
"""ElastiCache security checks."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Any, Dict, List
|
|
6
|
+
|
|
7
|
+
from ...models.security_finding import SecurityFinding, Severity
|
|
8
|
+
from ...models.snapshot import Snapshot
|
|
9
|
+
from .base import SecurityCheck
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class ElastiCacheEncryptionCheck(SecurityCheck):
    """Flag ElastiCache clusters with encryption gaps.

    Checks for:
    - Redis clusters without encryption at rest (AtRestEncryptionEnabled=False)
    - Redis/Memcached clusters without encryption in transit (TransitEncryptionEnabled=False)

    Note: Memcached does not support encryption at rest, so only in-transit
    encryption is checked for Memcached clusters.

    Severity: MEDIUM
    """

    @property
    def check_id(self) -> str:
        """Return check identifier."""
        return "elasticache_unencrypted"

    @property
    def severity(self) -> Severity:
        """Return check severity."""
        return Severity.MEDIUM

    def execute(self, snapshot: Snapshot) -> List[SecurityFinding]:
        """Execute ElastiCache encryption checks.

        Checks for:
        - Redis: AtRestEncryptionEnabled=False or TransitEncryptionEnabled=False
        - Memcached: TransitEncryptionEnabled=False only (doesn't support at-rest)

        Args:
            snapshot: Snapshot to scan

        Returns:
            List of findings for ElastiCache encryption issues
        """
        findings: List[SecurityFinding] = []

        for resource in snapshot.resources:
            # Skip non-ElastiCache resources and those without config data
            if not resource.resource_type.startswith("elasticache:"):
                continue
            config = resource.raw_config
            if config is None:
                continue

            engine = config.get("Engine", "").lower()

            # At-rest encryption is only checked for Redis
            if engine == "redis" and not self._is_encrypted_at_rest(config):
                findings.append(
                    self._create_at_rest_finding(resource.name, resource.arn, resource.region, engine)
                )

            # In-transit encryption applies to both Redis and Memcached
            if not self._is_encrypted_in_transit(config):
                findings.append(
                    self._create_in_transit_finding(resource.name, resource.arn, resource.region, engine)
                )

        return findings

    def _is_encrypted_at_rest(self, config: Dict[str, Any]) -> bool:
        """Check if cluster has encryption at rest enabled.

        Args:
            config: ElastiCache cluster raw configuration

        Returns:
            True if AtRestEncryptionEnabled is True
        """
        return config.get("AtRestEncryptionEnabled", False)

    def _is_encrypted_in_transit(self, config: Dict[str, Any]) -> bool:
        """Check if cluster has encryption in transit enabled.

        Args:
            config: ElastiCache cluster raw configuration

        Returns:
            True if TransitEncryptionEnabled is True
        """
        return config.get("TransitEncryptionEnabled", False)

    def _create_at_rest_finding(self, cluster_id: str, arn: str, region: str, engine: str) -> SecurityFinding:
        """Create a finding for cluster without encryption at rest.

        Args:
            cluster_id: Cluster identifier
            arn: Resource ARN
            region: AWS region
            engine: Engine type (redis/memcached)

        Returns:
            SecurityFinding for at-rest encryption issue
        """
        description = (
            f"ElastiCache {engine} cluster '{cluster_id}' does not have encryption at rest enabled. "
            f"Data stored on disk is not encrypted, which could expose sensitive cached data "
            f"if storage media is compromised."
        )
        remediation = (
            "Enable encryption at rest for this ElastiCache cluster. "
            "Note: Encryption at rest cannot be enabled on existing clusters. "
            "You must create a new cluster with encryption enabled and migrate your data. "
            "Create a backup of your existing cluster, then restore it to a new cluster with "
            "AtRestEncryptionEnabled=true. For Redis clusters, ensure you specify a KMS key. "
            "After verification, update your applications to point to the new encrypted cluster."
        )
        return SecurityFinding(
            resource_arn=arn,
            finding_type=self.check_id,
            severity=self.severity,
            description=description,
            remediation=remediation,
            metadata={"cluster_id": cluster_id, "region": region, "engine": engine},
        )

    def _create_in_transit_finding(self, cluster_id: str, arn: str, region: str, engine: str) -> SecurityFinding:
        """Create a finding for cluster without encryption in transit.

        Args:
            cluster_id: Cluster identifier
            arn: Resource ARN
            region: AWS region
            engine: Engine type (redis/memcached)

        Returns:
            SecurityFinding for in-transit encryption issue
        """
        description = (
            f"ElastiCache {engine} cluster '{cluster_id}' does not have encryption in transit enabled. "
            f"Data transmitted between the cache and clients is not encrypted, which could expose "
            f"sensitive cached data during transmission."
        )
        remediation = (
            "Enable encryption in transit for this ElastiCache cluster. "
            "Note: Encryption in transit cannot be enabled on existing clusters. "
            "You must create a new cluster with encryption enabled. "
            f"For {engine}, create a new cluster with TransitEncryptionEnabled=true. "
            "After creating the encrypted cluster, update your application connection strings "
            "to use TLS/SSL connections and point to the new cluster endpoint. "
            "Verify the encrypted connection is working before decommissioning the old cluster."
        )
        return SecurityFinding(
            resource_arn=arn,
            finding_type=self.check_id,
            severity=self.severity,
            description=description,
            remediation=remediation,
            metadata={"cluster_id": cluster_id, "region": region, "engine": engine},
        )
|