aws_inventory_manager-0.17.12-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aws_inventory_manager-0.17.12.dist-info/LICENSE +21 -0
- aws_inventory_manager-0.17.12.dist-info/METADATA +1292 -0
- aws_inventory_manager-0.17.12.dist-info/RECORD +152 -0
- aws_inventory_manager-0.17.12.dist-info/WHEEL +5 -0
- aws_inventory_manager-0.17.12.dist-info/entry_points.txt +2 -0
- aws_inventory_manager-0.17.12.dist-info/top_level.txt +1 -0
- src/__init__.py +3 -0
- src/aws/__init__.py +11 -0
- src/aws/client.py +128 -0
- src/aws/credentials.py +191 -0
- src/aws/rate_limiter.py +177 -0
- src/cli/__init__.py +12 -0
- src/cli/config.py +130 -0
- src/cli/main.py +4046 -0
- src/cloudtrail/__init__.py +5 -0
- src/cloudtrail/query.py +642 -0
- src/config_service/__init__.py +21 -0
- src/config_service/collector.py +346 -0
- src/config_service/detector.py +256 -0
- src/config_service/resource_type_mapping.py +328 -0
- src/cost/__init__.py +5 -0
- src/cost/analyzer.py +226 -0
- src/cost/explorer.py +209 -0
- src/cost/reporter.py +237 -0
- src/delta/__init__.py +5 -0
- src/delta/calculator.py +206 -0
- src/delta/differ.py +185 -0
- src/delta/formatters.py +272 -0
- src/delta/models.py +154 -0
- src/delta/reporter.py +234 -0
- src/matching/__init__.py +6 -0
- src/matching/config.py +52 -0
- src/matching/normalizer.py +450 -0
- src/matching/prompts.py +33 -0
- src/models/__init__.py +21 -0
- src/models/config_diff.py +135 -0
- src/models/cost_report.py +87 -0
- src/models/deletion_operation.py +104 -0
- src/models/deletion_record.py +97 -0
- src/models/delta_report.py +122 -0
- src/models/efs_resource.py +80 -0
- src/models/elasticache_resource.py +90 -0
- src/models/group.py +318 -0
- src/models/inventory.py +133 -0
- src/models/protection_rule.py +123 -0
- src/models/report.py +288 -0
- src/models/resource.py +111 -0
- src/models/security_finding.py +102 -0
- src/models/snapshot.py +122 -0
- src/restore/__init__.py +20 -0
- src/restore/audit.py +175 -0
- src/restore/cleaner.py +461 -0
- src/restore/config.py +209 -0
- src/restore/deleter.py +976 -0
- src/restore/dependency.py +254 -0
- src/restore/safety.py +115 -0
- src/security/__init__.py +0 -0
- src/security/checks/__init__.py +0 -0
- src/security/checks/base.py +56 -0
- src/security/checks/ec2_checks.py +88 -0
- src/security/checks/elasticache_checks.py +149 -0
- src/security/checks/iam_checks.py +102 -0
- src/security/checks/rds_checks.py +140 -0
- src/security/checks/s3_checks.py +95 -0
- src/security/checks/secrets_checks.py +96 -0
- src/security/checks/sg_checks.py +142 -0
- src/security/cis_mapper.py +97 -0
- src/security/models.py +53 -0
- src/security/reporter.py +174 -0
- src/security/scanner.py +87 -0
- src/snapshot/__init__.py +6 -0
- src/snapshot/capturer.py +453 -0
- src/snapshot/filter.py +259 -0
- src/snapshot/inventory_storage.py +236 -0
- src/snapshot/report_formatter.py +250 -0
- src/snapshot/reporter.py +189 -0
- src/snapshot/resource_collectors/__init__.py +5 -0
- src/snapshot/resource_collectors/apigateway.py +140 -0
- src/snapshot/resource_collectors/backup.py +136 -0
- src/snapshot/resource_collectors/base.py +81 -0
- src/snapshot/resource_collectors/cloudformation.py +55 -0
- src/snapshot/resource_collectors/cloudwatch.py +109 -0
- src/snapshot/resource_collectors/codebuild.py +69 -0
- src/snapshot/resource_collectors/codepipeline.py +82 -0
- src/snapshot/resource_collectors/dynamodb.py +65 -0
- src/snapshot/resource_collectors/ec2.py +240 -0
- src/snapshot/resource_collectors/ecs.py +215 -0
- src/snapshot/resource_collectors/efs_collector.py +102 -0
- src/snapshot/resource_collectors/eks.py +200 -0
- src/snapshot/resource_collectors/elasticache_collector.py +79 -0
- src/snapshot/resource_collectors/elb.py +126 -0
- src/snapshot/resource_collectors/eventbridge.py +156 -0
- src/snapshot/resource_collectors/glue.py +199 -0
- src/snapshot/resource_collectors/iam.py +188 -0
- src/snapshot/resource_collectors/kms.py +111 -0
- src/snapshot/resource_collectors/lambda_func.py +139 -0
- src/snapshot/resource_collectors/rds.py +109 -0
- src/snapshot/resource_collectors/route53.py +86 -0
- src/snapshot/resource_collectors/s3.py +105 -0
- src/snapshot/resource_collectors/secretsmanager.py +70 -0
- src/snapshot/resource_collectors/sns.py +68 -0
- src/snapshot/resource_collectors/sqs.py +82 -0
- src/snapshot/resource_collectors/ssm.py +160 -0
- src/snapshot/resource_collectors/stepfunctions.py +74 -0
- src/snapshot/resource_collectors/vpcendpoints.py +79 -0
- src/snapshot/resource_collectors/waf.py +159 -0
- src/snapshot/storage.py +351 -0
- src/storage/__init__.py +21 -0
- src/storage/audit_store.py +419 -0
- src/storage/database.py +294 -0
- src/storage/group_store.py +763 -0
- src/storage/inventory_store.py +320 -0
- src/storage/resource_store.py +416 -0
- src/storage/schema.py +339 -0
- src/storage/snapshot_store.py +363 -0
- src/utils/__init__.py +12 -0
- src/utils/export.py +305 -0
- src/utils/hash.py +60 -0
- src/utils/logging.py +63 -0
- src/utils/pagination.py +41 -0
- src/utils/paths.py +51 -0
- src/utils/progress.py +41 -0
- src/utils/unsupported_resources.py +306 -0
- src/web/__init__.py +5 -0
- src/web/app.py +97 -0
- src/web/dependencies.py +69 -0
- src/web/routes/__init__.py +1 -0
- src/web/routes/api/__init__.py +18 -0
- src/web/routes/api/charts.py +156 -0
- src/web/routes/api/cleanup.py +186 -0
- src/web/routes/api/filters.py +253 -0
- src/web/routes/api/groups.py +305 -0
- src/web/routes/api/inventories.py +80 -0
- src/web/routes/api/queries.py +202 -0
- src/web/routes/api/resources.py +393 -0
- src/web/routes/api/snapshots.py +314 -0
- src/web/routes/api/views.py +260 -0
- src/web/routes/pages.py +198 -0
- src/web/services/__init__.py +1 -0
- src/web/templates/base.html +955 -0
- src/web/templates/components/navbar.html +31 -0
- src/web/templates/components/sidebar.html +104 -0
- src/web/templates/pages/audit_logs.html +86 -0
- src/web/templates/pages/cleanup.html +279 -0
- src/web/templates/pages/dashboard.html +227 -0
- src/web/templates/pages/diff.html +175 -0
- src/web/templates/pages/error.html +30 -0
- src/web/templates/pages/groups.html +721 -0
- src/web/templates/pages/queries.html +246 -0
- src/web/templates/pages/resources.html +2429 -0
- src/web/templates/pages/snapshot_detail.html +271 -0
- src/web/templates/pages/snapshots.html +429 -0
src/models/cost_report.py
@@ -0,0 +1,87 @@
"""Cost report models for cost analysis and tracking."""

from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, Dict, Optional


@dataclass
class CostBreakdown:
    """Represents cost breakdown for baseline or non-baseline resources."""

    total: float
    by_service: Dict[str, float] = field(default_factory=dict)
    percentage: float = 0.0

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {
            "total": self.total,
            "by_service": self.by_service,
            "percentage": self.percentage,
        }


@dataclass
class CostReport:
    """Represents cost analysis separating baseline vs non-baseline costs."""

    generated_at: datetime
    baseline_snapshot_name: str
    period_start: datetime
    period_end: datetime
    baseline_costs: CostBreakdown
    non_baseline_costs: CostBreakdown
    total_cost: float
    data_complete: bool = True
    data_through: Optional[datetime] = None
    lag_days: int = 0

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {
            "generated_at": self.generated_at.isoformat(),
            "baseline_snapshot_name": self.baseline_snapshot_name,
            "period_start": self.period_start.isoformat(),
            "period_end": self.period_end.isoformat(),
            "baseline_costs": self.baseline_costs.to_dict(),
            "non_baseline_costs": self.non_baseline_costs.to_dict(),
            "total_cost": self.total_cost,
            "data_complete": self.data_complete,
            "data_through": self.data_through.isoformat() if self.data_through else None,
            "lag_days": self.lag_days,
            "summary": {
                "baseline_total": self.baseline_costs.total,
                "baseline_percentage": self.baseline_costs.percentage,
                "non_baseline_total": self.non_baseline_costs.total,
                "non_baseline_percentage": self.non_baseline_costs.percentage,
                "total": self.total_cost,
            },
        }

    @property
    def baseline_percentage(self) -> float:
        """Get baseline cost percentage."""
        return self.baseline_costs.percentage

    @property
    def non_baseline_percentage(self) -> float:
        """Get non-baseline cost percentage."""
        return self.non_baseline_costs.percentage

    def get_top_services(self, limit: int = 5, baseline: bool = True) -> Dict[str, float]:
        """Get top N services by cost.

        Args:
            limit: Number of top services to return
            baseline: If True, return baseline services; if False, non-baseline

        Returns:
            Dictionary of service name to cost, sorted by cost descending
        """
        services = self.baseline_costs.by_service if baseline else self.non_baseline_costs.by_service

        # Sort by cost descending
        sorted_services = sorted(services.items(), key=lambda x: x[1], reverse=True)

        return dict(sorted_services[:limit])
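Usage sketch (not part of the packaged code above): building a CostReport from two CostBreakdown instances and reading the top services. The values and the src.models import path are illustrative assumptions based on the file listing.

# Illustrative only; assumes the package installs under the "src" top-level package.
from datetime import datetime, timezone

from src.models.cost_report import CostBreakdown, CostReport

baseline = CostBreakdown(total=120.0, by_service={"ec2": 80.0, "rds": 40.0}, percentage=60.0)
non_baseline = CostBreakdown(total=80.0, by_service={"lambda": 50.0, "s3": 30.0}, percentage=40.0)

report = CostReport(
    generated_at=datetime.now(timezone.utc),
    baseline_snapshot_name="prod-baseline",
    period_start=datetime(2024, 1, 1, tzinfo=timezone.utc),
    period_end=datetime(2024, 1, 31, tzinfo=timezone.utc),
    baseline_costs=baseline,
    non_baseline_costs=non_baseline,
    total_cost=200.0,
)

print(report.get_top_services(limit=2, baseline=True))  # {'ec2': 80.0, 'rds': 40.0}
print(report.to_dict()["summary"]["baseline_percentage"])  # 60.0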
src/models/deletion_operation.py
@@ -0,0 +1,104 @@
"""Deletion operation model.

Represents a complete restore operation with metadata, filters, and execution context.
"""

from __future__ import annotations

from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import Optional


class OperationMode(Enum):
    """Operation execution mode."""

    DRY_RUN = "dry-run"
    EXECUTE = "execute"


class OperationStatus(Enum):
    """Operation execution status with state transitions."""

    PLANNED = "planned"
    EXECUTING = "executing"
    COMPLETED = "completed"
    PARTIAL = "partial"
    FAILED = "failed"


@dataclass
class DeletionOperation:
    """Deletion operation entity.

    Represents a complete restore operation with metadata, filters, and execution
    context. Tracks overall progress and status of resource deletion.

    State transitions:
        planned → executing → completed (all succeeded)
        planned → executing → partial (some failed)
        planned → executing → failed (critical error)

    Attributes:
        operation_id: Unique identifier for the operation
        baseline_snapshot: Name of baseline snapshot to compare against
        timestamp: When operation was initiated (ISO 8601 UTC)
        account_id: AWS account ID (12-digit number)
        mode: dry-run or execute
        status: Current execution status
        total_resources: Total resources identified for deletion
        succeeded_count: Number successfully deleted (default: 0)
        failed_count: Number that failed to delete (default: 0)
        skipped_count: Number skipped due to protections (default: 0)
        aws_profile: AWS profile used for credentials (optional)
        filters: Resource type and region filters (optional)
        started_at: When execution started (optional, execute mode only)
        completed_at: When execution completed (optional)
        duration_seconds: Total execution duration (optional)
    """

    operation_id: str
    baseline_snapshot: str
    timestamp: datetime
    account_id: str
    mode: OperationMode
    status: OperationStatus
    total_resources: int
    succeeded_count: int = 0
    failed_count: int = 0
    skipped_count: int = 0
    aws_profile: Optional[str] = None
    filters: Optional[dict] = field(default=None)
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    duration_seconds: Optional[float] = None

    def validate(self) -> bool:
        """Validate operation invariants.

        Validation rules:
            - succeeded_count + failed_count + skipped_count == total_resources
            - completed_at must be after started_at
            - dry-run mode must have planned status

        Returns:
            True if validation passes

        Raises:
            ValueError: If any validation rule fails
        """
        # Count validation
        if self.succeeded_count + self.failed_count + self.skipped_count != self.total_resources:
            raise ValueError("Resource counts don't match total")

        # Timing validation
        if self.completed_at and self.started_at:
            if self.completed_at < self.started_at:
                raise ValueError("Completion time before start time")

        # Mode/status consistency
        if self.mode == OperationMode.DRY_RUN and self.status != OperationStatus.PLANNED:
            raise ValueError("Dry-run mode must have planned status")

        return True
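Usage sketch (not part of the packaged code above): a planned dry-run operation whose counts balance, so validate() passes. Values and the src.models import path are illustrative assumptions.

# Illustrative only; assumes the package installs under the "src" top-level package.
from datetime import datetime, timezone

from src.models.deletion_operation import DeletionOperation, OperationMode, OperationStatus

op = DeletionOperation(
    operation_id="op-001",
    baseline_snapshot="prod-baseline",
    timestamp=datetime.now(timezone.utc),
    account_id="123456789012",
    mode=OperationMode.DRY_RUN,
    status=OperationStatus.PLANNED,
    total_resources=3,
    skipped_count=3,  # succeeded + failed + skipped must equal total_resources
)

op.validate()  # True: counts balance and a dry-run operation stays in "planned" status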
src/models/deletion_record.py
@@ -0,0 +1,97 @@
"""Deletion record model.

Individual resource deletion attempt with result and metadata.
"""

from __future__ import annotations

from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import Optional


class DeletionStatus(Enum):
    """Individual resource deletion status."""

    SUCCEEDED = "succeeded"
    FAILED = "failed"
    SKIPPED = "skipped"


@dataclass
class DeletionRecord:
    """Deletion record entity.

    Represents an individual resource deletion attempt with result and metadata.
    Each record belongs to a DeletionOperation and tracks the outcome for a single
    resource.

    Validation rules:
        - status=succeeded: no error_code or protection_reason
        - status=failed: requires error_code
        - status=skipped: requires protection_reason
        - resource_arn must start with "arn:aws:"
        - estimated_monthly_cost must be >= 0 if provided

    Attributes:
        record_id: Unique identifier for this record
        operation_id: Parent operation identifier
        resource_arn: AWS Resource Name (ARN format)
        resource_id: Resource identifier (ID, name)
        resource_type: AWS resource type (format: aws:service:type)
        region: AWS region
        timestamp: When deletion was attempted (ISO 8601 UTC)
        status: Deletion outcome (succeeded, failed, skipped)
        error_code: AWS error code if failed (optional)
        error_message: Human-readable error if failed (optional)
        protection_reason: Why resource was skipped (optional)
        deletion_tier: Tier (1-5) for deletion ordering (optional)
        tags: Resource tags at deletion time (optional)
        estimated_monthly_cost: Estimated cost in USD (optional)
    """

    record_id: str
    operation_id: str
    resource_arn: str
    resource_id: str
    resource_type: str
    region: str
    timestamp: datetime
    status: DeletionStatus
    error_code: Optional[str] = None
    error_message: Optional[str] = None
    protection_reason: Optional[str] = None
    deletion_tier: Optional[int] = None
    tags: Optional[dict] = field(default=None)
    estimated_monthly_cost: Optional[float] = None

    def validate(self) -> bool:
        """Validate record invariants.

        Returns:
            True if validation passes

        Raises:
            ValueError: If any validation rule fails
        """
        # Status-specific validation
        if self.status == DeletionStatus.FAILED:
            if not self.error_code:
                raise ValueError("Failed status requires error_code")
        elif self.status == DeletionStatus.SKIPPED:
            if not self.protection_reason:
                raise ValueError("Skipped status requires protection_reason")
        elif self.status == DeletionStatus.SUCCEEDED:
            if self.error_code or self.protection_reason:
                raise ValueError("Succeeded status cannot have error or protection reason")

        # ARN format validation
        if not self.resource_arn.startswith("arn:aws:"):
            raise ValueError("Invalid ARN format")

        # Cost validation
        if self.estimated_monthly_cost is not None and self.estimated_monthly_cost < 0:
            raise ValueError("Cost cannot be negative")

        return True
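Usage sketch (not part of the packaged code above): a skipped record, which must carry a protection_reason to pass validate(). Values and the src.models import path are illustrative assumptions.

# Illustrative only; assumes the package installs under the "src" top-level package.
from datetime import datetime, timezone

from src.models.deletion_record import DeletionRecord, DeletionStatus

record = DeletionRecord(
    record_id="rec-001",
    operation_id="op-001",
    resource_arn="arn:aws:s3:::example-bucket",
    resource_id="example-bucket",
    resource_type="aws:s3:bucket",
    region="us-east-1",
    timestamp=datetime.now(timezone.utc),
    status=DeletionStatus.SKIPPED,
    protection_reason="Matched protection rule: retain buckets tagged env=prod",
)

record.validate()  # True: skipped records carry a protection_reason and the ARN starts with "arn:aws:"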
src/models/delta_report.py
@@ -0,0 +1,122 @@
"""Delta report models for tracking resource changes."""

from __future__ import annotations

from dataclasses import dataclass, field
from datetime import datetime
from typing import TYPE_CHECKING, Any, Dict, List, Optional

if TYPE_CHECKING:
    from ..delta.models import DriftReport


@dataclass
class ResourceChange:
    """Represents a modified resource in a delta report."""

    resource: Any  # Current Resource instance
    baseline_resource: Any  # Reference Resource instance (keeping field name for compatibility)
    change_type: str  # 'modified'
    old_config_hash: str
    new_config_hash: str
    changes_summary: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {
            "arn": self.resource.arn,
            "resource_type": self.resource.resource_type,
            "name": self.resource.name,
            "region": self.resource.region,
            "change_type": self.change_type,
            "tags": self.resource.tags,
            "old_config_hash": self.old_config_hash,
            "new_config_hash": self.new_config_hash,
            "changes_summary": self.changes_summary,
        }


@dataclass
class DeltaReport:
    """Represents differences between two snapshots."""

    generated_at: datetime
    baseline_snapshot_name: str  # Reference snapshot name (keeping field name for compatibility)
    current_snapshot_name: str
    added_resources: List[Any] = field(default_factory=list)  # List[Resource]
    deleted_resources: List[Any] = field(default_factory=list)  # List[Resource]
    modified_resources: List[ResourceChange] = field(default_factory=list)
    baseline_resource_count: int = 0  # Reference snapshot count (keeping field name for compatibility)
    current_resource_count: int = 0
    drift_report: Optional[DriftReport] = None  # Configuration drift details (when --show-diff is used)

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        result = {
            "generated_at": self.generated_at.isoformat(),
            "baseline_snapshot_name": self.baseline_snapshot_name,
            "current_snapshot_name": self.current_snapshot_name,
            "added_resources": [r.to_dict() for r in self.added_resources],
            "deleted_resources": [r.to_dict() for r in self.deleted_resources],
            "modified_resources": [r.to_dict() for r in self.modified_resources],
            "baseline_resource_count": self.baseline_resource_count,
            "current_resource_count": self.current_resource_count,
            "summary": {
                "added": len(self.added_resources),
                "deleted": len(self.deleted_resources),
                "modified": len(self.modified_resources),
                "unchanged": self.unchanged_count,
                "total_changes": self.total_changes,
            },
        }

        # Include drift details if available
        if self.drift_report is not None:
            result["drift_details"] = self.drift_report.to_dict()

        return result

    @property
    def total_changes(self) -> int:
        """Total number of changes detected."""
        return len(self.added_resources) + len(self.deleted_resources) + len(self.modified_resources)

    @property
    def unchanged_count(self) -> int:
        """Number of unchanged resources."""
        # Resources that existed in reference snapshot and still exist unchanged
        return self.baseline_resource_count - len(self.deleted_resources) - len(self.modified_resources)

    @property
    def has_changes(self) -> bool:
        """Whether any changes were detected."""
        return self.total_changes > 0

    def group_by_service(self) -> Dict[str, Dict[str, List]]:
        """Group changes by service type.

        Returns:
            Dictionary mapping service type to changes dict with 'added', 'deleted', 'modified' lists
        """

        grouped: Dict[str, Dict[str, List[Any]]] = {}

        for resource in self.added_resources:
            service = resource.resource_type
            if service not in grouped:
                grouped[service] = {"added": [], "deleted": [], "modified": []}
            grouped[service]["added"].append(resource)

        for resource in self.deleted_resources:
            service = resource.resource_type
            if service not in grouped:
                grouped[service] = {"added": [], "deleted": [], "modified": []}
            grouped[service]["deleted"].append(resource)

        for change in self.modified_resources:
            service = change.resource.resource_type
            if service not in grouped:
                grouped[service] = {"added": [], "deleted": [], "modified": []}
            grouped[service]["modified"].append(change)

        return grouped
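Usage sketch (not part of the packaged code above): with the default empty change lists, only the snapshot counts drive the derived properties. Values and the src.models import path are illustrative assumptions.

# Illustrative only; assumes the package installs under the "src" top-level package.
from datetime import datetime, timezone

from src.models.delta_report import DeltaReport

report = DeltaReport(
    generated_at=datetime.now(timezone.utc),
    baseline_snapshot_name="prod-baseline",
    current_snapshot_name="prod-2024-02-01",
    baseline_resource_count=42,
    current_resource_count=42,
)

print(report.has_changes)      # False
print(report.total_changes)    # 0
print(report.unchanged_count)  # 42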
src/models/efs_resource.py
@@ -0,0 +1,80 @@
"""EFS resource model."""

from __future__ import annotations

import re
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, Optional

from ..utils.hash import compute_config_hash


@dataclass
class EFSFileSystem:
    """Represents an AWS EFS file system."""

    file_system_id: str
    arn: str
    encryption_enabled: bool
    kms_key_id: Optional[str]
    performance_mode: str  # "generalPurpose" or "maxIO"
    lifecycle_state: str  # "available", "creating", "deleting", "deleted"
    tags: Dict[str, str]
    region: str
    created_at: datetime

    def validate(self) -> bool:
        """Validate EFS file system data.

        Returns:
            True if valid, raises ValueError if invalid
        """
        # Validate file_system_id format (must start with fs-)
        if not re.match(r"^fs-[a-fA-F0-9]+$", self.file_system_id):
            raise ValueError(f"Invalid file_system_id format: {self.file_system_id}. Must match pattern: fs-*")

        # Validate performance_mode
        valid_performance_modes = ["generalPurpose", "maxIO"]
        if self.performance_mode not in valid_performance_modes:
            raise ValueError(
                f"Invalid performance_mode: {self.performance_mode}. "
                f"Must be one of: {', '.join(valid_performance_modes)}"
            )

        # Validate lifecycle_state
        valid_states = ["available", "creating", "deleting", "deleted"]
        if self.lifecycle_state not in valid_states:
            raise ValueError(
                f"Invalid lifecycle_state: {self.lifecycle_state}. " f"Must be one of: {', '.join(valid_states)}"
            )

        return True

    def to_resource_dict(self) -> Dict[str, Any]:
        """Convert to Resource-compatible dictionary.

        Returns:
            Dictionary that can be used to create a Resource object
        """
        # Build raw_config with all EFS-specific attributes
        raw_config = {
            "file_system_id": self.file_system_id,
            "arn": self.arn,
            "encryption_enabled": self.encryption_enabled,
            "kms_key_id": self.kms_key_id,
            "performance_mode": self.performance_mode,
            "lifecycle_state": self.lifecycle_state,
            "region": self.region,
        }

        return {
            "arn": self.arn,
            "resource_type": "efs:file-system",
            "name": self.file_system_id,
            "region": self.region,
            "tags": self.tags,
            "created_at": self.created_at,
            "raw_config": raw_config,
            "config_hash": compute_config_hash(raw_config),
        }
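Usage sketch (not part of the packaged code above): a file system that passes validate() and converts to a Resource-compatible dictionary. Values and the src.models import path are illustrative assumptions.

# Illustrative only; assumes the package installs under the "src" top-level package.
from datetime import datetime, timezone

from src.models.efs_resource import EFSFileSystem

fs = EFSFileSystem(
    file_system_id="fs-0abc1234",
    arn="arn:aws:elasticfilesystem:us-east-1:123456789012:file-system/fs-0abc1234",
    encryption_enabled=True,
    kms_key_id=None,
    performance_mode="generalPurpose",
    lifecycle_state="available",
    tags={"env": "dev"},
    region="us-east-1",
    created_at=datetime.now(timezone.utc),
)

fs.validate()  # True: id matches fs-<hex>, mode and state are in the allowed sets
print(fs.to_resource_dict()["resource_type"])  # efs:file-system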
src/models/elasticache_resource.py
@@ -0,0 +1,90 @@
"""ElastiCache resource model."""

from __future__ import annotations

from dataclasses import dataclass, field
from typing import Any, Dict

from ..utils.hash import compute_config_hash


@dataclass
class ElastiCacheCluster:
    """Represents an AWS ElastiCache cluster (Redis or Memcached).

    This model captures both Redis and Memcached clusters with their
    encryption settings, node configuration, and metadata.

    Attributes:
        cluster_id: Unique identifier for the cluster (max 50 chars)
        arn: Amazon Resource Name for the cluster
        engine: Cache engine type (redis or memcached)
        node_type: Cache node type (e.g., cache.t3.micro)
        num_cache_nodes: Number of cache nodes in the cluster
        engine_version: Engine version string
        encryption_at_rest: Whether data at rest is encrypted
        encryption_in_transit: Whether data in transit is encrypted
        tags: Resource tags as key-value pairs
        region: AWS region
    """

    cluster_id: str
    arn: str
    engine: str
    node_type: str
    num_cache_nodes: int
    engine_version: str
    encryption_at_rest: bool
    encryption_in_transit: bool
    tags: Dict[str, str] = field(default_factory=dict)
    region: str = "us-east-1"

    def __post_init__(self) -> None:
        """Validate ElastiCache cluster data after initialization."""
        # Validate cluster_id length (AWS limit is 50 characters)
        if len(self.cluster_id) > 50:
            raise ValueError("cluster_id must be 50 characters or less")

        # Validate engine type
        if self.engine not in ("redis", "memcached"):
            raise ValueError("engine must be 'redis' or 'memcached'")

        # Memcached does not support encryption at rest
        if self.engine == "memcached" and self.encryption_at_rest:
            raise ValueError("Memcached does not support encryption at rest")

        # Validate num_cache_nodes
        if self.num_cache_nodes < 1:
            raise ValueError("num_cache_nodes must be at least 1")

    def to_resource_dict(self) -> Dict[str, Any]:
        """Convert ElastiCache cluster to Resource dictionary.

        Returns:
            Dictionary with Resource fields suitable for creating a Resource object
        """
        # Build raw_config that matches ElastiCache API response structure
        raw_config = {
            "CacheClusterId": self.cluster_id,
            "ARN": self.arn,
            "Engine": self.engine,
            "CacheNodeType": self.node_type,
            "NumCacheNodes": self.num_cache_nodes,
            "EngineVersion": self.engine_version,
            "AtRestEncryptionEnabled": self.encryption_at_rest,
            "TransitEncryptionEnabled": self.encryption_in_transit,
            "CacheClusterStatus": "available",
        }

        # Compute config hash from raw_config
        config_hash = compute_config_hash(raw_config)

        return {
            "arn": self.arn,
            "resource_type": "elasticache:cluster",
            "name": self.cluster_id,
            "region": self.region,
            "tags": self.tags,
            "config_hash": config_hash,
            "raw_config": raw_config,
        }
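Usage sketch (not part of the packaged code above): a Redis cluster that passes __post_init__ validation and converts to a Resource dictionary. Values and the src.models import path are illustrative assumptions.

# Illustrative only; assumes the package installs under the "src" top-level package.
from src.models.elasticache_resource import ElastiCacheCluster

cluster = ElastiCacheCluster(
    cluster_id="sessions-cache",
    arn="arn:aws:elasticache:us-east-1:123456789012:cluster:sessions-cache",
    engine="redis",
    node_type="cache.t3.micro",
    num_cache_nodes=1,
    engine_version="7.0",
    encryption_at_rest=True,
    encryption_in_transit=True,
    region="us-east-1",
)  # __post_init__ would raise ValueError for, e.g., a memcached cluster with encryption_at_rest=True

print(cluster.to_resource_dict()["resource_type"])  # elasticache:cluster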