cdk-factory 0.16.15__py3-none-any.whl → 0.20.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- cdk_factory/configurations/base_config.py +23 -24
- cdk_factory/configurations/cdk_config.py +1 -1
- cdk_factory/configurations/deployment.py +12 -0
- cdk_factory/configurations/devops.py +1 -1
- cdk_factory/configurations/resources/acm.py +9 -2
- cdk_factory/configurations/resources/auto_scaling.py +7 -5
- cdk_factory/configurations/resources/cloudfront.py +7 -2
- cdk_factory/configurations/resources/ecr.py +1 -1
- cdk_factory/configurations/resources/ecs_cluster.py +12 -5
- cdk_factory/configurations/resources/ecs_service.py +30 -3
- cdk_factory/configurations/resources/lambda_edge.py +18 -4
- cdk_factory/configurations/resources/load_balancer.py +8 -9
- cdk_factory/configurations/resources/monitoring.py +8 -3
- cdk_factory/configurations/resources/rds.py +8 -9
- cdk_factory/configurations/resources/route53.py +5 -0
- cdk_factory/configurations/resources/rum.py +7 -2
- cdk_factory/configurations/resources/s3.py +10 -2
- cdk_factory/configurations/resources/security_group_full_stack.py +7 -8
- cdk_factory/configurations/resources/vpc.py +19 -0
- cdk_factory/configurations/workload.py +32 -2
- cdk_factory/constructs/cloudfront/cloudfront_distribution_construct.py +1 -1
- cdk_factory/constructs/ecr/ecr_construct.py +9 -2
- cdk_factory/constructs/lambdas/policies/policy_docs.py +4 -4
- cdk_factory/interfaces/istack.py +4 -4
- cdk_factory/interfaces/networked_stack_mixin.py +6 -6
- cdk_factory/interfaces/standardized_ssm_mixin.py +684 -0
- cdk_factory/interfaces/vpc_provider_mixin.py +64 -33
- cdk_factory/lambdas/edge/ip_gate/handler.py +42 -40
- cdk_factory/pipeline/pipeline_factory.py +3 -3
- cdk_factory/stack_library/__init__.py +3 -2
- cdk_factory/stack_library/acm/acm_stack.py +7 -17
- cdk_factory/stack_library/api_gateway/api_gateway_stack.py +84 -59
- cdk_factory/stack_library/auto_scaling/auto_scaling_stack.py +454 -537
- cdk_factory/stack_library/cloudfront/cloudfront_stack.py +76 -22
- cdk_factory/stack_library/code_artifact/code_artifact_stack.py +5 -27
- cdk_factory/stack_library/cognito/cognito_stack.py +152 -92
- cdk_factory/stack_library/dynamodb/dynamodb_stack.py +19 -15
- cdk_factory/stack_library/ecr/ecr_stack.py +2 -2
- cdk_factory/stack_library/ecs/__init__.py +1 -3
- cdk_factory/stack_library/ecs/ecs_cluster_stack.py +159 -75
- cdk_factory/stack_library/ecs/ecs_service_stack.py +59 -52
- cdk_factory/stack_library/lambda_edge/EDGE_LOG_RETENTION_TODO.md +226 -0
- cdk_factory/stack_library/lambda_edge/LAMBDA_EDGE_LOG_RETENTION_BLOG.md +215 -0
- cdk_factory/stack_library/lambda_edge/lambda_edge_stack.py +240 -83
- cdk_factory/stack_library/load_balancer/load_balancer_stack.py +139 -212
- cdk_factory/stack_library/rds/rds_stack.py +74 -98
- cdk_factory/stack_library/route53/route53_stack.py +246 -40
- cdk_factory/stack_library/rum/rum_stack.py +108 -91
- cdk_factory/stack_library/security_group/security_group_full_stack.py +10 -53
- cdk_factory/stack_library/security_group/security_group_stack.py +12 -19
- cdk_factory/stack_library/simple_queue_service/sqs_stack.py +1 -34
- cdk_factory/stack_library/stack_base.py +5 -0
- cdk_factory/stack_library/vpc/vpc_stack.py +171 -130
- cdk_factory/stack_library/websites/static_website_stack.py +7 -3
- cdk_factory/utilities/api_gateway_integration_utility.py +24 -16
- cdk_factory/utilities/environment_services.py +5 -5
- cdk_factory/utilities/json_loading_utility.py +1 -1
- cdk_factory/validation/config_validator.py +483 -0
- cdk_factory/version.py +1 -1
- {cdk_factory-0.16.15.dist-info → cdk_factory-0.20.0.dist-info}/METADATA +1 -1
- {cdk_factory-0.16.15.dist-info → cdk_factory-0.20.0.dist-info}/RECORD +64 -62
- cdk_factory/interfaces/enhanced_ssm_parameter_mixin.py +0 -321
- cdk_factory/interfaces/ssm_parameter_mixin.py +0 -454
- {cdk_factory-0.16.15.dist-info → cdk_factory-0.20.0.dist-info}/WHEEL +0 -0
- {cdk_factory-0.16.15.dist-info → cdk_factory-0.20.0.dist-info}/entry_points.txt +0 -0
- {cdk_factory-0.16.15.dist-info → cdk_factory-0.20.0.dist-info}/licenses/LICENSE +0 -0
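The headline change in 0.20.0 is visible in the RECORD: the two overlapping SSM mixins (ssm_parameter_mixin.py, 454 lines, and enhanced_ssm_parameter_mixin.py, 321 lines) are deleted in favor of a single standardized_ssm_mixin.py (684 lines), with a new validation/config_validator.py beside it. The expanded auto_scaling_stack.py diff below is representative of how the stack modules migrate. As a rough mental model, here is a sketch of the mixin's contract; the method names (setup_ssm_integration, process_ssm_imports, get_all_ssm_imports, export_ssm_parameters) are taken from the diff, while the bodies are illustrative assumptions, not cdk-factory's implementation:

```python
from typing import Any, Dict


class StandardizedSsmMixinSketch:
    """Hypothetical outline of the mixin contract used in the diff below."""

    def setup_ssm_integration(self, *, scope, config, resource_type,
                              resource_name, deployment, workload) -> None:
        # Record context once; later imports/exports derive their parameter
        # paths from (resource_type, resource_name, deployment).
        self._ssm_context = {
            "scope": scope, "config": config,
            "resource_type": resource_type, "resource_name": resource_name,
            "deployment": deployment, "workload": workload,
        }
        self._ssm_imports: Dict[str, Any] = {}

    def process_ssm_imports(self) -> None:
        # Resolve each configured import key (e.g. "security_group_ids",
        # "cluster_name", "target_group_arns") into a concrete value.
        config = self._ssm_context["config"]
        for key, path in getattr(config, "ssm_imports", {}).items():
            self._ssm_imports[key] = path  # real code would read SSM here

    def get_all_ssm_imports(self) -> Dict[str, Any]:
        return self._ssm_imports

    def export_ssm_parameters(self, values: Dict[str, str]) -> Dict[str, str]:
        # Real code would create SSM StringParameter constructs; the sketch
        # just echoes the name -> value mapping it would publish.
        return dict(values)
```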
cdk_factory/stack_library/auto_scaling/auto_scaling_stack.py (+454 -537):

```diff
@@ -1,5 +1,5 @@
 """
-Auto Scaling Group Stack Pattern for CDK-Factory
+Auto Scaling Group Stack Pattern for CDK-Factory (Standardized SSM Version)
 Maintainers: Eric Wilson
 MIT License. See Project Root for the license information.
 """
```
```diff
@@ -9,11 +9,11 @@ from typing import Dict, Any, List, Optional
 import aws_cdk as cdk
 from aws_cdk import aws_ec2 as ec2
 from aws_cdk import aws_autoscaling as autoscaling
-from aws_cdk import aws_cloudwatch as cloudwatch
 from aws_cdk import aws_iam as iam
-from aws_cdk import aws_ssm as ssm
 from aws_cdk import aws_ecs as ecs
-from aws_cdk import Duration
+from aws_cdk import Duration
+
+from aws_cdk.aws_autoscaling import HealthChecks, AdditionalHealthCheckType
 from aws_lambda_powertools import Logger
 from constructs import Construct
 
```
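The interesting part of this hunk is the import swap: aws_cloudwatch and the module-level aws_ssm import go away, and the newer HealthChecks / AdditionalHealthCheckType API comes in. A minimal sketch of that API (assuming a recent aws-cdk-lib that ships HealthChecks, which supersedes the deprecated single health_check property):

```python
# Hedged sketch: requires an aws-cdk-lib version that exports HealthChecks.
from aws_cdk import Duration
from aws_cdk.aws_autoscaling import AdditionalHealthCheckType, HealthChecks

# EC2 status checks only:
ec2_only = HealthChecks.ec2(grace_period=Duration.seconds(300))

# EC2 plus ELB target-group health; EC2 is always included, ELB is "additional":
ec2_plus_elb = HealthChecks.with_additional_checks(
    additional_types=[AdditionalHealthCheckType.ELB],
    grace_period=Duration.seconds(300),
)
```

This is the value that _create_auto_scaling_group later passes to the health_checks parameter.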
```diff
@@ -22,29 +22,38 @@ from cdk_factory.configurations.stack import StackConfig
 from cdk_factory.configurations.resources.auto_scaling import AutoScalingConfig
 from cdk_factory.interfaces.istack import IStack
 from cdk_factory.interfaces.vpc_provider_mixin import VPCProviderMixin
+from cdk_factory.interfaces.standardized_ssm_mixin import StandardizedSsmMixin
 from cdk_factory.stack.stack_module_registry import register_stack
 from cdk_factory.workload.workload_factory import WorkloadConfig
 
-logger = Logger(service="
+logger = Logger(service="AutoScalingStackStandardized")
 
 
 @register_stack("auto_scaling_library_module")
 @register_stack("auto_scaling_stack")
-class AutoScalingStack(IStack, VPCProviderMixin):
+class AutoScalingStack(IStack, VPCProviderMixin, StandardizedSsmMixin):
     """
-    Reusable stack for AWS Auto Scaling Groups.
-
-
-
+    Reusable stack for AWS Auto Scaling Groups with standardized SSM integration.
+
+    This version uses the StandardizedSsmMixin to provide consistent SSM parameter
+    handling across all CDK Factory modules.
+
+    Key Features:
+    - Standardized SSM import/export patterns
+    - Template variable resolution
+    - Comprehensive validation
+    - Clear error handling
+    - Backward compatibility
     """
 
     def __init__(self, scope: Construct, id: str, **kwargs) -> None:
-        # Initialize parent
+        # Initialize parent classes properly
         super().__init__(scope, id, **kwargs)
-
+
         # Initialize VPC cache from mixin
         self._initialize_vpc_cache()
-
+
+        # Initialize module attributes
         self.asg_config = None
         self.stack_config = None
         self.deployment = None
```
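With StandardizedSsmMixin in the bases, most wiring moves into configuration. A hypothetical config fragment showing the import keys this class consumes later in the diff (security_group_ids, cluster_name, target_group_arns); the schema and parameter paths here are illustrative assumptions, not documented cdk-factory config:

```python
# Hypothetical stack config; key names mirror the diff, paths are invented.
asg_stack_config = {
    "auto_scaling": {
        "name": "web",
        "instance_type": "t3.small",
        "min_capacity": 1,
        "max_capacity": 4,
        "ssm": {
            "imports": {
                "security_group_ids": "/my-app/prod/security-group/web/ids",
                "cluster_name": "/my-app/prod/ecs/cluster/name",
                "target_group_arns": "/my-app/prod/alb/target-group/arns",
            },
            "exports": ["auto_scaling_group_name", "auto_scaling_group_arn"],
        },
    }
}
```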
```diff
@@ -56,9 +65,6 @@ class AutoScalingStack(IStack, VPCProviderMixin):
         self.user_data = None
         self.user_data_commands = []  # Store raw commands for ECS cluster detection
         self.ecs_cluster = None
-
-        # SSM imports storage is now handled by the enhanced SsmParameterMixin via IStack
-        # VPC caching is now handled by VPCProviderMixin
 
     def build(
         self,
```
```diff
@@ -85,21 +91,33 @@ class AutoScalingStack(IStack, VPCProviderMixin):
         )
         asg_name = deployment.build_resource_name(self.asg_config.name)
 
-        #
-        self.
+        # Setup standardized SSM integration
+        self.setup_ssm_integration(
+            scope=self,
+            config=self.asg_config,
+            resource_type="auto_scaling",
+            resource_name=asg_name,
+            deployment=deployment,
+            workload=workload,
+        )
+
+        # Process SSM imports using standardized method
+        self.process_ssm_imports()
 
-        # Get security groups
+        # Get security groups using standardized approach
         self.security_groups = self._get_security_groups()
 
         # Create IAM role for instances
         self.instance_role = self._create_instance_role(asg_name)
 
-        # Create
-        self.
+        # Create VPC once to be reused by both ECS cluster and ASG
+        self._vpc = None  # Store VPC for reuse
 
         # Create ECS cluster if ECS configuration is detected
-
-
+        self.ecs_cluster = self._create_ecs_cluster_if_needed()
+
+        # Create user data (after ECS cluster so it can reference it)
+        self.user_data = self._create_user_data()
 
         # Create launch template
         self.launch_template = self._create_launch_template(asg_name)
```
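Note the ordering build() now enforces: SSM integration is set up and imports are resolved before anything consumes them, and user data is created after the ECS cluster lookup so {{cluster_name}} placeholders can resolve. Condensed view (a sketch with simplified signatures, not the actual API):

```python
# Sketch of the dependency order in build(); 'stack' stands in for the
# AutoScalingStack instance and asg_name for the built resource name.
def build_order(stack, asg_name: str) -> None:
    stack.process_ssm_imports()                                # resolve imports first
    stack.security_groups = stack._get_security_groups()       # consumes imports
    stack.instance_role = stack._create_instance_role(asg_name)
    stack.ecs_cluster = stack._create_ecs_cluster_if_needed()  # may import cluster_name
    stack.user_data = stack._create_user_data()                # can reference the cluster
    stack.launch_template = stack._create_launch_template(asg_name)
```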
```diff
@@ -110,82 +128,101 @@ class AutoScalingStack(IStack, VPCProviderMixin):
         # Add scaling policies
         self._add_scaling_policies()
 
-        # Add
-        self.
-
-        # Export resources
-        self._export_resources(asg_name)
-
-    @property
-    def vpc(self) -> ec2.IVpc:
-        """Get the VPC for the Auto Scaling Group using VPCProviderMixin"""
-        if not self.asg_config:
-            raise AttributeError("AutoScalingStack not properly initialized. Call build() first.")
-
-        # Use VPCProviderMixin to resolve VPC with proper subnet handling
-        return self.resolve_vpc(
-            config=self.asg_config,
-            deployment=self.deployment,
-            workload=self.workload
-        )
-
-    def _get_target_group_arns(self) -> List[str]:
-        """Get target group ARNs from SSM imports using enhanced SsmParameterMixin"""
-        target_group_arns = []
-
-        # Check if we have SSM imports for target groups using enhanced mixin
-        if self.has_ssm_import("target_group_arns"):
-            imported_tg_arns = self.get_ssm_imported_value("target_group_arns", [])
-            if isinstance(imported_tg_arns, list):
-                target_group_arns.extend(imported_tg_arns)
-            else:
-                target_group_arns.append(imported_tg_arns)
-
-        # see if we have any directly defined in the config
-        if self.asg_config.target_group_arns:
-            for arn in self.asg_config.target_group_arns:
-                logger.info(f"Adding target group ARN: {arn}")
-                target_group_arns.append(arn)
+        # Add update policy
+        self._add_update_policy()
 
-
-
-    def _attach_target_groups(self, asg: autoscaling.AutoScalingGroup) -> None:
-        """Attach the Auto Scaling Group to target groups"""
-        target_group_arns = self._get_target_group_arns()
+        # Export SSM parameters
+        self._export_ssm_parameters()
 
-
-        logger.warning("No target group ARNs found for Auto Scaling Group")
-        print(
-            "⚠️ No target group ARNs found for Auto Scaling Group. Nothing will be attached."
-        )
-        return
+        logger.info(f"Auto Scaling Group {asg_name} built successfully")
 
-
-
-
+    def _get_ssm_imports(self) -> Dict[str, Any]:
+        """Get SSM imports from standardized mixin processing"""
+        return self.get_all_ssm_imports()
 
     def _get_security_groups(self) -> List[ec2.ISecurityGroup]:
-        """
+        """
+        Get security groups for the Auto Scaling Group using standardized SSM imports.
+
+        Returns:
+            List of security group references
+        """
         security_groups = []
-
-
-
-
-
+
+        # Primary method: Use standardized SSM imports
+        ssm_imports = self._get_ssm_imports()
+        if "security_group_ids" in ssm_imports:
+            imported_sg_ids = ssm_imports["security_group_ids"]
+            if isinstance(imported_sg_ids, list):
+                for idx, sg_id in enumerate(imported_sg_ids):
                     security_groups.append(
                         ec2.SecurityGroup.from_security_group_id(
-                            self, f"SecurityGroup-{
+                            self, f"SecurityGroup-SSM-{idx}", sg_id
                         )
                     )
+                logger.info(
+                    f"Added {len(imported_sg_ids)} security groups from SSM imports"
+                )
             else:
-                # TODO: add some additional checks to make it more robust
                 security_groups.append(
                     ec2.SecurityGroup.from_security_group_id(
-                        self, f"SecurityGroup-
+                        self, f"SecurityGroup-SSM-0", imported_sg_ids
                     )
                 )
+                logger.info(f"Added security group from SSM imports")
+
+        # Fallback: Check for direct configuration (backward compatibility)
+        elif self.asg_config.security_group_ids:
+            logger.warning(
+                "Using direct security group configuration - consider migrating to SSM imports"
+            )
+            for idx, sg_id in enumerate(self.asg_config.security_group_ids):
+                logger.info(f"Adding security group from direct config: {sg_id}")
+                # Handle comma-separated security group IDs
+                if "," in sg_id:
+                    blocks = sg_id.split(",")
+                    for block_idx, block in enumerate(blocks):
+                        security_groups.append(
+                            ec2.SecurityGroup.from_security_group_id(
+                                self,
+                                f"SecurityGroup-Direct-{idx}-{block_idx}",
+                                block.strip(),
+                            )
+                        )
+                else:
+                    security_groups.append(
+                        ec2.SecurityGroup.from_security_group_id(
+                            self, f"SecurityGroup-Direct-{idx}", sg_id
+                        )
+                    )
+        else:
+            logger.warning(
+                "No security groups found from SSM imports or direct configuration"
+            )
+
         return security_groups
 
+    def _get_vpc_id(self) -> str:
+        """
+        Get VPC ID using the centralized VPC provider mixin.
+        """
+        # Use the centralized VPC resolution from VPCProviderMixin
+        vpc = self.resolve_vpc(
+            config=self.asg_config, deployment=self.deployment, workload=self.workload
+        )
+        return vpc.vpc_id
+
+    def _get_subnet_ids(self) -> List[str]:
+        """
+        Get subnet IDs using standardized SSM approach.
+        """
+        # Primary method: Use standardized SSM imports
+        # ssm_imports = self._get_ssm_imports()
+
+        subnet_ids = self.get_subnet_ids(self.asg_config)
+
+        return subnet_ids
+
     def _create_instance_role(self, asg_name: str) -> iam.Role:
         """Create IAM role for EC2 instances"""
         role = iam.Role(
```
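The direct-configuration fallback above also tolerates comma-separated IDs inside a single list entry. Its parsing behavior, isolated as plain Python (no CDK required):

```python
from typing import List


def split_security_group_ids(values: List[str]) -> List[str]:
    """Mirror the fallback: ["sg-a", "sg-b, sg-c"] -> ["sg-a", "sg-b", "sg-c"]."""
    result: List[str] = []
    for value in values:
        # Each entry may itself hold a comma-separated list of IDs.
        result.extend(part.strip() for part in value.split(","))
    return result


assert split_security_group_ids(["sg-a", "sg-b, sg-c"]) == ["sg-a", "sg-b", "sg-c"]
```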
```diff
@@ -201,521 +238,401 @@ class AutoScalingStack(IStack, VPCProviderMixin):
                 iam.ManagedPolicy.from_aws_managed_policy_name(policy_name)
             )
 
-
-        for policy_config in self.asg_config.iam_inline_policies:
-            policy_name = policy_config.get("name", "CustomPolicy")
-            statements = policy_config.get("statements", [])
-
-            if not statements:
-                logger.warning(f"No statements found for inline policy {policy_name}, skipping")
-                continue
-
-            # Build policy statements
-            policy_statements = []
-            for stmt in statements:
-                effect = iam.Effect.ALLOW if stmt.get("effect", "Allow") == "Allow" else iam.Effect.DENY
-                actions = stmt.get("actions", [])
-                resources = stmt.get("resources", [])
-
-                if not actions or not resources:
-                    logger.warning(f"Incomplete statement in policy {policy_name}, skipping")
-                    continue
-
-                policy_statements.append(
-                    iam.PolicyStatement(
-                        effect=effect,
-                        actions=actions,
-                        resources=resources
-                    )
-                )
-
-            if policy_statements:
-                role.add_to_principal_policy(policy_statements[0])
-                for stmt in policy_statements[1:]:
-                    role.add_to_principal_policy(stmt)
-
-                logger.info(f"Added inline policy {policy_name} with {len(policy_statements)} statements")
-
+        logger.info(f"Created instance role: {role.role_name}")
         return role
 
     def _create_user_data(self) -> ec2.UserData:
         """Create user data for EC2 instances"""
         user_data = ec2.UserData.for_linux()
 
-        #
-
-
-        #
-
-
-        #
-
-
-
-
-
+        # Add basic setup commands
+        # this will break amazon linux 2023 which uses dnf instead of yum
+        # user_data.add_commands(
+        #     "#!/bin/bash",
+        #     "yum update -y",
+        #     "yum install -y aws-cfn-bootstrap",
+        # )
+
+        # Add user data commands from configuration
+        if self.asg_config.user_data_commands:
+            # Process template variables in user data commands
+            processed_commands = []
+            ssm_imports = self._get_ssm_imports()
+            for command in self.asg_config.user_data_commands:
+                processed_command = command
+                # Substitute SSM-imported values
+                if "cluster_name" in ssm_imports and "{{cluster_name}}" in command:
+                    cluster_name = ssm_imports["cluster_name"]
+                    processed_command = command.replace(
+                        "{{cluster_name}}", cluster_name
+                    )
+                processed_commands.append(processed_command)
+
+            user_data.add_commands(*processed_commands)
+            self.user_data_commands = processed_commands
+
+        # Add ECS cluster configuration if needed
+        if self.ecs_cluster:
+            # Use the SSM-imported cluster name if available, otherwise fallback to default format
+            ssm_imports = self._get_ssm_imports()
+            if "cluster_name" in ssm_imports:
+                cluster_name = ssm_imports["cluster_name"]
+                ecs_commands = [
+                    f"echo 'ECS_CLUSTER={cluster_name}' >> /etc/ecs/ecs.config",
+                    "systemctl restart ecs",
+                ]
+            else:
+                # Fallback to default naming pattern
+                ecs_commands = [
+                    "echo 'ECS_CLUSTER={}{}' >> /etc/ecs/ecs.config".format(
+                        self.deployment.workload_name, self.deployment.environment
+                    ),
+                    "systemctl restart ecs",
+                ]
+            user_data.add_commands(*ecs_commands)
 
-
-
-
+        logger.info(
+            f"Created user data with {len(self.user_data_commands)} custom commands"
+        )
+        return user_data
 
-
-
-        if
-        self.
+    def _get_or_create_vpc(self) -> ec2.Vpc:
+        """Get or create VPC for reuse across the stack"""
+        if self._vpc is None:
+            vpc_id = self._get_vpc_id()
+            subnet_ids = self._get_subnet_ids()
 
-
+            # Create VPC and subnets from imported values
+            self._vpc = ec2.Vpc.from_vpc_attributes(
+                self,
+                "ImportedVPC",
+                vpc_id=vpc_id,
+                availability_zones=[
+                    "us-east-1a",
+                    "us-east-1b",
+                ],  # Add required availability zones
+            )
 
-
-
-
-
-
-
-
-
-        for script_config in self.asg_config.user_data_scripts:
-            script_type = script_config.get("type", "file")
-
-            if script_type == "file":
-                # Load script from file
-                script_path = script_config.get("path")
-                if not script_path:
-                    logger.warning("Script path not specified, skipping")
-                    continue
-
-                # Resolve path (relative to project root or absolute)
-                path = Path(script_path)
-                if not path.is_absolute():
-                    # Try relative to current working directory
-                    path = Path.cwd() / script_path
-
-                if not path.exists():
-                    logger.warning(f"Script file not found: {path}, skipping")
-                    continue
-
-                # Read script content
-                try:
-                    with open(path, 'r') as f:
-                        script_content = f.read()
-                except Exception as e:
-                    logger.error(f"Failed to read script file {path}: {e}")
-                    continue
-
-            elif script_type == "inline":
-                # Use inline script content
-                script_content = script_config.get("content", "")
-                if not script_content:
-                    logger.warning("Inline script content is empty, skipping")
-                    continue
+            # Create and store subnets if we have subnet IDs
+            self._subnets = []
+            if subnet_ids:
+                for i, subnet_id in enumerate(subnet_ids):
+                    subnet = ec2.Subnet.from_subnet_id(
+                        self, f"ImportedSubnet-{i}", subnet_id
+                    )
+                    self._subnets.append(subnet)
             else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    def _substitute_variables(self, command: str) -> str:
-        """
-        Perform variable substitution on a user data command.
-        Uses workload and deployment configuration for substitution.
-        """
-        if not command:
-            return command
-
-        # Start with the original command
-        substituted_command = command
-
-        # Define available variables for substitution
-        variables = {}
-
-        # Add workload variables
-        if self.workload:
-            variables.update({
-                "WORKLOAD_NAME": getattr(self.workload, 'name', ''),
-                "ENVIRONMENT": getattr(self.workload, 'environment', ''),
-                "WORKLOAD": getattr(self.workload, 'name', ''),
-            })
-
-        # Add deployment variables
-        if self.deployment:
-            variables.update({
-                "DEPLOYMENT_NAME": getattr(self.deployment, 'name', ''),
-                "REGION": getattr(self.deployment, 'region', ''),
-                "ACCOUNT": getattr(self.deployment, 'account', ''),
-            })
-
-        # Add stack-level variables
-        variables.update({
-            "STACK_NAME": self.stack_name,
-        })
-
-        # Perform substitution
-        for var_name, var_value in variables.items():
-            if var_value is not None:
-                placeholder = f"{{{{{var_name}}}}}"  # {{VAR_NAME}}
-                substituted_command = substituted_command.replace(placeholder, str(var_value))
-
-        return substituted_command
-
-    def _add_container_user_data(
-        self, user_data: ec2.UserData, container_config: Dict[str, Any]
-    ) -> None:
-        """Add container-specific user data commands"""
-        # Install Docker
-        user_data.add_commands(
-            "dnf -y update", "dnf -y install docker jq", "systemctl enable --now docker"
-        )
-
-        # ECR configuration
-        if "ecr" in container_config:
-            ecr_config = container_config["ecr"]
-            user_data.add_commands(
-                f"ACCOUNT_ID={ecr_config.get('account_id', self.account)}",
-                f"REGION={ecr_config.get('region', self.region)}",
-                f"REPO={ecr_config.get('repo', 'app')}",
-                f"TAG={ecr_config.get('tag', 'latest')}",
-                "aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin ${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com",
-                "docker pull ${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/${REPO}:${TAG}",
+                # Use default subnets from VPC
+                self._subnets = self._vpc.public_subnets
+
+        return self._vpc
+
+    def _get_subnets(self) -> List[ec2.Subnet]:
+        """Get the subnets from the shared VPC"""
+        return getattr(self, "_subnets", [])
+
+    def _create_ecs_cluster_if_needed(self) -> Optional[ecs.Cluster]:
+        """Create ECS cluster if ECS configuration is detected"""
+        # Check if user data contains ECS configuration (use raw config since user_data_commands might not be set yet)
+        ecs_detected = False
+        if self.asg_config.user_data_commands:
+            ecs_detected = any(
+                "ECS_CLUSTER" in cmd for cmd in self.asg_config.user_data_commands
             )
 
-
-
-
-
-
-
-
-
-
+        if ecs_detected:
+            ssm_imports = self._get_ssm_imports()
+            if "cluster_name" in ssm_imports:
+                cluster_name = ssm_imports["cluster_name"]
+
+                # Use the shared VPC
+                vpc = self._get_or_create_vpc()
+
+                self.ecs_cluster = ecs.Cluster.from_cluster_attributes(
+                    self, "ImportedECSCluster", cluster_name=cluster_name, vpc=vpc
                 )
+                logger.info(f"Connected to existing ECS cluster: {cluster_name}")
 
-
-        if "run_command" in container_config:
-            user_data.add_commands(container_config["run_command"])
-        elif "ecr" in container_config:
-            port = container_config.get("port", 8080)
-            user_data.add_commands(
-                f"docker run -d --name app -p {port}:{port} "
-                '-e DB_HOST="$DB_HOST" -e DB_USER="$DB_USER" -e DB_PASS="$DB_PASS" -e DB_NAME="$DB_NAME" '
-                "--restart=always ${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/${REPO}:${TAG}"
-            )
+        return self.ecs_cluster
 
     def _create_launch_template(self, asg_name: str) -> ec2.LaunchTemplate:
-        """Create launch template for
-
-
+        """Create launch template for Auto Scaling Group"""
+
+        # Use the configured AMI ID or fall back to appropriate lookup
         if self.asg_config.ami_id:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                instance_class = ec2.InstanceClass[parts[0].upper()]
-                instance_size = ec2.InstanceSize[parts[1].upper()]
-                instance_type = ec2.InstanceType.of(instance_class, instance_size)
-            except (KeyError, ValueError):
-                instance_type = ec2.InstanceType(instance_type_str)
+            # Use explicit AMI ID provided by user
+            machine_image = ec2.MachineImage.lookup(name=self.asg_config.ami_id)
+        elif self.asg_config.ami_type:
+            # Use AMI type for dynamic lookup
+            if self.asg_config.ami_type.upper() == "AMAZON-LINUX-2023":
+                machine_image = ec2.MachineImage.latest_amazon_linux2023()
+            elif self.asg_config.ami_type.upper() == "AMAZON-LINUX-2022":
+                machine_image = ec2.MachineImage.latest_amazon_linux2022()
+            elif self.asg_config.ami_type.upper() == "AMAZON-LINUX-2":
+                machine_image = ec2.MachineImage.latest_amazon_linux2()
+            elif self.asg_config.ami_type.upper() == "ECS_OPTIMIZED":
+                # Use ECS-optimized AMI from SSM parameter
+                from aws_cdk import aws_ssm as ssm
+
+                machine_image = ec2.MachineImage.from_ssm_parameter(
+                    parameter_name="/aws/service/ecs/optimized-ami/amazon-linux-2023/recommended/image_id"
+                )
             else:
-
+                # Default to latest Amazon Linux
+                machine_image = ec2.MachineImage.latest_amazon_linux2023()
         else:
-
-
-        # Create block device mappings
-        block_devices = []
-        for device in self.asg_config.block_devices:
-            block_devices.append(
-                ec2.BlockDevice(
-                    device_name=device.get("device_name", "/dev/xvda"),
-                    volume=ec2.BlockDeviceVolume.ebs(
-                        volume_size=device.get("volume_size", 8),
-                        volume_type=ec2.EbsDeviceVolumeType(
-                            str(device.get("volume_type", "gp3")).upper()
-                        ),
-                        delete_on_termination=device.get("delete_on_termination", True),
-                        encrypted=device.get("encrypted", True),
-                    ),
-                )
-            )
+            # Default fallback
+            machine_image = ec2.MachineImage.latest_amazon_linux2023()
 
-        # Create launch template
         launch_template = ec2.LaunchTemplate(
             self,
             f"{asg_name}-LaunchTemplate",
-
-
+            instance_type=ec2.InstanceType(self.asg_config.instance_type),
+            machine_image=machine_image,
             role=self.instance_role,
-            security_group=self.security_groups[0] if self.security_groups else None,
             user_data=self.user_data,
+            security_group=self.security_groups[0] if self.security_groups else None,
+            key_name=self.asg_config.key_name,
             detailed_monitoring=self.asg_config.detailed_monitoring,
-            block_devices=
+            block_devices=(
+                [
+                    ec2.BlockDevice(
+                        device_name=block_device.get("device_name", "/dev/xvda"),
+                        volume=ec2.BlockDeviceVolume.ebs(
+                            volume_size=block_device.get("volume_size", 8),
+                            volume_type=getattr(
+                                ec2.EbsDeviceVolumeType,
+                                block_device.get("volume_type", "GP3").upper(),
+                            ),
+                            delete_on_termination=block_device.get(
+                                "delete_on_termination", True
+                            ),
+                            encrypted=block_device.get("encrypted", False),
+                        ),
+                    )
+                    for block_device in self.asg_config.block_devices
+                ]
+                if self.asg_config.block_devices
+                else None
+            ),
         )
 
+        logger.info(f"Created launch template: {launch_template.launch_template_name}")
         return launch_template
 
     def _create_auto_scaling_group(self, asg_name: str) -> autoscaling.AutoScalingGroup:
-        """Create
-        #
-
-        subnets =
-
-
-
-
-
-
+        """Create Auto Scaling Group"""
+        # Use the shared VPC and subnets
+        vpc = self._get_or_create_vpc()
+        subnets = self._get_subnets()
+
+        health_checks = (
+            # ELB + EC2 (EC2 is always included; ELB is "additional")
+            HealthChecks.with_additional_checks(
+                additional_types=[AdditionalHealthCheckType.ELB],
+                grace_period=Duration.seconds(
+                    self.asg_config.health_check_grace_period
+                ),
            )
-
-
-
+            if self.asg_config.health_check_type.upper() == "ELB"
+            # EC2-only
+            else HealthChecks.ec2(
+                grace_period=Duration.seconds(
+                    self.asg_config.health_check_grace_period
+                ),
+            )
+        )
+        auto_scaling_group = autoscaling.AutoScalingGroup(
             self,
-            asg_name,
-            vpc=
-            vpc_subnets=subnets,
+            f"{asg_name}-ASG",
+            vpc=vpc,
+            vpc_subnets=ec2.SubnetSelection(subnets=subnets),
+            launch_template=self.launch_template,
             min_capacity=self.asg_config.min_capacity,
             max_capacity=self.asg_config.max_capacity,
            desired_capacity=self.asg_config.desired_capacity,
-
-
-            cooldown=Duration.seconds(self.asg_config.cooldown),
+            health_checks=health_checks,
+            cooldown=cdk.Duration.seconds(self.asg_config.cooldown),
             termination_policies=[
-                autoscaling.TerminationPolicy(
+                getattr(autoscaling.TerminationPolicy, policy.upper())
                 for policy in self.asg_config.termination_policies
             ],
         )
 
-        #
-        self.
-
-        # Configure update policy
-        # Only apply update policy if it was explicitly configured
-        if "update_policy" in self.stack_config.dictionary.get("auto_scaling", {}):
-            update_policy = self.asg_config.update_policy
-            # Apply the update policy to the ASG's CloudFormation resource
-            cfn_asg = asg.node.default_child
-            cfn_asg.add_override(
-                "UpdatePolicy",
-                {
-                    "AutoScalingRollingUpdate": {
-                        "MinInstancesInService": update_policy.get(
-                            "min_instances_in_service", 1
-                        ),
-                        "MaxBatchSize": update_policy.get("max_batch_size", 1),
-                        "PauseTime": f"PT{update_policy.get('pause_time', 300) // 60}M",
-                    }
-                },
-            )
-
-        # Add tags
-        for key, value in self.asg_config.tags.items():
-            cdk.Tags.of(asg).add(key, value)
+        # Add instance refresh if configured
+        if self.asg_config.instance_refresh:
+            self._configure_instance_refresh(auto_scaling_group)
 
-
+        # Attach target groups if configured
+        self._attach_target_groups(auto_scaling_group)
 
-
-
-        for policy in self.asg_config.scaling_policies:
-            policy_type = policy.get("type", "target_tracking")
+        logger.info(f"Created Auto Scaling Group: {asg_name}")
+        return auto_scaling_group
 
-
-
-
-                metric=self._get_metric(policy),
-                scaling_steps=self._get_scaling_steps(policy),
-                adjustment_type=autoscaling.AdjustmentType.CHANGE_IN_CAPACITY,
-            )
-        elif policy_type == "step":
-            self.auto_scaling_group.scale_on_metric(
-                f"{self.asg_config.name}-{policy.get('name', 'scaling-policy')}",
-                metric=self._get_metric(policy),
-                scaling_steps=self._get_scaling_steps(policy),
-                adjustment_type=autoscaling.AdjustmentType.CHANGE_IN_CAPACITY,
-            )
+    def _attach_target_groups(self, asg: autoscaling.AutoScalingGroup) -> None:
+        """Attach the Auto Scaling Group to target groups"""
+        target_group_arns = self._get_target_group_arns()
 
-
-
-
-        # In a real-world scenario, you would use CloudWatch metrics
-        return cloudwatch.Metric(
-            namespace="AWS/EC2",
-            metric_name=policy.get("metric_name", "CPUUtilization"),
-            dimensions_map={
-                "AutoScalingGroupName": self.auto_scaling_group.auto_scaling_group_name
-            },
-            statistic=policy.get("statistic", "Average"),
-            period=Duration.seconds(policy.get("period", 60)),
-        )
+        if not target_group_arns:
+            logger.warning("No target group ARNs found for Auto Scaling Group")
+            return
 
-
-
-
-        """Get scaling steps for scaling policy"""
-        steps = policy.get("steps", [])
-        scaling_intervals = []
-
-        for step in steps:
-            # Handle upper bound - if not specified, don't set it (let CDK handle it)
-            interval_kwargs = {
-                "lower": step.get("lower", 0),
-                "change": step.get("change", 1),
-            }
+        # Get the underlying CloudFormation resource to add target group ARNs
+        cfn_asg = asg.node.default_child
+        cfn_asg.add_property_override("TargetGroupARNs", target_group_arns)
 
-
-
-
+    def _get_target_group_arns(self) -> List[str]:
+        """Get target group ARNs using standardized SSM approach"""
+        target_group_arns = []
 
-
+        # Use standardized SSM imports
+        ssm_imports = self._get_ssm_imports()
+        if "target_group_arns" in ssm_imports:
+            imported_arns = ssm_imports["target_group_arns"]
+            if isinstance(imported_arns, list):
+                target_group_arns.extend(imported_arns)
+            else:
+                target_group_arns.append(imported_arns)
 
-
+        # Fallback: Direct configuration
+        elif self.asg_config.target_group_arns:
+            target_group_arns.extend(self.asg_config.target_group_arns)
 
-
-        """Add CloudFormation outputs for the Auto Scaling Group"""
-        if self.auto_scaling_group:
-            # Auto Scaling Group Name
-            cdk.CfnOutput(
-                self,
-                f"{asg_name}-name",
-                value=self.auto_scaling_group.auto_scaling_group_name,
-                export_name=f"{self.deployment.build_resource_name(asg_name)}-name",
-            )
+        return target_group_arns
 
-
-
-
-
-                value=self.auto_scaling_group.auto_scaling_group_arn,
-                export_name=f"{self.deployment.build_resource_name(asg_name)}-arn",
-            )
+    def _add_scaling_policies(self) -> None:
+        """Add scaling policies to the Auto Scaling Group"""
+        if not self.asg_config.scaling_policies:
+            return
 
-
-        if
-
+        for policy_config in self.asg_config.scaling_policies:
+            if policy_config.get("type") == "target_tracking":
+                # Create a target tracking scaling policy for CPU utilization
+                scaling_policy = autoscaling.CfnScalingPolicy(
                    self,
-
-
-
+                    "CPUScalingPolicy",
+                    auto_scaling_group_name=self.auto_scaling_group.auto_scaling_group_name,
+                    policy_type="TargetTrackingScaling",
+                    target_tracking_configuration=autoscaling.CfnScalingPolicy.TargetTrackingConfigurationProperty(
+                        target_value=policy_config.get("target_cpu", 70),
+                        predefined_metric_specification=autoscaling.CfnScalingPolicy.PredefinedMetricSpecificationProperty(
+                            predefined_metric_type="ASGAverageCPUUtilization"
+                        ),
+                    ),
                 )
+                logger.info("Added CPU utilization scaling policy")
 
-
-
-
-        for policy_config in self.asg_config.scaling_policies:
-            # Scaling policy implementation would go here
-            pass
+    def _add_update_policy(self) -> None:
+        """Add update policy to the Auto Scaling Group"""
+        update_policy = self.asg_config.update_policy
 
-
-
-
-        # Scheduled action implementation would go here
-        pass
+        if not update_policy:
+            # No update policy configured, don't add one
+            return
 
-
-
-
-
-        ""
-
-
-
-        #
-
-
+        # Get the underlying CloudFormation resource to add update policy
+        cfn_asg = self.auto_scaling_group.node.default_child
+
+        # Get CDK's default policy first (if any)
+        default_policy = getattr(cfn_asg, "update_policy", {})
+
+        # Merge with defaults, then use the robust add_override method
+        merged_policy = {
+            **default_policy,  # Preserve CDK defaults
+            "AutoScalingRollingUpdate": {
+                "MinInstancesInService": update_policy.get(
+                    "min_instances_in_service", 1
+                ),
+                "MaxBatchSize": update_policy.get("max_batch_size", 1),
+                "PauseTime": f"PT{update_policy.get('pause_time', 300)}S",
+            },
+        }
+
+        # Use the robust CDK-documented approach
+        cfn_asg.add_override("UpdatePolicy", merged_policy)
+
+        logger.info("Added rolling update policy to Auto Scaling Group")
+
+    def _export_ssm_parameters(self) -> None:
+        """Export SSM parameters using standardized approach"""
+        if not self.auto_scaling_group:
+            logger.warning("No Auto Scaling Group to export")
             return
-
-        logger.warning(
-            "No ECS cluster name found in SSM imports. "
-            "Use the dedicated EcsClusterStack module to create ECS clusters."
-        )
 
-
-
-
-
-
-
-
+        # Prepare resource values for export
+        resource_values = {
+            "auto_scaling_group_name": self.auto_scaling_group.auto_scaling_group_name,
+            "auto_scaling_group_arn": self.auto_scaling_group.auto_scaling_group_arn,
+        }
+
+        # Export using standardized SSM mixin
+        exported_params = self.export_ssm_parameters(resource_values)
+
+        logger.info(f"Exported SSM parameters: {exported_params}")
+
+    def _configure_instance_refresh(self, asg: autoscaling.AutoScalingGroup) -> None:
+        """Configure instance refresh for rolling updates"""
+        instance_refresh_config = self.asg_config.instance_refresh
+
+        if not instance_refresh_config.get("enabled", False):
            return
-
-        injected_commands = []
-        cluster_name_injected = False
-
-        for command in self.user_data_commands:
-            # If this command already sets ECS_CLUSTER, replace it
-            if 'ECS_CLUSTER=' in command:
-                # Replace existing ECS_CLUSTER setting with our cluster name
-                parts = command.split('ECS_CLUSTER=')
-                if len(parts) > 1:
-                    # Keep everything before ECS_CLUSTER=, add our cluster name, then add the rest
-                    before = parts[0]
-                    after_parts = parts[1].split(None, 1)  # Split on first whitespace
-                    after = after_parts[1] if len(after_parts) > 1 else ''
-                    new_command = f"{before}ECS_CLUSTER={cluster_name} {after}".strip()
-                    injected_commands.append(new_command)
-                    cluster_name_injected = True
-                else:
-                    injected_commands.append(f"{command}ECS_CLUSTER={cluster_name}")
-                    cluster_name_injected = True
-            else:
-                injected_commands.append(command)
-
-        # If no ECS_CLUSTER was found in existing commands, add it
-        if not cluster_name_injected:
-            injected_commands.append(f"echo ECS_CLUSTER={cluster_name} >> /etc/ecs/ecs.config")
-
-        # Update the user data with the injected commands
-        self.user_data_commands = injected_commands
-
-        # If user data object exists, we need to recreate it with the updated commands
-        if hasattr(self, 'user_data') and self.user_data:
-            self.user_data = self._recreate_user_data_with_commands(injected_commands)
-
-    def _recreate_user_data_with_commands(self, commands: List[str]) -> ec2.UserData:
-        """Recreate user data with updated commands"""
-        user_data = ec2.UserData.for_linux()
-
-        for command in commands:
-            user_data.add_commands(command)
-
-        return user_data
 
-
-
-        # Export ASG name
-        cdk.CfnOutput(
-            self,
-            f"{asg_name}-name",
-            value=self.auto_scaling_group.auto_scaling_group_name,
-            export_name=f"{self.deployment.build_resource_name(asg_name)}-name",
-        )
+        logger.warning("Instance refresh is not supported in this version of the CDK")
+        return
 
-        #
-
-
-
-
-
-
+        # Get the CloudFormation ASG resource
+        cfn_asg = asg.node.default_child
+
+        # Configure instance refresh using CloudFormation UpdatePolicy
+        # UpdatePolicy is added at the resource level, not as a property
+        update_policy = {
+            "AutoScalingRollingUpdate": {
+                "PauseTime": "PT300S",  # 5 minutes pause
+                "MinInstancesInService": "1",
+                "MaxBatchSize": "1",
+                "WaitOnResourceSignals": True,
+                "SuspendProcesses": [
+                    "HealthCheck",
+                    "ReplaceUnhealthy",
+                    "AZRebalance",
+                    "AlarmNotification",
+                    "ScheduledActions",
+                ],
+            }
+        }
+
+        # # Apply instance refresh using CloudFormation's cfn_options.update_policy
+        # cfn_asg.cfn_options.update_policy = cdk.CfnUpdatePolicy.from_rolling_update(
+        #     pause_time=cdk.Duration.seconds(300),
+        #     min_instances_in_service=1,
+        #     max_batch_size=1,
+        #     wait_on_resource_signals=True
+        # )
+
+        # Grab the L1 to attach UpdatePolicy.InstanceRefresh
+        cfn_asg: autoscaling.CfnAutoScalingGroup = asg.node.default_child
+
+        # cfn_asg.cfn_options.update_policy = CfnUpdatePolicy.from_auto_scaling_instance_refresh(
+        #     # Triggers tell CFN *what* changes should start a refresh
+        #     triggers=[CfnUpdatePolicy.InstanceRefreshTrigger.LAUNCH_TEMPLATE],
+        #     preferences=CfnUpdatePolicy.InstanceRefreshPreferences(
+        #         # warmup is like "grace" before counting a new instance healthy
+        #         instance_warmup=Duration.minutes(5),
+        #         # how aggressive the refresh is; 90 keeps capacity high
+        #         min_healthy_percentage=90,
+        #         # skip instances that already match the new LT (fast when only userdata/env tweaked)
+        #         skip_matching=True,
+        #         # optional: put instances in Standby first; default is rolling terminate/launch
+        #         # standby_instances=CfnUpdatePolicy.StandbyInstances.TERMINATE,
+        #         # checkpoint_percentages=[25, 50, 75],  # optional: progressive checkpoints
+        #         # checkpoint_delay=Duration.minutes(2),  # optional delay at checkpoints
+        #     ),
+        # )
+        logger.info(f"Configured instance refresh via CDK CfnUpdatePolicy")
+
+        # Note: This provides rolling update functionality similar to instance refresh
+        # For true instance refresh with preferences, we would need CDK v2.80+ or custom CloudFormation
 
 
+# Backward compatibility alias
+AutoScalingStackStandardized = AutoScalingStack
```