cdk-factory 0.16.16__py3-none-any.whl → 0.17.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cdk_factory/configurations/base_config.py +23 -24
- cdk_factory/configurations/cdk_config.py +1 -1
- cdk_factory/configurations/devops.py +1 -1
- cdk_factory/configurations/resources/cloudfront.py +7 -2
- cdk_factory/configurations/resources/ecr.py +1 -1
- cdk_factory/configurations/resources/ecs_cluster.py +7 -5
- cdk_factory/configurations/resources/ecs_service.py +7 -2
- cdk_factory/configurations/resources/load_balancer.py +8 -9
- cdk_factory/configurations/resources/monitoring.py +8 -3
- cdk_factory/configurations/resources/rds.py +7 -8
- cdk_factory/configurations/resources/rum.py +7 -2
- cdk_factory/configurations/resources/s3.py +1 -1
- cdk_factory/configurations/resources/security_group_full_stack.py +7 -8
- cdk_factory/configurations/resources/vpc.py +19 -0
- cdk_factory/configurations/workload.py +32 -2
- cdk_factory/constructs/ecr/ecr_construct.py +9 -2
- cdk_factory/constructs/lambdas/policies/policy_docs.py +4 -4
- cdk_factory/interfaces/istack.py +4 -4
- cdk_factory/interfaces/networked_stack_mixin.py +6 -6
- cdk_factory/interfaces/standardized_ssm_mixin.py +612 -0
- cdk_factory/interfaces/vpc_provider_mixin.py +53 -29
- cdk_factory/lambdas/edge/ip_gate/handler.py +42 -40
- cdk_factory/pipeline/pipeline_factory.py +3 -3
- cdk_factory/stack_library/__init__.py +3 -2
- cdk_factory/stack_library/acm/acm_stack.py +2 -2
- cdk_factory/stack_library/api_gateway/api_gateway_stack.py +84 -59
- cdk_factory/stack_library/auto_scaling/auto_scaling_stack_standardized.py +530 -0
- cdk_factory/stack_library/code_artifact/code_artifact_stack.py +2 -2
- cdk_factory/stack_library/cognito/cognito_stack.py +152 -92
- cdk_factory/stack_library/dynamodb/dynamodb_stack.py +19 -15
- cdk_factory/stack_library/ecr/ecr_stack.py +2 -2
- cdk_factory/stack_library/ecs/__init__.py +1 -1
- cdk_factory/stack_library/ecs/ecs_cluster_stack_standardized.py +305 -0
- cdk_factory/stack_library/ecs/ecs_service_stack.py +10 -26
- cdk_factory/stack_library/lambda_edge/lambda_edge_stack.py +2 -2
- cdk_factory/stack_library/load_balancer/load_balancer_stack.py +11 -35
- cdk_factory/stack_library/rds/rds_stack.py +10 -27
- cdk_factory/stack_library/route53/route53_stack.py +2 -2
- cdk_factory/stack_library/rum/rum_stack.py +102 -91
- cdk_factory/stack_library/security_group/security_group_full_stack.py +9 -22
- cdk_factory/stack_library/security_group/security_group_stack.py +11 -11
- cdk_factory/stack_library/vpc/vpc_stack_standardized.py +411 -0
- cdk_factory/utilities/api_gateway_integration_utility.py +24 -16
- cdk_factory/utilities/environment_services.py +3 -3
- cdk_factory/utilities/json_loading_utility.py +1 -1
- cdk_factory/validation/config_validator.py +483 -0
- cdk_factory/version.py +1 -1
- {cdk_factory-0.16.16.dist-info → cdk_factory-0.17.0.dist-info}/METADATA +1 -1
- {cdk_factory-0.16.16.dist-info → cdk_factory-0.17.0.dist-info}/RECORD +52 -52
- cdk_factory/interfaces/enhanced_ssm_parameter_mixin.py +0 -321
- cdk_factory/interfaces/ssm_parameter_mixin.py +0 -454
- cdk_factory/stack_library/auto_scaling/auto_scaling_stack.py +0 -748
- cdk_factory/stack_library/ecs/ecs_cluster_stack.py +0 -232
- cdk_factory/stack_library/vpc/vpc_stack.py +0 -298
- {cdk_factory-0.16.16.dist-info → cdk_factory-0.17.0.dist-info}/WHEEL +0 -0
- {cdk_factory-0.16.16.dist-info → cdk_factory-0.17.0.dist-info}/entry_points.txt +0 -0
- {cdk_factory-0.16.16.dist-info → cdk_factory-0.17.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,748 +0,0 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Auto Scaling Group Stack Pattern for CDK-Factory
|
|
3
|
-
Maintainers: Eric Wilson
|
|
4
|
-
MIT License. See Project Root for the license information.
|
|
5
|
-
"""
|
|
6
|
-
|
|
7
|
-
from typing import Dict, Any, List, Optional
|
|
8
|
-
|
|
9
|
-
import aws_cdk as cdk
|
|
10
|
-
from aws_cdk import aws_ec2 as ec2
|
|
11
|
-
from aws_cdk import aws_autoscaling as autoscaling
|
|
12
|
-
from aws_cdk import aws_cloudwatch as cloudwatch
|
|
13
|
-
from aws_cdk import aws_iam as iam
|
|
14
|
-
from aws_cdk import aws_ssm as ssm
|
|
15
|
-
from aws_cdk import aws_ecs as ecs
|
|
16
|
-
from aws_cdk import Duration, Stack
|
|
17
|
-
from aws_lambda_powertools import Logger
|
|
18
|
-
from constructs import Construct
|
|
19
|
-
|
|
20
|
-
from cdk_factory.configurations.deployment import DeploymentConfig
|
|
21
|
-
from cdk_factory.configurations.stack import StackConfig
|
|
22
|
-
from cdk_factory.configurations.resources.auto_scaling import AutoScalingConfig
|
|
23
|
-
from cdk_factory.interfaces.istack import IStack
|
|
24
|
-
from cdk_factory.interfaces.vpc_provider_mixin import VPCProviderMixin
|
|
25
|
-
from cdk_factory.stack.stack_module_registry import register_stack
|
|
26
|
-
from cdk_factory.workload.workload_factory import WorkloadConfig
|
|
27
|
-
|
|
28
|
-
logger = Logger(service="AutoScalingStack")
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
@register_stack("auto_scaling_library_module")
|
|
32
|
-
@register_stack("auto_scaling_stack")
|
|
33
|
-
class AutoScalingStack(IStack, VPCProviderMixin):
|
|
34
|
-
"""
|
|
35
|
-
Reusable stack for AWS Auto Scaling Groups.
|
|
36
|
-
Supports creating EC2 Auto Scaling Groups with customizable configurations.
|
|
37
|
-
|
|
38
|
-
Uses enhanced SsmParameterMixin (via IStack) to eliminate SSM code duplication.
|
|
39
|
-
"""
|
|
40
|
-
|
|
41
|
-
def __init__(self, scope: Construct, id: str, **kwargs) -> None:
|
|
42
|
-
# Initialize parent class properly - IStack inherits from enhanced SsmParameterMixin
|
|
43
|
-
super().__init__(scope, id, **kwargs)
|
|
44
|
-
|
|
45
|
-
# Initialize VPC cache from mixin
|
|
46
|
-
self._initialize_vpc_cache()
|
|
47
|
-
|
|
48
|
-
self.asg_config = None
|
|
49
|
-
self.stack_config = None
|
|
50
|
-
self.deployment = None
|
|
51
|
-
self.workload = None
|
|
52
|
-
self.security_groups = []
|
|
53
|
-
self.auto_scaling_group = None
|
|
54
|
-
self.launch_template = None
|
|
55
|
-
self.instance_role = None
|
|
56
|
-
self.user_data = None
|
|
57
|
-
self.user_data_commands = [] # Store raw commands for ECS cluster detection
|
|
58
|
-
self.ecs_cluster = None
|
|
59
|
-
|
|
60
|
-
# SSM imports storage is now handled by the enhanced SsmParameterMixin via IStack
|
|
61
|
-
# VPC caching is now handled by VPCProviderMixin
|
|
62
|
-
|
|
63
|
-
def build(
|
|
64
|
-
self,
|
|
65
|
-
stack_config: StackConfig,
|
|
66
|
-
deployment: DeploymentConfig,
|
|
67
|
-
workload: WorkloadConfig,
|
|
68
|
-
) -> None:
|
|
69
|
-
"""Build the Auto Scaling Group stack"""
|
|
70
|
-
self._build(stack_config, deployment, workload)
|
|
71
|
-
|
|
72
|
-
def _build(
|
|
73
|
-
self,
|
|
74
|
-
stack_config: StackConfig,
|
|
75
|
-
deployment: DeploymentConfig,
|
|
76
|
-
workload: WorkloadConfig,
|
|
77
|
-
) -> None:
|
|
78
|
-
"""Internal build method for the Auto Scaling Group stack"""
|
|
79
|
-
self.stack_config = stack_config
|
|
80
|
-
self.deployment = deployment
|
|
81
|
-
self.workload = workload
|
|
82
|
-
|
|
83
|
-
self.asg_config = AutoScalingConfig(
|
|
84
|
-
stack_config.dictionary.get("auto_scaling", {}), deployment
|
|
85
|
-
)
|
|
86
|
-
asg_name = deployment.build_resource_name(self.asg_config.name)
|
|
87
|
-
|
|
88
|
-
# Process SSM imports using enhanced SsmParameterMixin
|
|
89
|
-
self.process_ssm_imports(self.asg_config, deployment, "Auto Scaling Group")
|
|
90
|
-
|
|
91
|
-
# Get security groups
|
|
92
|
-
self.security_groups = self._get_security_groups()
|
|
93
|
-
|
|
94
|
-
# Create IAM role for instances
|
|
95
|
-
self.instance_role = self._create_instance_role(asg_name)
|
|
96
|
-
|
|
97
|
-
# Create user data
|
|
98
|
-
self.user_data = self._create_user_data()
|
|
99
|
-
|
|
100
|
-
# Create ECS cluster if ECS configuration is detected
|
|
101
|
-
# This must happen before launch template creation so user data can be updated
|
|
102
|
-
self._create_ecs_cluster_if_needed(asg_name)
|
|
103
|
-
|
|
104
|
-
# Create launch template
|
|
105
|
-
self.launch_template = self._create_launch_template(asg_name)
|
|
106
|
-
|
|
107
|
-
# Create Auto Scaling Group
|
|
108
|
-
self.auto_scaling_group = self._create_auto_scaling_group(asg_name)
|
|
109
|
-
|
|
110
|
-
# Add scaling policies
|
|
111
|
-
self._add_scaling_policies()
|
|
112
|
-
|
|
113
|
-
# Add scheduled actions
|
|
114
|
-
self._add_scheduled_actions()
|
|
115
|
-
|
|
116
|
-
# Export resources
|
|
117
|
-
self._export_resources(asg_name)
|
|
118
|
-
|
|
119
|
-
@property
|
|
120
|
-
def vpc(self) -> ec2.IVpc:
|
|
121
|
-
"""Get the VPC for the Auto Scaling Group using VPCProviderMixin"""
|
|
122
|
-
if not self.asg_config:
|
|
123
|
-
raise AttributeError("AutoScalingStack not properly initialized. Call build() first.")
|
|
124
|
-
|
|
125
|
-
# Use VPCProviderMixin to resolve VPC with proper subnet handling
|
|
126
|
-
return self.resolve_vpc(
|
|
127
|
-
config=self.asg_config,
|
|
128
|
-
deployment=self.deployment,
|
|
129
|
-
workload=self.workload
|
|
130
|
-
)
|
|
131
|
-
|
|
132
|
-
def _get_target_group_arns(self) -> List[str]:
|
|
133
|
-
"""Get target group ARNs from SSM imports using enhanced SsmParameterMixin"""
|
|
134
|
-
target_group_arns = []
|
|
135
|
-
|
|
136
|
-
# Check if we have SSM imports for target groups using enhanced mixin
|
|
137
|
-
if self.has_ssm_import("target_group_arns"):
|
|
138
|
-
imported_tg_arns = self.get_ssm_imported_value("target_group_arns", [])
|
|
139
|
-
if isinstance(imported_tg_arns, list):
|
|
140
|
-
target_group_arns.extend(imported_tg_arns)
|
|
141
|
-
else:
|
|
142
|
-
target_group_arns.append(imported_tg_arns)
|
|
143
|
-
|
|
144
|
-
# see if we have any directly defined in the config
|
|
145
|
-
if self.asg_config.target_group_arns:
|
|
146
|
-
for arn in self.asg_config.target_group_arns:
|
|
147
|
-
logger.info(f"Adding target group ARN: {arn}")
|
|
148
|
-
target_group_arns.append(arn)
|
|
149
|
-
|
|
150
|
-
return target_group_arns
|
|
151
|
-
|
|
152
|
-
def _attach_target_groups(self, asg: autoscaling.AutoScalingGroup) -> None:
|
|
153
|
-
"""Attach the Auto Scaling Group to target groups"""
|
|
154
|
-
target_group_arns = self._get_target_group_arns()
|
|
155
|
-
|
|
156
|
-
if not target_group_arns:
|
|
157
|
-
logger.warning("No target group ARNs found for Auto Scaling Group")
|
|
158
|
-
print(
|
|
159
|
-
"⚠️ No target group ARNs found for Auto Scaling Group. Nothing will be attached."
|
|
160
|
-
)
|
|
161
|
-
return
|
|
162
|
-
|
|
163
|
-
# Get the underlying CloudFormation resource to add target group ARNs
|
|
164
|
-
cfn_asg = asg.node.default_child
|
|
165
|
-
cfn_asg.add_property_override("TargetGroupARNs", target_group_arns)
|
|
166
|
-
|
|
167
|
-
def _get_security_groups(self) -> List[ec2.ISecurityGroup]:
|
|
168
|
-
"""Get security groups for the Auto Scaling Group"""
|
|
169
|
-
security_groups = []
|
|
170
|
-
|
|
171
|
-
# Check if we have SSM imports for security groups using enhanced mixin
|
|
172
|
-
if self.has_ssm_import("security_group_ids"):
|
|
173
|
-
imported_sg_ids = self.get_ssm_imported_value("security_group_ids", [])
|
|
174
|
-
if isinstance(imported_sg_ids, list):
|
|
175
|
-
for idx, sg_id in enumerate(imported_sg_ids):
|
|
176
|
-
security_groups.append(
|
|
177
|
-
ec2.SecurityGroup.from_security_group_id(
|
|
178
|
-
self, f"SecurityGroup-SSM-{idx}", sg_id
|
|
179
|
-
)
|
|
180
|
-
)
|
|
181
|
-
logger.info(f"Added {len(imported_sg_ids)} security groups from SSM imports")
|
|
182
|
-
else:
|
|
183
|
-
security_groups.append(
|
|
184
|
-
ec2.SecurityGroup.from_security_group_id(
|
|
185
|
-
self, f"SecurityGroup-SSM-0", imported_sg_ids
|
|
186
|
-
)
|
|
187
|
-
)
|
|
188
|
-
logger.info(f"Added security group from SSM imports")
|
|
189
|
-
|
|
190
|
-
# Also check if we have any directly defined in the config
|
|
191
|
-
if self.asg_config.security_group_ids:
|
|
192
|
-
for idx, sg_id in enumerate(self.asg_config.security_group_ids):
|
|
193
|
-
logger.info(f"Adding security group from direct config: {sg_id}")
|
|
194
|
-
# if the security group id contains a comma, it is a list of security group ids
|
|
195
|
-
if "," in sg_id:
|
|
196
|
-
blocks = sg_id.split(",")
|
|
197
|
-
for block_idx, block in enumerate(blocks):
|
|
198
|
-
security_groups.append(
|
|
199
|
-
ec2.SecurityGroup.from_security_group_id(
|
|
200
|
-
self, f"SecurityGroup-Direct-{idx}-{block_idx}", block
|
|
201
|
-
)
|
|
202
|
-
)
|
|
203
|
-
else:
|
|
204
|
-
# TODO: add some additional checks to make it more robust
|
|
205
|
-
security_groups.append(
|
|
206
|
-
ec2.SecurityGroup.from_security_group_id(
|
|
207
|
-
self, f"SecurityGroup-Direct-{idx}", sg_id
|
|
208
|
-
)
|
|
209
|
-
)
|
|
210
|
-
|
|
211
|
-
if not security_groups:
|
|
212
|
-
logger.warning("No security groups found from SSM imports or direct configuration")
|
|
213
|
-
|
|
214
|
-
return security_groups
|
|
215
|
-
|
|
216
|
-
def _create_instance_role(self, asg_name: str) -> iam.Role:
|
|
217
|
-
"""Create IAM role for EC2 instances"""
|
|
218
|
-
role = iam.Role(
|
|
219
|
-
self,
|
|
220
|
-
f"{asg_name}-InstanceRole",
|
|
221
|
-
assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
|
|
222
|
-
role_name=f"{asg_name}-role",
|
|
223
|
-
)
|
|
224
|
-
|
|
225
|
-
# Add managed policies
|
|
226
|
-
for policy_name in self.asg_config.managed_policies:
|
|
227
|
-
role.add_managed_policy(
|
|
228
|
-
iam.ManagedPolicy.from_aws_managed_policy_name(policy_name)
|
|
229
|
-
)
|
|
230
|
-
|
|
231
|
-
# Add inline policies (for custom permissions like S3 bucket access)
|
|
232
|
-
for policy_config in self.asg_config.iam_inline_policies:
|
|
233
|
-
policy_name = policy_config.get("name", "CustomPolicy")
|
|
234
|
-
statements = policy_config.get("statements", [])
|
|
235
|
-
|
|
236
|
-
if not statements:
|
|
237
|
-
logger.warning(f"No statements found for inline policy {policy_name}, skipping")
|
|
238
|
-
continue
|
|
239
|
-
|
|
240
|
-
# Build policy statements
|
|
241
|
-
policy_statements = []
|
|
242
|
-
for stmt in statements:
|
|
243
|
-
effect = iam.Effect.ALLOW if stmt.get("effect", "Allow") == "Allow" else iam.Effect.DENY
|
|
244
|
-
actions = stmt.get("actions", [])
|
|
245
|
-
resources = stmt.get("resources", [])
|
|
246
|
-
|
|
247
|
-
if not actions or not resources:
|
|
248
|
-
logger.warning(f"Incomplete statement in policy {policy_name}, skipping")
|
|
249
|
-
continue
|
|
250
|
-
|
|
251
|
-
policy_statements.append(
|
|
252
|
-
iam.PolicyStatement(
|
|
253
|
-
effect=effect,
|
|
254
|
-
actions=actions,
|
|
255
|
-
resources=resources
|
|
256
|
-
)
|
|
257
|
-
)
|
|
258
|
-
|
|
259
|
-
if policy_statements:
|
|
260
|
-
role.add_to_principal_policy(policy_statements[0])
|
|
261
|
-
for stmt in policy_statements[1:]:
|
|
262
|
-
role.add_to_principal_policy(stmt)
|
|
263
|
-
|
|
264
|
-
logger.info(f"Added inline policy {policy_name} with {len(policy_statements)} statements")
|
|
265
|
-
|
|
266
|
-
return role
|
|
267
|
-
|
|
268
|
-
def _create_user_data(self) -> ec2.UserData:
|
|
269
|
-
"""Create user data for EC2 instances"""
|
|
270
|
-
user_data = ec2.UserData.for_linux()
|
|
271
|
-
|
|
272
|
-
# Store raw commands for ECS cluster detection
|
|
273
|
-
self.user_data_commands = ["set -euxo pipefail"]
|
|
274
|
-
|
|
275
|
-
# Add base commands
|
|
276
|
-
user_data.add_commands("set -euxo pipefail")
|
|
277
|
-
|
|
278
|
-
# Add custom commands from config (with variable substitution)
|
|
279
|
-
for command in self.asg_config.user_data_commands:
|
|
280
|
-
# Perform variable substitution on the command
|
|
281
|
-
substituted_command = self._substitute_variables(command)
|
|
282
|
-
user_data.add_commands(substituted_command)
|
|
283
|
-
self.user_data_commands.append(substituted_command)
|
|
284
|
-
|
|
285
|
-
# Add user data scripts from files (with variable substitution)
|
|
286
|
-
if self.asg_config.user_data_scripts:
|
|
287
|
-
self._add_user_data_scripts_from_files(user_data)
|
|
288
|
-
|
|
289
|
-
# Add container configuration if specified
|
|
290
|
-
container_config = self.asg_config.container_config
|
|
291
|
-
if container_config:
|
|
292
|
-
self._add_container_user_data(user_data, container_config)
|
|
293
|
-
|
|
294
|
-
return user_data
|
|
295
|
-
|
|
296
|
-
def _add_user_data_scripts_from_files(self, user_data: ec2.UserData) -> None:
|
|
297
|
-
"""
|
|
298
|
-
Add user data scripts from external files with variable substitution.
|
|
299
|
-
Supports loading shell scripts and injecting them into user data with
|
|
300
|
-
placeholder replacement.
|
|
301
|
-
"""
|
|
302
|
-
from pathlib import Path
|
|
303
|
-
|
|
304
|
-
for script_config in self.asg_config.user_data_scripts:
|
|
305
|
-
script_type = script_config.get("type", "file")
|
|
306
|
-
|
|
307
|
-
if script_type == "file":
|
|
308
|
-
# Load script from file
|
|
309
|
-
script_path = script_config.get("path")
|
|
310
|
-
if not script_path:
|
|
311
|
-
logger.warning("Script path not specified, skipping")
|
|
312
|
-
continue
|
|
313
|
-
|
|
314
|
-
# Resolve path (relative to project root or absolute)
|
|
315
|
-
path = Path(script_path)
|
|
316
|
-
if not path.is_absolute():
|
|
317
|
-
# Try relative to current working directory
|
|
318
|
-
path = Path.cwd() / script_path
|
|
319
|
-
|
|
320
|
-
if not path.exists():
|
|
321
|
-
logger.warning(f"Script file not found: {path}, skipping")
|
|
322
|
-
continue
|
|
323
|
-
|
|
324
|
-
# Read script content
|
|
325
|
-
try:
|
|
326
|
-
with open(path, 'r') as f:
|
|
327
|
-
script_content = f.read()
|
|
328
|
-
except Exception as e:
|
|
329
|
-
logger.error(f"Failed to read script file {path}: {e}")
|
|
330
|
-
continue
|
|
331
|
-
|
|
332
|
-
elif script_type == "inline":
|
|
333
|
-
# Use inline script content
|
|
334
|
-
script_content = script_config.get("content", "")
|
|
335
|
-
if not script_content:
|
|
336
|
-
logger.warning("Inline script content is empty, skipping")
|
|
337
|
-
continue
|
|
338
|
-
else:
|
|
339
|
-
logger.warning(f"Unknown script type: {script_type}, skipping")
|
|
340
|
-
continue
|
|
341
|
-
|
|
342
|
-
# Perform variable substitution
|
|
343
|
-
variables = script_config.get("variables", {})
|
|
344
|
-
for var_name, var_value in variables.items():
|
|
345
|
-
placeholder = f"{{{{{var_name}}}}}" # {{VAR_NAME}}
|
|
346
|
-
script_content = script_content.replace(placeholder, str(var_value))
|
|
347
|
-
|
|
348
|
-
# Add script to user data
|
|
349
|
-
# Split by lines and add each line as a command
|
|
350
|
-
for line in script_content.split('\n'):
|
|
351
|
-
if line.strip(): # Skip empty lines
|
|
352
|
-
user_data.add_commands(line)
|
|
353
|
-
|
|
354
|
-
logger.info(f"Added user data script from {script_type}: {script_config.get('path', 'inline')}")
|
|
355
|
-
|
|
356
|
-
def _substitute_variables(self, command: str) -> str:
|
|
357
|
-
"""
|
|
358
|
-
Perform variable substitution on a user data command.
|
|
359
|
-
Uses workload and deployment configuration for substitution.
|
|
360
|
-
"""
|
|
361
|
-
if not command:
|
|
362
|
-
return command
|
|
363
|
-
|
|
364
|
-
# Start with the original command
|
|
365
|
-
substituted_command = command
|
|
366
|
-
|
|
367
|
-
# Define available variables for substitution
|
|
368
|
-
variables = {}
|
|
369
|
-
|
|
370
|
-
# Add workload variables
|
|
371
|
-
if self.workload:
|
|
372
|
-
variables.update({
|
|
373
|
-
"WORKLOAD_NAME": getattr(self.workload, 'name', ''),
|
|
374
|
-
"ENVIRONMENT": getattr(self.workload, 'environment', ''),
|
|
375
|
-
"WORKLOAD": getattr(self.workload, 'name', ''),
|
|
376
|
-
})
|
|
377
|
-
|
|
378
|
-
# Add deployment variables
|
|
379
|
-
if self.deployment:
|
|
380
|
-
variables.update({
|
|
381
|
-
"DEPLOYMENT_NAME": getattr(self.deployment, 'name', ''),
|
|
382
|
-
"REGION": getattr(self.deployment, 'region', ''),
|
|
383
|
-
"ACCOUNT": getattr(self.deployment, 'account', ''),
|
|
384
|
-
})
|
|
385
|
-
|
|
386
|
-
# Add stack-level variables
|
|
387
|
-
variables.update({
|
|
388
|
-
"STACK_NAME": self.stack_name,
|
|
389
|
-
})
|
|
390
|
-
|
|
391
|
-
# Perform substitution
|
|
392
|
-
for var_name, var_value in variables.items():
|
|
393
|
-
if var_value is not None:
|
|
394
|
-
placeholder = f"{{{{{var_name}}}}}" # {{VAR_NAME}}
|
|
395
|
-
substituted_command = substituted_command.replace(placeholder, str(var_value))
|
|
396
|
-
|
|
397
|
-
return substituted_command
|
|
398
|
-
|
|
399
|
-
def _add_container_user_data(
|
|
400
|
-
self, user_data: ec2.UserData, container_config: Dict[str, Any]
|
|
401
|
-
) -> None:
|
|
402
|
-
"""Add container-specific user data commands"""
|
|
403
|
-
# Install Docker
|
|
404
|
-
user_data.add_commands(
|
|
405
|
-
"dnf -y update", "dnf -y install docker jq", "systemctl enable --now docker"
|
|
406
|
-
)
|
|
407
|
-
|
|
408
|
-
# ECR configuration
|
|
409
|
-
if "ecr" in container_config:
|
|
410
|
-
ecr_config = container_config["ecr"]
|
|
411
|
-
user_data.add_commands(
|
|
412
|
-
f"ACCOUNT_ID={ecr_config.get('account_id', self.account)}",
|
|
413
|
-
f"REGION={ecr_config.get('region', self.region)}",
|
|
414
|
-
f"REPO={ecr_config.get('repo', 'app')}",
|
|
415
|
-
f"TAG={ecr_config.get('tag', 'latest')}",
|
|
416
|
-
"aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin ${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com",
|
|
417
|
-
"docker pull ${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/${REPO}:${TAG}",
|
|
418
|
-
)
|
|
419
|
-
|
|
420
|
-
# Database configuration
|
|
421
|
-
if "database" in container_config:
|
|
422
|
-
db_config = container_config["database"]
|
|
423
|
-
secret_arn = db_config.get("secret_arn", "")
|
|
424
|
-
if secret_arn:
|
|
425
|
-
user_data.add_commands(
|
|
426
|
-
f"DB_SECRET_ARN={secret_arn}",
|
|
427
|
-
'if [ -n "$DB_SECRET_ARN" ]; then DB_JSON=$(aws secretsmanager get-secret-value --secret-id $DB_SECRET_ARN --query SecretString --output text --region $REGION); fi',
|
|
428
|
-
'if [ -n "$DB_SECRET_ARN" ]; then DB_HOST=$(echo $DB_JSON | jq -r .host); DB_USER=$(echo $DB_JSON | jq -r .username); DB_PASS=$(echo $DB_JSON | jq -r .password); DB_NAME=$(echo $DB_JSON | jq -r .dbname); fi',
|
|
429
|
-
)
|
|
430
|
-
|
|
431
|
-
# Run container
|
|
432
|
-
if "run_command" in container_config:
|
|
433
|
-
user_data.add_commands(container_config["run_command"])
|
|
434
|
-
elif "ecr" in container_config:
|
|
435
|
-
port = container_config.get("port", 8080)
|
|
436
|
-
user_data.add_commands(
|
|
437
|
-
f"docker run -d --name app -p {port}:{port} "
|
|
438
|
-
'-e DB_HOST="$DB_HOST" -e DB_USER="$DB_USER" -e DB_PASS="$DB_PASS" -e DB_NAME="$DB_NAME" '
|
|
439
|
-
"--restart=always ${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/${REPO}:${TAG}"
|
|
440
|
-
)
|
|
441
|
-
|
|
442
|
-
def _create_launch_template(self, asg_name: str) -> ec2.LaunchTemplate:
|
|
443
|
-
"""Create launch template for the Auto Scaling Group"""
|
|
444
|
-
# Get AMI
|
|
445
|
-
ami = None
|
|
446
|
-
if self.asg_config.ami_id:
|
|
447
|
-
ami = ec2.MachineImage.generic_linux({self.region: self.asg_config.ami_id})
|
|
448
|
-
else:
|
|
449
|
-
if self.asg_config.ami_type == "amazon-linux-2023":
|
|
450
|
-
ami = ec2.MachineImage.latest_amazon_linux2023()
|
|
451
|
-
elif self.asg_config.ami_type == "amazon-linux-2":
|
|
452
|
-
ami = ec2.MachineImage.latest_amazon_linux2()
|
|
453
|
-
else:
|
|
454
|
-
ami = ec2.MachineImage.latest_amazon_linux2023()
|
|
455
|
-
|
|
456
|
-
# Parse instance type
|
|
457
|
-
instance_type_str = self.asg_config.instance_type
|
|
458
|
-
instance_type = None
|
|
459
|
-
|
|
460
|
-
if "." in instance_type_str:
|
|
461
|
-
parts = instance_type_str.split(".")
|
|
462
|
-
if len(parts) == 2:
|
|
463
|
-
try:
|
|
464
|
-
instance_class = ec2.InstanceClass[parts[0].upper()]
|
|
465
|
-
instance_size = ec2.InstanceSize[parts[1].upper()]
|
|
466
|
-
instance_type = ec2.InstanceType.of(instance_class, instance_size)
|
|
467
|
-
except (KeyError, ValueError):
|
|
468
|
-
instance_type = ec2.InstanceType(instance_type_str)
|
|
469
|
-
else:
|
|
470
|
-
instance_type = ec2.InstanceType(instance_type_str)
|
|
471
|
-
else:
|
|
472
|
-
instance_type = ec2.InstanceType(instance_type_str)
|
|
473
|
-
|
|
474
|
-
# Create block device mappings
|
|
475
|
-
block_devices = []
|
|
476
|
-
for device in self.asg_config.block_devices:
|
|
477
|
-
block_devices.append(
|
|
478
|
-
ec2.BlockDevice(
|
|
479
|
-
device_name=device.get("device_name", "/dev/xvda"),
|
|
480
|
-
volume=ec2.BlockDeviceVolume.ebs(
|
|
481
|
-
volume_size=device.get("volume_size", 8),
|
|
482
|
-
volume_type=ec2.EbsDeviceVolumeType(
|
|
483
|
-
str(device.get("volume_type", "gp3")).upper()
|
|
484
|
-
),
|
|
485
|
-
delete_on_termination=device.get("delete_on_termination", True),
|
|
486
|
-
encrypted=device.get("encrypted", True),
|
|
487
|
-
),
|
|
488
|
-
)
|
|
489
|
-
)
|
|
490
|
-
|
|
491
|
-
# Create launch template
|
|
492
|
-
launch_template = ec2.LaunchTemplate(
|
|
493
|
-
self,
|
|
494
|
-
f"{asg_name}-LaunchTemplate",
|
|
495
|
-
machine_image=ami,
|
|
496
|
-
instance_type=instance_type,
|
|
497
|
-
role=self.instance_role,
|
|
498
|
-
security_group=self.security_groups[0] if self.security_groups else None,
|
|
499
|
-
user_data=self.user_data,
|
|
500
|
-
detailed_monitoring=self.asg_config.detailed_monitoring,
|
|
501
|
-
block_devices=block_devices if block_devices else None,
|
|
502
|
-
)
|
|
503
|
-
|
|
504
|
-
return launch_template
|
|
505
|
-
|
|
506
|
-
def _create_auto_scaling_group(self, asg_name: str) -> autoscaling.AutoScalingGroup:
|
|
507
|
-
"""Create the Auto Scaling Group"""
|
|
508
|
-
# Configure subnet selection
|
|
509
|
-
subnet_group_name = self.asg_config.subnet_group_name
|
|
510
|
-
subnets = ec2.SubnetSelection(subnet_group_name=subnet_group_name)
|
|
511
|
-
|
|
512
|
-
# Configure health check
|
|
513
|
-
health_check_type = autoscaling.HealthCheck.ec2()
|
|
514
|
-
if self.asg_config.health_check_type.upper() == "ELB":
|
|
515
|
-
health_check_type = autoscaling.HealthCheck.elb(
|
|
516
|
-
grace=Duration.seconds(self.asg_config.health_check_grace_period)
|
|
517
|
-
)
|
|
518
|
-
|
|
519
|
-
# Create Auto Scaling Group
|
|
520
|
-
asg = autoscaling.AutoScalingGroup(
|
|
521
|
-
self,
|
|
522
|
-
asg_name,
|
|
523
|
-
vpc=self.vpc,
|
|
524
|
-
vpc_subnets=subnets,
|
|
525
|
-
min_capacity=self.asg_config.min_capacity,
|
|
526
|
-
max_capacity=self.asg_config.max_capacity,
|
|
527
|
-
desired_capacity=self.asg_config.desired_capacity,
|
|
528
|
-
launch_template=self.launch_template,
|
|
529
|
-
health_check=health_check_type,
|
|
530
|
-
cooldown=Duration.seconds(self.asg_config.cooldown),
|
|
531
|
-
termination_policies=[
|
|
532
|
-
autoscaling.TerminationPolicy(policy)
|
|
533
|
-
for policy in self.asg_config.termination_policies
|
|
534
|
-
],
|
|
535
|
-
)
|
|
536
|
-
|
|
537
|
-
# Attach to target groups after ASG creation
|
|
538
|
-
self._attach_target_groups(asg)
|
|
539
|
-
|
|
540
|
-
# Configure update policy
|
|
541
|
-
# Only apply update policy if it was explicitly configured
|
|
542
|
-
if "update_policy" in self.stack_config.dictionary.get("auto_scaling", {}):
|
|
543
|
-
update_policy = self.asg_config.update_policy
|
|
544
|
-
# Apply the update policy to the ASG's CloudFormation resource
|
|
545
|
-
cfn_asg = asg.node.default_child
|
|
546
|
-
cfn_asg.add_override(
|
|
547
|
-
"UpdatePolicy",
|
|
548
|
-
{
|
|
549
|
-
"AutoScalingRollingUpdate": {
|
|
550
|
-
"MinInstancesInService": update_policy.get(
|
|
551
|
-
"min_instances_in_service", 1
|
|
552
|
-
),
|
|
553
|
-
"MaxBatchSize": update_policy.get("max_batch_size", 1),
|
|
554
|
-
"PauseTime": f"PT{update_policy.get('pause_time', 300) // 60}M",
|
|
555
|
-
}
|
|
556
|
-
},
|
|
557
|
-
)
|
|
558
|
-
|
|
559
|
-
# Add tags
|
|
560
|
-
for key, value in self.asg_config.tags.items():
|
|
561
|
-
cdk.Tags.of(asg).add(key, value)
|
|
562
|
-
|
|
563
|
-
return asg
|
|
564
|
-
|
|
565
|
-
def _configure_scaling_policies(self) -> None:
|
|
566
|
-
"""Configure scaling policies for the Auto Scaling Group"""
|
|
567
|
-
for policy in self.asg_config.scaling_policies:
|
|
568
|
-
policy_type = policy.get("type", "target_tracking")
|
|
569
|
-
|
|
570
|
-
if policy_type == "target_tracking":
|
|
571
|
-
self.auto_scaling_group.scale_on_metric(
|
|
572
|
-
f"{self.asg_config.name}-{policy.get('name', 'scaling-policy')}",
|
|
573
|
-
metric=self._get_metric(policy),
|
|
574
|
-
scaling_steps=self._get_scaling_steps(policy),
|
|
575
|
-
adjustment_type=autoscaling.AdjustmentType.CHANGE_IN_CAPACITY,
|
|
576
|
-
)
|
|
577
|
-
elif policy_type == "step":
|
|
578
|
-
self.auto_scaling_group.scale_on_metric(
|
|
579
|
-
f"{self.asg_config.name}-{policy.get('name', 'scaling-policy')}",
|
|
580
|
-
metric=self._get_metric(policy),
|
|
581
|
-
scaling_steps=self._get_scaling_steps(policy),
|
|
582
|
-
adjustment_type=autoscaling.AdjustmentType.CHANGE_IN_CAPACITY,
|
|
583
|
-
)
|
|
584
|
-
|
|
585
|
-
def _get_metric(self, policy: Dict[str, Any]) -> cloudwatch.Metric:
|
|
586
|
-
"""Get metric for scaling policy"""
|
|
587
|
-
# This is a simplified implementation
|
|
588
|
-
# In a real-world scenario, you would use CloudWatch metrics
|
|
589
|
-
return cloudwatch.Metric(
|
|
590
|
-
namespace="AWS/EC2",
|
|
591
|
-
metric_name=policy.get("metric_name", "CPUUtilization"),
|
|
592
|
-
dimensions_map={
|
|
593
|
-
"AutoScalingGroupName": self.auto_scaling_group.auto_scaling_group_name
|
|
594
|
-
},
|
|
595
|
-
statistic=policy.get("statistic", "Average"),
|
|
596
|
-
period=Duration.seconds(policy.get("period", 60)),
|
|
597
|
-
)
|
|
598
|
-
|
|
599
|
-
def _get_scaling_steps(
|
|
600
|
-
self, policy: Dict[str, Any]
|
|
601
|
-
) -> List[autoscaling.ScalingInterval]:
|
|
602
|
-
"""Get scaling steps for scaling policy"""
|
|
603
|
-
steps = policy.get("steps", [])
|
|
604
|
-
scaling_intervals = []
|
|
605
|
-
|
|
606
|
-
for step in steps:
|
|
607
|
-
# Handle upper bound - if not specified, don't set it (let CDK handle it)
|
|
608
|
-
interval_kwargs = {
|
|
609
|
-
"lower": step.get("lower", 0),
|
|
610
|
-
"change": step.get("change", 1),
|
|
611
|
-
}
|
|
612
|
-
|
|
613
|
-
# Only set upper if it's explicitly provided
|
|
614
|
-
if "upper" in step:
|
|
615
|
-
interval_kwargs["upper"] = step["upper"]
|
|
616
|
-
|
|
617
|
-
scaling_intervals.append(autoscaling.ScalingInterval(**interval_kwargs))
|
|
618
|
-
|
|
619
|
-
return scaling_intervals
|
|
620
|
-
|
|
621
|
-
def _add_outputs(self, asg_name: str) -> None:
|
|
622
|
-
"""Add CloudFormation outputs for the Auto Scaling Group"""
|
|
623
|
-
if self.auto_scaling_group:
|
|
624
|
-
# Auto Scaling Group Name
|
|
625
|
-
cdk.CfnOutput(
|
|
626
|
-
self,
|
|
627
|
-
f"{asg_name}-name",
|
|
628
|
-
value=self.auto_scaling_group.auto_scaling_group_name,
|
|
629
|
-
export_name=f"{self.deployment.build_resource_name(asg_name)}-name",
|
|
630
|
-
)
|
|
631
|
-
|
|
632
|
-
# Auto Scaling Group ARN
|
|
633
|
-
cdk.CfnOutput(
|
|
634
|
-
self,
|
|
635
|
-
f"{asg_name}-arn",
|
|
636
|
-
value=self.auto_scaling_group.auto_scaling_group_arn,
|
|
637
|
-
export_name=f"{self.deployment.build_resource_name(asg_name)}-arn",
|
|
638
|
-
)
|
|
639
|
-
|
|
640
|
-
# Launch Template ID
|
|
641
|
-
if self.launch_template:
|
|
642
|
-
cdk.CfnOutput(
|
|
643
|
-
self,
|
|
644
|
-
f"{asg_name}-launch-template-id",
|
|
645
|
-
value=self.launch_template.launch_template_id,
|
|
646
|
-
export_name=f"{self.deployment.build_resource_name(asg_name)}-launch-template-id",
|
|
647
|
-
)
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
def _add_scaling_policies(self) -> None:
|
|
651
|
-
"""Add scaling policies to the Auto Scaling Group"""
|
|
652
|
-
for policy_config in self.asg_config.scaling_policies:
|
|
653
|
-
# Scaling policy implementation would go here
|
|
654
|
-
pass
|
|
655
|
-
|
|
656
|
-
def _add_scheduled_actions(self) -> None:
|
|
657
|
-
"""Add scheduled actions to the Auto Scaling Group"""
|
|
658
|
-
for action_config in self.asg_config.scheduled_actions:
|
|
659
|
-
# Scheduled action implementation would go here
|
|
660
|
-
pass
|
|
661
|
-
|
|
662
|
-
def _create_ecs_cluster_if_needed(self, asg_name: str):
    """
    ECS cluster creation should be handled by the dedicated EcsClusterStack module.
    This method only handles SSM imports for cluster name injection.

    Args:
        asg_name: Resource name of the ASG (currently unused; kept for
            interface stability).
    """
    # Check if ECS cluster name is available via SSM imports
    if self.has_ssm_import("ecs_cluster_name"):
        # Fix: original used an f-string with no placeholders (F541)
        logger.info("ECS cluster name available via SSM imports")
        # Inject cluster name into user data if available
        if self.user_data and self.user_data_commands:
            self._inject_cluster_name_into_user_data()
        return

    logger.warning(
        "No ECS cluster name found in SSM imports. "
        "Use the dedicated EcsClusterStack module to create ECS clusters."
    )
|
|
679
|
-
|
|
680
|
-
def _inject_cluster_name_into_user_data(self) -> None:
    """Inject the ECS cluster name into user data commands using SSM imports.

    Rewrites any command that already sets ``ECS_CLUSTER=`` to use the
    imported cluster name; if no command sets it, appends an ``echo``
    writing it to ``/etc/ecs/ecs.config``. No-op (with a warning) when
    the SSM import is absent.
    """
    # Check if ECS cluster name is available via SSM imports
    if self.has_ssm_import("ecs_cluster_name"):
        cluster_name = self.get_ssm_imported_value("ecs_cluster_name")
        logger.info(f"Using ECS cluster name from SSM: {cluster_name}")
    else:
        logger.warning("No ECS cluster name found in SSM imports, skipping cluster name injection")
        return

    injected_commands = []
    cluster_name_injected = False

    for command in self.user_data_commands:
        # If this command already sets ECS_CLUSTER, replace its value.
        if 'ECS_CLUSTER=' in command:
            # Bug fix: the original `split('ECS_CLUSTER=')` + `len(parts) > 1`
            # check had an unreachable else branch (a successful `in` test
            # guarantees split yields >= 2 parts), and the unbounded split
            # silently dropped text after a second ECS_CLUSTER= occurrence.
            # partition() keeps everything after the first marker intact.
            before, _, remainder = command.partition('ECS_CLUSTER=')
            # Drop the old value (up to the first whitespace), keep the rest.
            after_parts = remainder.split(None, 1)
            after = after_parts[1] if len(after_parts) > 1 else ''
            injected_commands.append(
                f"{before}ECS_CLUSTER={cluster_name} {after}".strip()
            )
            cluster_name_injected = True
        else:
            injected_commands.append(command)

    # If no ECS_CLUSTER was found in existing commands, add it
    if not cluster_name_injected:
        injected_commands.append(f"echo ECS_CLUSTER={cluster_name} >> /etc/ecs/ecs.config")

    # Update the user data with the injected commands
    self.user_data_commands = injected_commands

    # If a user data object exists, recreate it with the updated commands
    if hasattr(self, 'user_data') and self.user_data:
        self.user_data = self._recreate_user_data_with_commands(injected_commands)
|
|
722
|
-
|
|
723
|
-
def _recreate_user_data_with_commands(self, commands: List[str]) -> ec2.UserData:
    """Build a fresh Linux user-data object containing *commands* in order."""
    fresh_user_data = ec2.UserData.for_linux()
    for shell_line in commands:
        fresh_user_data.add_commands(shell_line)
    return fresh_user_data
|
|
731
|
-
|
|
732
|
-
def _export_resources(self, asg_name: str) -> None:
|
|
733
|
-
"""Export stack resources to SSM and CloudFormation outputs"""
|
|
734
|
-
# Export ASG name
|
|
735
|
-
cdk.CfnOutput(
|
|
736
|
-
self,
|
|
737
|
-
f"{asg_name}-name",
|
|
738
|
-
value=self.auto_scaling_group.auto_scaling_group_name,
|
|
739
|
-
export_name=f"{self.deployment.build_resource_name(asg_name)}-name",
|
|
740
|
-
)
|
|
741
|
-
|
|
742
|
-
# Export ASG ARN
|
|
743
|
-
cdk.CfnOutput(
|
|
744
|
-
self,
|
|
745
|
-
f"{asg_name}-arn",
|
|
746
|
-
value=self.auto_scaling_group.auto_scaling_group_arn,
|
|
747
|
-
export_name=f"{self.deployment.build_resource_name(asg_name)}-arn",
|
|
748
|
-
)
|