cdk-factory 0.18.5__py3-none-any.whl → 0.18.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cdk-factory might be problematic. Click here for more details.
- cdk_factory/interfaces/standardized_ssm_mixin.py +39 -35
- cdk_factory/stack_library/auto_scaling/auto_scaling_stack_standardized.py +5 -26
- cdk_factory/stack_library/load_balancer/load_balancer_stack.py +27 -25
- cdk_factory/stack_library/rds/rds_stack.py +64 -47
- cdk_factory/version.py +1 -1
- {cdk_factory-0.18.5.dist-info → cdk_factory-0.18.6.dist-info}/METADATA +1 -1
- {cdk_factory-0.18.5.dist-info → cdk_factory-0.18.6.dist-info}/RECORD +10 -11
- cdk_factory/stack_library/auto_scaling/auto_scaling_old.py +0 -721
- {cdk_factory-0.18.5.dist-info → cdk_factory-0.18.6.dist-info}/WHEEL +0 -0
- {cdk_factory-0.18.5.dist-info → cdk_factory-0.18.6.dist-info}/entry_points.txt +0 -0
- {cdk_factory-0.18.5.dist-info → cdk_factory-0.18.6.dist-info}/licenses/LICENSE +0 -0
|
@@ -531,6 +531,44 @@ class StandardizedSsmMixin:
|
|
|
531
531
|
return self._ssm_exported_values.copy()
|
|
532
532
|
|
|
533
533
|
|
|
534
|
+
def get_subnet_ids(self, config) -> List[str]:
|
|
535
|
+
"""
|
|
536
|
+
Helper function to parse subnet IDs from SSM imports.
|
|
537
|
+
|
|
538
|
+
This common pattern handles:
|
|
539
|
+
1. Comma-separated subnet ID strings from SSM
|
|
540
|
+
2. List of subnet IDs from SSM
|
|
541
|
+
3. Fallback to config attributes
|
|
542
|
+
|
|
543
|
+
Args:
|
|
544
|
+
config: Configuration object that might have subnet_ids attribute
|
|
545
|
+
|
|
546
|
+
Returns:
|
|
547
|
+
List of subnet IDs (empty list if not found or invalid format)
|
|
548
|
+
"""
|
|
549
|
+
# Use the standardized SSM imports
|
|
550
|
+
ssm_imports = self.get_all_ssm_imports()
|
|
551
|
+
if "subnet_ids" in ssm_imports:
|
|
552
|
+
subnet_ids = ssm_imports["subnet_ids"]
|
|
553
|
+
|
|
554
|
+
# Handle comma-separated string or list
|
|
555
|
+
if isinstance(subnet_ids, str):
|
|
556
|
+
# Split comma-separated string
|
|
557
|
+
parsed_ids = [sid.strip() for sid in subnet_ids.split(',') if sid.strip()]
|
|
558
|
+
return parsed_ids
|
|
559
|
+
elif isinstance(subnet_ids, list):
|
|
560
|
+
return subnet_ids
|
|
561
|
+
else:
|
|
562
|
+
logger.warning(f"Unexpected subnet_ids type: {type(subnet_ids)}")
|
|
563
|
+
return []
|
|
564
|
+
|
|
565
|
+
# Fallback: Check config attributes
|
|
566
|
+
elif hasattr(config, 'subnet_ids') and config.subnet_ids:
|
|
567
|
+
return config.subnet_ids
|
|
568
|
+
|
|
569
|
+
else:
|
|
570
|
+
logger.warning("No subnet IDs found, using default behavior")
|
|
571
|
+
return []
|
|
534
572
|
|
|
535
573
|
class ValidationResult:
|
|
536
574
|
"""Result of configuration validation."""
|
|
@@ -614,38 +652,4 @@ class SsmStandardValidator:
|
|
|
614
652
|
return errors
|
|
615
653
|
|
|
616
654
|
|
|
617
|
-
|
|
618
|
-
"""
|
|
619
|
-
Helper function to parse subnet IDs from SSM imports.
|
|
620
|
-
|
|
621
|
-
This common pattern handles:
|
|
622
|
-
1. Comma-separated subnet ID strings from SSM
|
|
623
|
-
2. List of subnet IDs from SSM
|
|
624
|
-
3. CDK Token resolution for deployment-time values
|
|
625
|
-
|
|
626
|
-
Args:
|
|
627
|
-
subnet_ids_key: The key used for subnet IDs in SSM imports (default: "subnet_ids")
|
|
628
|
-
|
|
629
|
-
Returns:
|
|
630
|
-
List of subnet IDs (empty list if not found or invalid format)
|
|
631
|
-
"""
|
|
632
|
-
ssm_imports = self.get_all_ssm_imports()
|
|
633
|
-
|
|
634
|
-
if subnet_ids_key not in ssm_imports:
|
|
635
|
-
logger.warning(f"No subnet IDs found in SSM imports with key: {subnet_ids_key}")
|
|
636
|
-
return []
|
|
637
|
-
|
|
638
|
-
subnet_ids = ssm_imports[subnet_ids_key]
|
|
639
|
-
|
|
640
|
-
# Handle comma-separated string or list
|
|
641
|
-
if isinstance(subnet_ids, str):
|
|
642
|
-
# Split comma-separated string
|
|
643
|
-
parsed_ids = [sid.strip() for sid in subnet_ids.split(',') if sid.strip()]
|
|
644
|
-
logger.info(f"Parsed {len(parsed_ids)} subnet IDs from comma-separated string")
|
|
645
|
-
return parsed_ids
|
|
646
|
-
elif isinstance(subnet_ids, list):
|
|
647
|
-
logger.info(f"Using {len(subnet_ids)} subnet IDs from list")
|
|
648
|
-
return subnet_ids
|
|
649
|
-
else:
|
|
650
|
-
logger.warning(f"Unexpected subnet_ids type: {type(subnet_ids)}")
|
|
651
|
-
return []
|
|
655
|
+
|
|
@@ -211,33 +211,12 @@ class AutoScalingStack(IStack, VPCProviderMixin, StandardizedSsmMixin):
|
|
|
211
211
|
Get subnet IDs using standardized SSM approach.
|
|
212
212
|
"""
|
|
213
213
|
# Primary method: Use standardized SSM imports
|
|
214
|
-
ssm_imports = self._get_ssm_imports()
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
subnet_ids = ssm_imports["subnet_ids"]
|
|
218
|
-
|
|
219
|
-
# Handle comma-separated string or list
|
|
220
|
-
if isinstance(subnet_ids, str):
|
|
221
|
-
# Split comma-separated string
|
|
222
|
-
parsed_ids = [sid.strip() for sid in subnet_ids.split(',') if sid.strip()]
|
|
223
|
-
return parsed_ids
|
|
224
|
-
elif isinstance(subnet_ids, list):
|
|
225
|
-
return subnet_ids
|
|
226
|
-
else:
|
|
227
|
-
logger.warning(f"Unexpected subnet_ids type: {type(subnet_ids)}")
|
|
228
|
-
return []
|
|
229
|
-
|
|
230
|
-
# Fallback: Use VPC provider mixin (backward compatibility)
|
|
231
|
-
elif hasattr(self, '_get_subnets_from_provider'):
|
|
232
|
-
return self._get_subnets_from_provider()
|
|
233
|
-
|
|
234
|
-
# Final fallback: Direct configuration
|
|
235
|
-
elif hasattr(self.asg_config, 'subnet_ids') and self.asg_config.subnet_ids:
|
|
236
|
-
return self.asg_config.subnet_ids
|
|
214
|
+
# ssm_imports = self._get_ssm_imports()
|
|
215
|
+
|
|
216
|
+
subnet_ids = self.get_subnet_ids(self.asg_config)
|
|
237
217
|
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
return []
|
|
218
|
+
return subnet_ids
|
|
219
|
+
|
|
241
220
|
|
|
242
221
|
def _create_instance_role(self, asg_name: str) -> iam.Role:
|
|
243
222
|
"""Create IAM role for EC2 instances"""
|
|
@@ -156,16 +156,22 @@ class LoadBalancerStack(IStack, VPCProviderMixin, StandardizedSsmMixin):
|
|
|
156
156
|
|
|
157
157
|
# If subnets is None, check if we have SSM-imported subnet_ids as a token
|
|
158
158
|
# We need to use Fn.Split to convert the comma-separated string to an array
|
|
159
|
-
if subnets is None
|
|
160
|
-
|
|
161
|
-
if
|
|
162
|
-
|
|
163
|
-
#
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
"
|
|
167
|
-
cdk.
|
|
168
|
-
|
|
159
|
+
if subnets is None:
|
|
160
|
+
subnet_ids = self.get_subnet_ids(self.lb_config)
|
|
161
|
+
if subnet_ids:
|
|
162
|
+
# For CloudFormation token resolution, we still need Fn.split
|
|
163
|
+
# but we use the helper to determine if subnet IDs are available
|
|
164
|
+
ssm_imports = self.get_all_ssm_imports()
|
|
165
|
+
if "subnet_ids" in ssm_imports:
|
|
166
|
+
subnet_ids_value = ssm_imports["subnet_ids"]
|
|
167
|
+
if cdk.Token.is_unresolved(subnet_ids_value):
|
|
168
|
+
logger.info("Using Fn.Split to convert comma-separated subnet IDs token to array")
|
|
169
|
+
# Use CloudFormation escape hatch to set Subnets property with Fn.Split
|
|
170
|
+
cfn_lb = load_balancer.node.default_child
|
|
171
|
+
cfn_lb.add_property_override(
|
|
172
|
+
"Subnets",
|
|
173
|
+
cdk.Fn.split(",", subnet_ids_value)
|
|
174
|
+
)
|
|
169
175
|
|
|
170
176
|
# Add tags
|
|
171
177
|
for key, value in self.lb_config.tags.items():
|
|
@@ -261,9 +267,16 @@ class LoadBalancerStack(IStack, VPCProviderMixin, StandardizedSsmMixin):
|
|
|
261
267
|
"""Get subnets for the Load Balancer"""
|
|
262
268
|
subnets = []
|
|
263
269
|
|
|
264
|
-
#
|
|
265
|
-
|
|
266
|
-
|
|
270
|
+
# Use the standardized helper function to get subnet IDs
|
|
271
|
+
subnet_ids = self.get_subnet_ids(self.lb_config)
|
|
272
|
+
|
|
273
|
+
if not subnet_ids:
|
|
274
|
+
return None
|
|
275
|
+
|
|
276
|
+
# Check if we have unresolved tokens from SSM
|
|
277
|
+
ssm_imports = self.get_all_ssm_imports()
|
|
278
|
+
if "subnet_ids" in ssm_imports:
|
|
279
|
+
subnet_ids_value = ssm_imports["subnet_ids"]
|
|
267
280
|
|
|
268
281
|
# Check if this is a CDK token (unresolved SSM parameter)
|
|
269
282
|
if cdk.Token.is_unresolved(subnet_ids_value):
|
|
@@ -272,19 +285,8 @@ class LoadBalancerStack(IStack, VPCProviderMixin, StandardizedSsmMixin):
|
|
|
272
285
|
# The ALB construct will handle the token-based subnet IDs
|
|
273
286
|
logger.info("Subnet IDs are unresolved tokens, will use vpc_subnets with token resolution")
|
|
274
287
|
return None
|
|
275
|
-
elif isinstance(subnet_ids_value, str):
|
|
276
|
-
# If it's a resolved string, split it
|
|
277
|
-
subnet_ids = [s.strip() for s in subnet_ids_value.split(',')]
|
|
278
|
-
elif isinstance(subnet_ids_value, list):
|
|
279
|
-
subnet_ids = subnet_ids_value
|
|
280
|
-
else:
|
|
281
|
-
subnet_ids = [subnet_ids_value]
|
|
282
|
-
else:
|
|
283
|
-
subnet_ids = self.lb_config.subnets
|
|
284
|
-
|
|
285
|
-
if not subnet_ids:
|
|
286
|
-
return None
|
|
287
288
|
|
|
289
|
+
# Convert subnet IDs to subnet objects
|
|
288
290
|
for idx, subnet_id in enumerate(subnet_ids):
|
|
289
291
|
subnets.append(
|
|
290
292
|
ec2.Subnet.from_subnet_id(self, f"Subnet-{idx}", subnet_id)
|
|
@@ -69,8 +69,18 @@ class RdsStack(IStack, VPCProviderMixin, StandardizedSsmMixin):
|
|
|
69
69
|
self.rds_config = RdsConfig(stack_config.dictionary.get("rds", {}), deployment)
|
|
70
70
|
db_name = deployment.build_resource_name(self.rds_config.name)
|
|
71
71
|
|
|
72
|
-
#
|
|
73
|
-
self.
|
|
72
|
+
# Setup standardized SSM integration
|
|
73
|
+
self.setup_standardized_ssm_integration(
|
|
74
|
+
scope=self,
|
|
75
|
+
config=self.rds_config,
|
|
76
|
+
resource_type="rds",
|
|
77
|
+
resource_name=self.rds_config.name,
|
|
78
|
+
deployment=deployment,
|
|
79
|
+
workload=workload
|
|
80
|
+
)
|
|
81
|
+
|
|
82
|
+
# Process SSM imports
|
|
83
|
+
self.process_standardized_ssm_imports()
|
|
74
84
|
|
|
75
85
|
# Get VPC and security groups
|
|
76
86
|
self.security_groups = self._get_security_groups()
|
|
@@ -87,40 +97,13 @@ class RdsStack(IStack, VPCProviderMixin, StandardizedSsmMixin):
|
|
|
87
97
|
# Export to SSM Parameter Store
|
|
88
98
|
self._export_ssm_parameters(db_name)
|
|
89
99
|
|
|
90
|
-
def _process_ssm_imports(self) -> None:
|
|
91
|
-
"""Process SSM imports from configuration"""
|
|
92
|
-
ssm_imports = self.rds_config.ssm_imports
|
|
93
|
-
|
|
94
|
-
if not ssm_imports:
|
|
95
|
-
logger.debug("No SSM imports configured for RDS")
|
|
96
|
-
return
|
|
97
|
-
|
|
98
|
-
logger.info(f"Processing {len(ssm_imports)} SSM imports for RDS")
|
|
99
|
-
|
|
100
|
-
for param_key, param_path in ssm_imports.items():
|
|
101
|
-
try:
|
|
102
|
-
if not param_path.startswith('/'):
|
|
103
|
-
param_path = f"/{param_path}"
|
|
104
|
-
|
|
105
|
-
construct_id = f"ssm-import-{param_key}-{hash(param_path) % 10000}"
|
|
106
|
-
param = ssm.StringParameter.from_string_parameter_name(
|
|
107
|
-
self, construct_id, param_path
|
|
108
|
-
)
|
|
109
|
-
|
|
110
|
-
self.ssm_imported_values[param_key] = param.string_value
|
|
111
|
-
logger.info(f"Imported SSM parameter: {param_key} from {param_path}")
|
|
112
|
-
|
|
113
|
-
except Exception as e:
|
|
114
|
-
logger.error(f"Failed to import SSM parameter {param_key} from {param_path}: {e}")
|
|
115
|
-
raise
|
|
116
|
-
|
|
117
100
|
@property
|
|
118
101
|
def vpc(self) -> ec2.IVpc:
|
|
119
102
|
"""Get the VPC for the RDS instance using centralized VPC provider mixin."""
|
|
120
|
-
if self._vpc:
|
|
103
|
+
if hasattr(self, '_vpc') and self._vpc:
|
|
121
104
|
return self._vpc
|
|
122
105
|
|
|
123
|
-
#
|
|
106
|
+
# Resolve VPC using the centralized VPC provider mixin
|
|
124
107
|
self._vpc = self.resolve_vpc(
|
|
125
108
|
config=self.rds_config,
|
|
126
109
|
deployment=self.deployment,
|
|
@@ -133,8 +116,9 @@ class RdsStack(IStack, VPCProviderMixin, StandardizedSsmMixin):
|
|
|
133
116
|
security_groups = []
|
|
134
117
|
|
|
135
118
|
# Check SSM imports first for security group ID
|
|
136
|
-
|
|
137
|
-
|
|
119
|
+
ssm_imports = self.get_all_ssm_imports()
|
|
120
|
+
if "security_group_rds_id" in ssm_imports:
|
|
121
|
+
sg_id = ssm_imports["security_group_rds_id"]
|
|
138
122
|
security_groups.append(
|
|
139
123
|
ec2.SecurityGroup.from_security_group_id(
|
|
140
124
|
self, "RDSSecurityGroup", sg_id
|
|
@@ -151,27 +135,60 @@ class RdsStack(IStack, VPCProviderMixin, StandardizedSsmMixin):
|
|
|
151
135
|
|
|
152
136
|
return security_groups
|
|
153
137
|
|
|
138
|
+
def _get_subnet_selection(self) -> ec2.SubnetSelection:
|
|
139
|
+
"""
|
|
140
|
+
Get subnet selection based on available subnet types in the VPC.
|
|
141
|
+
|
|
142
|
+
RDS instances require private subnets for security, but we'll fall back
|
|
143
|
+
to available subnets if the preferred types aren't available.
|
|
144
|
+
"""
|
|
145
|
+
vpc = self.vpc
|
|
146
|
+
|
|
147
|
+
# Check for isolated subnets first (most secure for RDS)
|
|
148
|
+
if vpc.isolated_subnets:
|
|
149
|
+
logger.info("Using isolated subnets for RDS instance")
|
|
150
|
+
return ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_ISOLATED)
|
|
151
|
+
|
|
152
|
+
# Check for private subnets next
|
|
153
|
+
elif vpc.private_subnets:
|
|
154
|
+
logger.info("Using private subnets for RDS instance")
|
|
155
|
+
return ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS)
|
|
156
|
+
|
|
157
|
+
# Fall back to public subnets (not recommended for production)
|
|
158
|
+
elif vpc.public_subnets:
|
|
159
|
+
logger.warning("Using public subnets for RDS instance - not recommended for production")
|
|
160
|
+
return ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)
|
|
161
|
+
|
|
162
|
+
else:
|
|
163
|
+
raise ValueError("No subnets available in VPC for RDS instance")
|
|
164
|
+
|
|
154
165
|
def _create_db_instance(self, db_name: str) -> rds.DatabaseInstance:
|
|
155
166
|
"""Create a new RDS instance"""
|
|
156
167
|
# Configure subnet group
|
|
157
168
|
# If we have subnet IDs from SSM, create a DB subnet group explicitly
|
|
158
169
|
db_subnet_group = None
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
170
|
+
subnet_ids = self.get_subnet_ids(self.rds_config)
|
|
171
|
+
|
|
172
|
+
if subnet_ids:
|
|
173
|
+
# For CloudFormation token resolution, we need to get the raw SSM value
|
|
174
|
+
# Use the standardized SSM imports
|
|
175
|
+
ssm_imports = self.get_all_ssm_imports()
|
|
176
|
+
if "subnet_ids" in ssm_imports:
|
|
177
|
+
subnet_ids_str = ssm_imports["subnet_ids"]
|
|
178
|
+
# Split the comma-separated token into a list for CloudFormation
|
|
179
|
+
subnet_ids_list = cdk.Fn.split(",", subnet_ids_str)
|
|
180
|
+
|
|
181
|
+
# Create DB subnet group with the token-based subnet list
|
|
182
|
+
db_subnet_group = rds.CfnDBSubnetGroup(
|
|
183
|
+
self,
|
|
184
|
+
"DBSubnetGroup",
|
|
185
|
+
db_subnet_group_description=f"Subnet group for {db_name}",
|
|
186
|
+
subnet_ids=subnet_ids_list,
|
|
187
|
+
db_subnet_group_name=f"{db_name}-subnet-group"
|
|
188
|
+
)
|
|
172
189
|
|
|
173
190
|
# Configure subnet selection for VPC (when not using SSM imports)
|
|
174
|
-
subnets = None if db_subnet_group else
|
|
191
|
+
subnets = None if db_subnet_group else self._get_subnet_selection()
|
|
175
192
|
|
|
176
193
|
# Configure engine
|
|
177
194
|
engine_version = None
|
cdk_factory/version.py
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
__version__ = "0.18.
|
|
1
|
+
__version__ = "0.18.6"
|
|
@@ -2,7 +2,7 @@ cdk_factory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
|
2
2
|
cdk_factory/app.py,sha256=RnX0-pwdTAPAdKJK_j13Zl8anf9zYKBwboR0KA8K8xM,10346
|
|
3
3
|
cdk_factory/cdk.json,sha256=SKZKhJ2PBpFH78j-F8S3VDYW-lf76--Q2I3ON-ZIQfw,3106
|
|
4
4
|
cdk_factory/cli.py,sha256=FGbCTS5dYCNsfp-etshzvFlGDCjC28r6rtzYbe7KoHI,6407
|
|
5
|
-
cdk_factory/version.py,sha256=
|
|
5
|
+
cdk_factory/version.py,sha256=uKA6-JXiIkK71mAwJK5D762_yrliPWqnH0bkkAczVnU,23
|
|
6
6
|
cdk_factory/builds/README.md,sha256=9BBWd7bXpyKdMU_g2UljhQwrC9i5O_Tvkb6oPvndoZk,90
|
|
7
7
|
cdk_factory/commands/command_loader.py,sha256=QbLquuP_AdxtlxlDy-2IWCQ6D-7qa58aphnDPtp_uTs,3744
|
|
8
8
|
cdk_factory/configurations/base_config.py,sha256=eJ3Pl3GWk1jVr_bYQaaWlw4_-ZiFGaiXllI_fOOX1i0,9323
|
|
@@ -66,7 +66,7 @@ cdk_factory/constructs/sqs/policies/sqs_policies.py,sha256=4p0G8G-fqNKSr68I55fvq
|
|
|
66
66
|
cdk_factory/interfaces/istack.py,sha256=3xqGw5kNTt_KeLHdMxI7rIR0YORqcWQOqsacmDlTAv0,1167
|
|
67
67
|
cdk_factory/interfaces/live_ssm_resolver.py,sha256=3FIr9a02SXqZmbFs3RT0WxczWEQR_CF7QSt7kWbDrVE,8163
|
|
68
68
|
cdk_factory/interfaces/networked_stack_mixin.py,sha256=69pJp4IE1n_tdHh2UZQ08O6ZW-v5P4uJJ_fleNaj6Nw,2897
|
|
69
|
-
cdk_factory/interfaces/standardized_ssm_mixin.py,sha256=
|
|
69
|
+
cdk_factory/interfaces/standardized_ssm_mixin.py,sha256=O9d0LQY3r9hP4onE8g1448BNnR899L3hDqIFO1rWWMI,24238
|
|
70
70
|
cdk_factory/interfaces/vpc_provider_mixin.py,sha256=Kj0mmZd54NINprixJLs8zL-WWiSd0AQBtGdwNg8cz14,8207
|
|
71
71
|
cdk_factory/lambdas/health_handler.py,sha256=dd40ykKMxWCFEIyp2ZdQvAGNjw_ylI9CSm1N24Hp2ME,196
|
|
72
72
|
cdk_factory/lambdas/edge/ip_gate/handler.py,sha256=gUevgX462mqGYddtQIyJ1-Jk3oXhFmbmd46jlqjai9E,10657
|
|
@@ -86,8 +86,7 @@ cdk_factory/stack_library/acm/__init__.py,sha256=4FNRLykblcKZvq_wieYwvv9N_jgrZnJ
|
|
|
86
86
|
cdk_factory/stack_library/acm/acm_stack.py,sha256=QJ3GkT17PmWoGkfO5Um02hvrfyJ9HbiPMnclwDP7IbA,5846
|
|
87
87
|
cdk_factory/stack_library/api_gateway/api_gateway_stack.py,sha256=_wbPBsgh7FHq9cnL44CiuffBj3XCO5ErQx_yclxFsVY,39669
|
|
88
88
|
cdk_factory/stack_library/auto_scaling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
89
|
-
cdk_factory/stack_library/auto_scaling/
|
|
90
|
-
cdk_factory/stack_library/auto_scaling/auto_scaling_stack_standardized.py,sha256=qF_-cs9gAYGCjlO4C0UE79526jpqhoz6SpEIN3h1t48,22720
|
|
89
|
+
cdk_factory/stack_library/auto_scaling/auto_scaling_stack_standardized.py,sha256=X167Tj43TEIcacI9ZuOFWqyN3XlVcm_difLx0qE1Kvk,21743
|
|
91
90
|
cdk_factory/stack_library/aws_lambdas/lambda_stack.py,sha256=SFbBPvvCopbyiuYtq-O5sQkFCf94Wzua6aDUXiFDSB4,26161
|
|
92
91
|
cdk_factory/stack_library/buckets/README.md,sha256=XkK3UNVtRLE7NtUvbhCOBBYUYi8hlrrSaI1s3GJVrqI,78
|
|
93
92
|
cdk_factory/stack_library/buckets/bucket_stack.py,sha256=SLoZqSffAqmeBBEVUQg54D_8Ad5UKdkjEAmKAVgAqQo,1778
|
|
@@ -104,11 +103,11 @@ cdk_factory/stack_library/ecs/ecs_service_stack.py,sha256=3en447kWBOqd0d_i2C8mRR
|
|
|
104
103
|
cdk_factory/stack_library/lambda_edge/__init__.py,sha256=ByBJ_CWdc4UtTmFBZH-6pzBMNkjkdtE65AmnB0Fs6lM,156
|
|
105
104
|
cdk_factory/stack_library/lambda_edge/lambda_edge_stack.py,sha256=ft5AxHy8__F90ZYDaoJwTjACGIfrn2Sd9Zr2CdHO7GE,16398
|
|
106
105
|
cdk_factory/stack_library/load_balancer/__init__.py,sha256=wZpKw2OecLJGdF5mPayCYAEhu2H3c2gJFFIxwXftGDU,52
|
|
107
|
-
cdk_factory/stack_library/load_balancer/load_balancer_stack.py,sha256=
|
|
106
|
+
cdk_factory/stack_library/load_balancer/load_balancer_stack.py,sha256=6PXrgg1laK2W5B2iKxVKJPWoZ4SjpAd35nmVyylAiqk,30202
|
|
108
107
|
cdk_factory/stack_library/monitoring/__init__.py,sha256=k1G_KDx47Aw0UugaL99PN_TKlyLK4nkJVApCaAK7GJg,153
|
|
109
108
|
cdk_factory/stack_library/monitoring/monitoring_stack.py,sha256=N_1YvEXE7fboH_S3kv_dSKZsufxMuPdFMjGzlNFpuSo,19283
|
|
110
109
|
cdk_factory/stack_library/rds/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
111
|
-
cdk_factory/stack_library/rds/rds_stack.py,sha256=
|
|
110
|
+
cdk_factory/stack_library/rds/rds_stack.py,sha256=lgPtTHdHCRsUBkdTrCQ_-fdZB6Ymrnlei5LSRDHs808,15403
|
|
112
111
|
cdk_factory/stack_library/route53/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
113
112
|
cdk_factory/stack_library/route53/route53_stack.py,sha256=mPUJta6maUNlkXup7xKsqq7gefsRI1w_ulInE29wJs4,8387
|
|
114
113
|
cdk_factory/stack_library/rum/__init__.py,sha256=gUrWQdzd4rZ2J0YzAQC8PsEGAS7QgyYjB2ZCUKWasy4,90
|
|
@@ -137,8 +136,8 @@ cdk_factory/utilities/os_execute.py,sha256=5Op0LY_8Y-pUm04y1k8MTpNrmQvcLmQHPQITE
|
|
|
137
136
|
cdk_factory/utils/api_gateway_utilities.py,sha256=If7Xu5s_UxmuV-kL3JkXxPLBdSVUKoLtohm0IUFoiV8,4378
|
|
138
137
|
cdk_factory/validation/config_validator.py,sha256=Pb0TkLiPFzUplBOgMorhRCVm08vEzZhRU5xXCDTa5CA,17602
|
|
139
138
|
cdk_factory/workload/workload_factory.py,sha256=yDI3cRhVI5ELNDcJPLpk9UY54Uind1xQoV3spzT4z7E,6068
|
|
140
|
-
cdk_factory-0.18.
|
|
141
|
-
cdk_factory-0.18.
|
|
142
|
-
cdk_factory-0.18.
|
|
143
|
-
cdk_factory-0.18.
|
|
144
|
-
cdk_factory-0.18.
|
|
139
|
+
cdk_factory-0.18.6.dist-info/METADATA,sha256=RirvlHJ4ioZ9NvqGjc8TpS80qGxPg2iJlgDUuwTlbVU,2451
|
|
140
|
+
cdk_factory-0.18.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
141
|
+
cdk_factory-0.18.6.dist-info/entry_points.txt,sha256=S1DPe0ORcdiwEALMN_WIo3UQrW_g4YdQCLEsc_b0Swg,53
|
|
142
|
+
cdk_factory-0.18.6.dist-info/licenses/LICENSE,sha256=NOtdOeLwg2il_XBJdXUPFPX8JlV4dqTdDGAd2-khxT8,1066
|
|
143
|
+
cdk_factory-0.18.6.dist-info/RECORD,,
|
|
@@ -1,721 +0,0 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Auto Scaling Group Stack Pattern for CDK-Factory
|
|
3
|
-
Maintainers: Eric Wilson
|
|
4
|
-
MIT License. See Project Root for the license information.
|
|
5
|
-
"""
|
|
6
|
-
|
|
7
|
-
from typing import Dict, Any, List, Optional
|
|
8
|
-
|
|
9
|
-
import aws_cdk as cdk
|
|
10
|
-
from aws_cdk import aws_ec2 as ec2
|
|
11
|
-
from aws_cdk import aws_autoscaling as autoscaling
|
|
12
|
-
from aws_cdk import aws_cloudwatch as cloudwatch
|
|
13
|
-
from aws_cdk import aws_iam as iam
|
|
14
|
-
from aws_cdk import aws_ssm as ssm
|
|
15
|
-
from aws_cdk import aws_ecs as ecs
|
|
16
|
-
from aws_cdk import Duration, Stack
|
|
17
|
-
from aws_lambda_powertools import Logger
|
|
18
|
-
from constructs import Construct
|
|
19
|
-
|
|
20
|
-
from cdk_factory.configurations.deployment import DeploymentConfig
|
|
21
|
-
from cdk_factory.configurations.stack import StackConfig
|
|
22
|
-
from cdk_factory.configurations.resources.auto_scaling import AutoScalingConfig
|
|
23
|
-
from cdk_factory.interfaces.istack import IStack
|
|
24
|
-
from cdk_factory.interfaces.vpc_provider_mixin import VPCProviderMixin
|
|
25
|
-
from cdk_factory.stack.stack_module_registry import register_stack
|
|
26
|
-
from cdk_factory.workload.workload_factory import WorkloadConfig
|
|
27
|
-
|
|
28
|
-
logger = Logger(service="AutoScalingStack")
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
@register_stack("auto_scaling_library_module_old")
|
|
32
|
-
@register_stack("auto_scaling_stack_old")
|
|
33
|
-
class AutoScalingStack(IStack, VPCProviderMixin):
|
|
34
|
-
"""
|
|
35
|
-
Reusable stack for AWS Auto Scaling Groups.
|
|
36
|
-
Supports creating EC2 Auto Scaling Groups with customizable configurations.
|
|
37
|
-
|
|
38
|
-
Uses enhanced SsmParameterMixin (via IStack) to eliminate SSM code duplication.
|
|
39
|
-
"""
|
|
40
|
-
|
|
41
|
-
def __init__(self, scope: Construct, id: str, **kwargs) -> None:
|
|
42
|
-
# Initialize parent class properly - IStack inherits from enhanced SsmParameterMixin
|
|
43
|
-
super().__init__(scope, id, **kwargs)
|
|
44
|
-
|
|
45
|
-
# Initialize VPC cache from mixin
|
|
46
|
-
self._initialize_vpc_cache()
|
|
47
|
-
|
|
48
|
-
self.asg_config = None
|
|
49
|
-
self.stack_config = None
|
|
50
|
-
self.deployment = None
|
|
51
|
-
self.workload = None
|
|
52
|
-
self.security_groups = []
|
|
53
|
-
self.auto_scaling_group = None
|
|
54
|
-
self.launch_template = None
|
|
55
|
-
self.instance_role = None
|
|
56
|
-
self.user_data = None
|
|
57
|
-
self.user_data_commands = [] # Store raw commands for ECS cluster detection
|
|
58
|
-
self.ecs_cluster = None
|
|
59
|
-
|
|
60
|
-
# SSM imports storage is now handled by the enhanced SsmParameterMixin via IStack
|
|
61
|
-
# VPC caching is now handled by VPCProviderMixin
|
|
62
|
-
|
|
63
|
-
def build(
|
|
64
|
-
self,
|
|
65
|
-
stack_config: StackConfig,
|
|
66
|
-
deployment: DeploymentConfig,
|
|
67
|
-
workload: WorkloadConfig,
|
|
68
|
-
) -> None:
|
|
69
|
-
"""Build the Auto Scaling Group stack"""
|
|
70
|
-
self._build(stack_config, deployment, workload)
|
|
71
|
-
|
|
72
|
-
def _build(
|
|
73
|
-
self,
|
|
74
|
-
stack_config: StackConfig,
|
|
75
|
-
deployment: DeploymentConfig,
|
|
76
|
-
workload: WorkloadConfig,
|
|
77
|
-
) -> None:
|
|
78
|
-
"""Internal build method for the Auto Scaling Group stack"""
|
|
79
|
-
self.stack_config = stack_config
|
|
80
|
-
self.deployment = deployment
|
|
81
|
-
self.workload = workload
|
|
82
|
-
|
|
83
|
-
self.asg_config = AutoScalingConfig(
|
|
84
|
-
stack_config.dictionary.get("auto_scaling", {}), deployment
|
|
85
|
-
)
|
|
86
|
-
asg_name = deployment.build_resource_name(self.asg_config.name)
|
|
87
|
-
|
|
88
|
-
# Process SSM imports using enhanced SsmParameterMixin
|
|
89
|
-
self.process_ssm_imports(self.asg_config, deployment, "Auto Scaling Group")
|
|
90
|
-
|
|
91
|
-
# Get security groups
|
|
92
|
-
self.security_groups = self._get_security_groups()
|
|
93
|
-
|
|
94
|
-
# Create IAM role for instances
|
|
95
|
-
self.instance_role = self._create_instance_role(asg_name)
|
|
96
|
-
|
|
97
|
-
# Create user data
|
|
98
|
-
self.user_data = self._create_user_data()
|
|
99
|
-
|
|
100
|
-
# Create ECS cluster if ECS configuration is detected
|
|
101
|
-
# This must happen before launch template creation so user data can be updated
|
|
102
|
-
self._create_ecs_cluster_if_needed(asg_name)
|
|
103
|
-
|
|
104
|
-
# Create launch template
|
|
105
|
-
self.launch_template = self._create_launch_template(asg_name)
|
|
106
|
-
|
|
107
|
-
# Create Auto Scaling Group
|
|
108
|
-
self.auto_scaling_group = self._create_auto_scaling_group(asg_name)
|
|
109
|
-
|
|
110
|
-
# Add scaling policies
|
|
111
|
-
self._add_scaling_policies()
|
|
112
|
-
|
|
113
|
-
# Add scheduled actions
|
|
114
|
-
self._add_scheduled_actions()
|
|
115
|
-
|
|
116
|
-
# Export resources
|
|
117
|
-
self._export_resources(asg_name)
|
|
118
|
-
|
|
119
|
-
@property
|
|
120
|
-
def vpc(self) -> ec2.IVpc:
|
|
121
|
-
"""Get the VPC for the Auto Scaling Group using VPCProviderMixin"""
|
|
122
|
-
if not self.asg_config:
|
|
123
|
-
raise AttributeError("AutoScalingStack not properly initialized. Call build() first.")
|
|
124
|
-
|
|
125
|
-
# Use VPCProviderMixin to resolve VPC with proper subnet handling
|
|
126
|
-
return self.resolve_vpc(
|
|
127
|
-
config=self.asg_config,
|
|
128
|
-
deployment=self.deployment,
|
|
129
|
-
workload=self.workload
|
|
130
|
-
)
|
|
131
|
-
|
|
132
|
-
def _get_target_group_arns(self) -> List[str]:
|
|
133
|
-
"""Get target group ARNs from SSM imports using enhanced SsmParameterMixin"""
|
|
134
|
-
target_group_arns = []
|
|
135
|
-
|
|
136
|
-
# Check if we have SSM imports for target groups using enhanced mixin
|
|
137
|
-
if self.has_ssm_import("target_group_arns"):
|
|
138
|
-
imported_tg_arns = self.get_ssm_imported_value("target_group_arns", [])
|
|
139
|
-
if isinstance(imported_tg_arns, list):
|
|
140
|
-
target_group_arns.extend(imported_tg_arns)
|
|
141
|
-
else:
|
|
142
|
-
target_group_arns.append(imported_tg_arns)
|
|
143
|
-
|
|
144
|
-
# see if we have any directly defined in the config
|
|
145
|
-
if self.asg_config.target_group_arns:
|
|
146
|
-
for arn in self.asg_config.target_group_arns:
|
|
147
|
-
logger.info(f"Adding target group ARN: {arn}")
|
|
148
|
-
target_group_arns.append(arn)
|
|
149
|
-
|
|
150
|
-
return target_group_arns
|
|
151
|
-
|
|
152
|
-
def _attach_target_groups(self, asg: autoscaling.AutoScalingGroup) -> None:
|
|
153
|
-
"""Attach the Auto Scaling Group to target groups"""
|
|
154
|
-
target_group_arns = self._get_target_group_arns()
|
|
155
|
-
|
|
156
|
-
if not target_group_arns:
|
|
157
|
-
logger.warning("No target group ARNs found for Auto Scaling Group")
|
|
158
|
-
print(
|
|
159
|
-
"⚠️ No target group ARNs found for Auto Scaling Group. Nothing will be attached."
|
|
160
|
-
)
|
|
161
|
-
return
|
|
162
|
-
|
|
163
|
-
# Get the underlying CloudFormation resource to add target group ARNs
|
|
164
|
-
cfn_asg = asg.node.default_child
|
|
165
|
-
cfn_asg.add_property_override("TargetGroupARNs", target_group_arns)
|
|
166
|
-
|
|
167
|
-
def _get_security_groups(self) -> List[ec2.ISecurityGroup]:
|
|
168
|
-
"""Get security groups for the Auto Scaling Group"""
|
|
169
|
-
security_groups = []
|
|
170
|
-
for sg_id in self.asg_config.security_group_ids:
|
|
171
|
-
# if the security group id contains a comma, it is a list of security group ids
|
|
172
|
-
if "," in sg_id:
|
|
173
|
-
blocks = sg_id.split(",")
|
|
174
|
-
for block in blocks:
|
|
175
|
-
security_groups.append(
|
|
176
|
-
ec2.SecurityGroup.from_security_group_id(
|
|
177
|
-
self, f"SecurityGroup-{block}", block
|
|
178
|
-
)
|
|
179
|
-
)
|
|
180
|
-
else:
|
|
181
|
-
# TODO: add some additional checks to make it more robust
|
|
182
|
-
security_groups.append(
|
|
183
|
-
ec2.SecurityGroup.from_security_group_id(
|
|
184
|
-
self, f"SecurityGroup-{sg_id}", sg_id
|
|
185
|
-
)
|
|
186
|
-
)
|
|
187
|
-
return security_groups
|
|
188
|
-
|
|
189
|
-
def _create_instance_role(self, asg_name: str) -> iam.Role:
|
|
190
|
-
"""Create IAM role for EC2 instances"""
|
|
191
|
-
role = iam.Role(
|
|
192
|
-
self,
|
|
193
|
-
f"{asg_name}-InstanceRole",
|
|
194
|
-
assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
|
|
195
|
-
role_name=f"{asg_name}-role",
|
|
196
|
-
)
|
|
197
|
-
|
|
198
|
-
# Add managed policies
|
|
199
|
-
for policy_name in self.asg_config.managed_policies:
|
|
200
|
-
role.add_managed_policy(
|
|
201
|
-
iam.ManagedPolicy.from_aws_managed_policy_name(policy_name)
|
|
202
|
-
)
|
|
203
|
-
|
|
204
|
-
# Add inline policies (for custom permissions like S3 bucket access)
|
|
205
|
-
for policy_config in self.asg_config.iam_inline_policies:
|
|
206
|
-
policy_name = policy_config.get("name", "CustomPolicy")
|
|
207
|
-
statements = policy_config.get("statements", [])
|
|
208
|
-
|
|
209
|
-
if not statements:
|
|
210
|
-
logger.warning(f"No statements found for inline policy {policy_name}, skipping")
|
|
211
|
-
continue
|
|
212
|
-
|
|
213
|
-
# Build policy statements
|
|
214
|
-
policy_statements = []
|
|
215
|
-
for stmt in statements:
|
|
216
|
-
effect = iam.Effect.ALLOW if stmt.get("effect", "Allow") == "Allow" else iam.Effect.DENY
|
|
217
|
-
actions = stmt.get("actions", [])
|
|
218
|
-
resources = stmt.get("resources", [])
|
|
219
|
-
|
|
220
|
-
if not actions or not resources:
|
|
221
|
-
logger.warning(f"Incomplete statement in policy {policy_name}, skipping")
|
|
222
|
-
continue
|
|
223
|
-
|
|
224
|
-
policy_statements.append(
|
|
225
|
-
iam.PolicyStatement(
|
|
226
|
-
effect=effect,
|
|
227
|
-
actions=actions,
|
|
228
|
-
resources=resources
|
|
229
|
-
)
|
|
230
|
-
)
|
|
231
|
-
|
|
232
|
-
if policy_statements:
|
|
233
|
-
role.add_to_principal_policy(policy_statements[0])
|
|
234
|
-
for stmt in policy_statements[1:]:
|
|
235
|
-
role.add_to_principal_policy(stmt)
|
|
236
|
-
|
|
237
|
-
logger.info(f"Added inline policy {policy_name} with {len(policy_statements)} statements")
|
|
238
|
-
|
|
239
|
-
return role
|
|
240
|
-
|
|
241
|
-
def _create_user_data(self) -> ec2.UserData:
    """Build the Linux user data for EC2 instances.

    Seeds the script with a strict-mode shell prologue, appends the
    configured commands (after ``{{VAR}}`` substitution), then layers in
    any external script files and container bootstrap commands.

    Returns:
        ec2.UserData: the assembled user data object.
    """
    data = ec2.UserData.for_linux()

    # Mirror every raw command here so later steps (e.g. ECS cluster name
    # injection) can inspect and rewrite them.
    self.user_data_commands = ["set -euxo pipefail"]
    data.add_commands("set -euxo pipefail")

    # Config-supplied commands, with placeholders resolved before adding.
    for raw in self.asg_config.user_data_commands:
        resolved = self._substitute_variables(raw)
        data.add_commands(resolved)
        self.user_data_commands.append(resolved)

    # Optional external script files (also variable-substituted).
    if self.asg_config.user_data_scripts:
        self._add_user_data_scripts_from_files(data)

    # Optional Docker/ECR container bootstrap.
    container_cfg = self.asg_config.container_config
    if container_cfg:
        self._add_container_user_data(data, container_cfg)

    return data
|
|
268
|
-
|
|
269
|
-
def _add_user_data_scripts_from_files(self, user_data: ec2.UserData) -> None:
    """
    Add user data scripts from external files with variable substitution.

    Each entry in ``asg_config.user_data_scripts`` is either:
      - ``{"type": "file", "path": ..., "variables": {...}}`` — a shell
        script loaded from disk (absolute path, or relative to the current
        working directory), or
      - ``{"type": "inline", "content": ..., "variables": {...}}`` — script
        content supplied directly in the config.

    ``{{VAR_NAME}}`` placeholders are replaced from the entry's
    ``variables`` mapping, then every non-empty line is appended to
    ``user_data``. Invalid or unreadable entries are logged and skipped.
    """
    from pathlib import Path

    for script_config in self.asg_config.user_data_scripts:
        script_type = script_config.get("type", "file")

        if script_type == "file":
            # Load script from file
            script_path = script_config.get("path")
            if not script_path:
                logger.warning("Script path not specified, skipping")
                continue

            # Resolve path (relative to current working dir, or absolute).
            path = Path(script_path)
            if not path.is_absolute():
                path = Path.cwd() / script_path

            if not path.exists():
                logger.warning(f"Script file not found: {path}, skipping")
                continue

            # Read script content with an explicit encoding: user-data
            # scripts are treated as UTF-8 regardless of the build host's
            # locale (the previous implementation used the locale default).
            try:
                script_content = path.read_text(encoding="utf-8")
            except Exception as e:
                logger.error(f"Failed to read script file {path}: {e}")
                continue

        elif script_type == "inline":
            # Use inline script content from the config entry.
            script_content = script_config.get("content", "")
            if not script_content:
                logger.warning("Inline script content is empty, skipping")
                continue
        else:
            logger.warning(f"Unknown script type: {script_type}, skipping")
            continue

        # Perform {{VAR_NAME}} placeholder substitution.
        for var_name, var_value in script_config.get("variables", {}).items():
            placeholder = f"{{{{{var_name}}}}}"  # {{VAR_NAME}}
            script_content = script_content.replace(placeholder, str(var_value))

        # Add the script to user data, one command per non-empty line.
        for line in script_content.split('\n'):
            if line.strip():
                user_data.add_commands(line)

        logger.info(f"Added user data script from {script_type}: {script_config.get('path', 'inline')}")
|
|
328
|
-
|
|
329
|
-
def _substitute_variables(self, command: str) -> str:
    """Resolve ``{{VAR_NAME}}`` placeholders in a user data command.

    Placeholders come from the workload (``WORKLOAD_NAME``, ``WORKLOAD``,
    ``ENVIRONMENT``), the deployment (``DEPLOYMENT_NAME``, ``REGION``,
    ``ACCOUNT``) and the stack itself (``STACK_NAME``). Placeholders with
    no configured value, and empty commands, pass through unchanged.
    """
    if not command:
        return command

    # Collect the substitution table in the same order as before:
    # workload first, then deployment, then stack-level values.
    substitutions = {}

    if self.workload:
        workload_name = getattr(self.workload, 'name', '')
        substitutions["WORKLOAD_NAME"] = workload_name
        substitutions["ENVIRONMENT"] = getattr(self.workload, 'environment', '')
        substitutions["WORKLOAD"] = workload_name

    if self.deployment:
        substitutions["DEPLOYMENT_NAME"] = getattr(self.deployment, 'name', '')
        substitutions["REGION"] = getattr(self.deployment, 'region', '')
        substitutions["ACCOUNT"] = getattr(self.deployment, 'account', '')

    substitutions["STACK_NAME"] = self.stack_name

    # Apply each replacement; skip values that are explicitly None.
    result = command
    for name, value in substitutions.items():
        if value is not None:
            result = result.replace(f"{{{{{name}}}}}", str(value))

    return result
|
|
371
|
-
|
|
372
|
-
def _add_container_user_data(
    self, user_data: ec2.UserData, container_config: Dict[str, Any]
) -> None:
    """Add container-specific user data commands.

    Installs Docker, optionally logs in to ECR and pulls an image, fetches
    database credentials from Secrets Manager, and finally starts the
    container — either via an explicit ``run_command`` or a generated
    ``docker run`` for the pulled ECR image.

    Args:
        user_data: The instance user data to append commands to.
        container_config: Container settings; recognized keys are ``ecr``
            (``account_id``, ``region``, ``repo``, ``tag``), ``database``
            (``secret_arn``), ``run_command`` and ``port``.
    """
    # Install Docker (dnf implies Amazon Linux 2023) plus jq for parsing
    # the Secrets Manager JSON below.
    user_data.add_commands(
        "dnf -y update", "dnf -y install docker jq", "systemctl enable --now docker"
    )

    # ECR configuration: export shell vars, authenticate, pull the image.
    if "ecr" in container_config:
        ecr_config = container_config["ecr"]
        user_data.add_commands(
            f"ACCOUNT_ID={ecr_config.get('account_id', self.account)}",
            f"REGION={ecr_config.get('region', self.region)}",
            f"REPO={ecr_config.get('repo', 'app')}",
            f"TAG={ecr_config.get('tag', 'latest')}",
            "aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin ${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com",
            "docker pull ${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/${REPO}:${TAG}",
        )

    # Database configuration: resolve host/user/pass/name from the secret.
    # NOTE(review): the $REGION shell variable is only set by the "ecr"
    # branch above — a database config without an "ecr" section relies on
    # REGION being set some other way; confirm against callers.
    if "database" in container_config:
        db_config = container_config["database"]
        secret_arn = db_config.get("secret_arn", "")
        if secret_arn:
            user_data.add_commands(
                f"DB_SECRET_ARN={secret_arn}",
                'if [ -n "$DB_SECRET_ARN" ]; then DB_JSON=$(aws secretsmanager get-secret-value --secret-id $DB_SECRET_ARN --query SecretString --output text --region $REGION); fi',
                'if [ -n "$DB_SECRET_ARN" ]; then DB_HOST=$(echo $DB_JSON | jq -r .host); DB_USER=$(echo $DB_JSON | jq -r .username); DB_PASS=$(echo $DB_JSON | jq -r .password); DB_NAME=$(echo $DB_JSON | jq -r .dbname); fi',
            )

    # Run container: an explicit run_command wins; otherwise start the
    # pulled ECR image with the DB_* environment passed through.
    if "run_command" in container_config:
        user_data.add_commands(container_config["run_command"])
    elif "ecr" in container_config:
        port = container_config.get("port", 8080)
        user_data.add_commands(
            f"docker run -d --name app -p {port}:{port} "
            '-e DB_HOST="$DB_HOST" -e DB_USER="$DB_USER" -e DB_PASS="$DB_PASS" -e DB_NAME="$DB_NAME" '
            "--restart=always ${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/${REPO}:${TAG}"
        )
|
|
414
|
-
|
|
415
|
-
def _create_launch_template(self, asg_name: str) -> ec2.LaunchTemplate:
    """Create launch template for the Auto Scaling Group.

    Resolves the AMI, instance type and EBS block devices from
    ``asg_config`` and bundles them with the instance role, the first
    security group and the previously built user data.

    Args:
        asg_name: Base name used for the launch template construct id.

    Returns:
        The created ``ec2.LaunchTemplate``.
    """
    # Get AMI: an explicit ami_id pins that image for this region;
    # otherwise resolve the latest Amazon Linux by ami_type
    # (unrecognized types fall back to Amazon Linux 2023).
    ami = None
    if self.asg_config.ami_id:
        ami = ec2.MachineImage.generic_linux({self.region: self.asg_config.ami_id})
    else:
        if self.asg_config.ami_type == "amazon-linux-2023":
            ami = ec2.MachineImage.latest_amazon_linux2023()
        elif self.asg_config.ami_type == "amazon-linux-2":
            ami = ec2.MachineImage.latest_amazon_linux2()
        else:
            ami = ec2.MachineImage.latest_amazon_linux2023()

    # Parse instance type: try the typed CLASS.SIZE enum form first and
    # fall back to the raw string, which CDK also accepts.
    instance_type_str = self.asg_config.instance_type
    instance_type = None

    if "." in instance_type_str:
        parts = instance_type_str.split(".")
        if len(parts) == 2:
            try:
                instance_class = ec2.InstanceClass[parts[0].upper()]
                instance_size = ec2.InstanceSize[parts[1].upper()]
                instance_type = ec2.InstanceType.of(instance_class, instance_size)
            except (KeyError, ValueError):
                # Unknown enum name (e.g. a newer family) — use raw string.
                instance_type = ec2.InstanceType(instance_type_str)
        else:
            instance_type = ec2.InstanceType(instance_type_str)
    else:
        instance_type = ec2.InstanceType(instance_type_str)

    # Create block device mappings (defaults: /dev/xvda, 8 GiB, encrypted
    # gp3, delete-on-terminate).
    block_devices = []
    for device in self.asg_config.block_devices:
        block_devices.append(
            ec2.BlockDevice(
                device_name=device.get("device_name", "/dev/xvda"),
                volume=ec2.BlockDeviceVolume.ebs(
                    volume_size=device.get("volume_size", 8),
                    volume_type=ec2.EbsDeviceVolumeType(
                        str(device.get("volume_type", "gp3")).upper()
                    ),
                    delete_on_termination=device.get("delete_on_termination", True),
                    encrypted=device.get("encrypted", True),
                ),
            )
        )

    # Create launch template.
    # NOTE(review): only the first security group is attached — confirm
    # additional entries in self.security_groups are intentionally unused.
    launch_template = ec2.LaunchTemplate(
        self,
        f"{asg_name}-LaunchTemplate",
        machine_image=ami,
        instance_type=instance_type,
        role=self.instance_role,
        security_group=self.security_groups[0] if self.security_groups else None,
        user_data=self.user_data,
        detailed_monitoring=self.asg_config.detailed_monitoring,
        block_devices=block_devices if block_devices else None,
    )

    return launch_template
|
|
478
|
-
|
|
479
|
-
def _create_auto_scaling_group(self, asg_name: str) -> autoscaling.AutoScalingGroup:
    """Create the Auto Scaling Group.

    Places instances into the configured subnet group, applies capacity
    limits, health-check settings and termination policies, attaches any
    target groups, and optionally overrides the CloudFormation rolling
    update policy.

    Args:
        asg_name: Construct id and base name for the ASG.

    Returns:
        The created ``autoscaling.AutoScalingGroup``.
    """
    # Configure subnet selection by named subnet group.
    subnet_group_name = self.asg_config.subnet_group_name
    subnets = ec2.SubnetSelection(subnet_group_name=subnet_group_name)

    # Configure health check: EC2 status checks by default, ELB health
    # (with a grace period) when explicitly requested.
    health_check_type = autoscaling.HealthCheck.ec2()
    if self.asg_config.health_check_type.upper() == "ELB":
        health_check_type = autoscaling.HealthCheck.elb(
            grace=Duration.seconds(self.asg_config.health_check_grace_period)
        )

    # Create Auto Scaling Group
    asg = autoscaling.AutoScalingGroup(
        self,
        asg_name,
        vpc=self.vpc,
        vpc_subnets=subnets,
        min_capacity=self.asg_config.min_capacity,
        max_capacity=self.asg_config.max_capacity,
        desired_capacity=self.asg_config.desired_capacity,
        launch_template=self.launch_template,
        health_check=health_check_type,
        cooldown=Duration.seconds(self.asg_config.cooldown),
        termination_policies=[
            autoscaling.TerminationPolicy(policy)
            for policy in self.asg_config.termination_policies
        ],
    )

    # Attach to target groups after ASG creation
    self._attach_target_groups(asg)

    # Configure update policy
    # Only apply update policy if it was explicitly configured
    if "update_policy" in self.stack_config.dictionary.get("auto_scaling", {}):
        update_policy = self.asg_config.update_policy
        # Apply the update policy on the ASG's L1 (CloudFormation)
        # resource, since the L2 construct does not expose these settings.
        cfn_asg = asg.node.default_child
        cfn_asg.add_override(
            "UpdatePolicy",
            {
                "AutoScalingRollingUpdate": {
                    "MinInstancesInService": update_policy.get(
                        "min_instances_in_service", 1
                    ),
                    "MaxBatchSize": update_policy.get("max_batch_size", 1),
                    # pause_time is configured in seconds; CloudFormation
                    # wants ISO-8601, so convert to whole minutes.
                    "PauseTime": f"PT{update_policy.get('pause_time', 300) // 60}M",
                }
            },
        )

    # Add tags
    for key, value in self.asg_config.tags.items():
        cdk.Tags.of(asg).add(key, value)

    return asg
|
|
537
|
-
|
|
538
|
-
def _configure_scaling_policies(self) -> None:
    """Configure scaling policies for the Auto Scaling Group.

    Each entry in ``asg_config.scaling_policies`` is applied as a
    step-scaling policy on the configured CloudWatch metric.

    NOTE(review): the previous implementation had byte-identical bodies for
    the "target_tracking" and "step" branches — both created a step policy
    via ``scale_on_metric``. The duplication is collapsed here without
    changing behavior; a true target-tracking policy is still not
    implemented. Entries with any other ``type`` are silently ignored,
    matching prior behavior.
    """
    for policy in self.asg_config.scaling_policies:
        policy_type = policy.get("type", "target_tracking")

        if policy_type in ("target_tracking", "step"):
            self.auto_scaling_group.scale_on_metric(
                f"{self.asg_config.name}-{policy.get('name', 'scaling-policy')}",
                metric=self._get_metric(policy),
                scaling_steps=self._get_scaling_steps(policy),
                adjustment_type=autoscaling.AdjustmentType.CHANGE_IN_CAPACITY,
            )
|
|
557
|
-
|
|
558
|
-
def _get_metric(self, policy: Dict[str, Any]) -> cloudwatch.Metric:
    """Build the CloudWatch metric referenced by a scaling policy.

    Defaults to average ``CPUUtilization`` over 60-second periods, scoped
    to this Auto Scaling Group via the AutoScalingGroupName dimension.
    """
    metric_name = policy.get("metric_name", "CPUUtilization")
    statistic = policy.get("statistic", "Average")
    period_seconds = policy.get("period", 60)

    return cloudwatch.Metric(
        namespace="AWS/EC2",
        metric_name=metric_name,
        dimensions_map={
            "AutoScalingGroupName": self.auto_scaling_group.auto_scaling_group_name
        },
        statistic=statistic,
        period=Duration.seconds(period_seconds),
    )
|
|
571
|
-
|
|
572
|
-
def _get_scaling_steps(
    self, policy: Dict[str, Any]
) -> List[autoscaling.ScalingInterval]:
    """Translate a policy's "steps" entries into ScalingInterval objects.

    Each step supplies ``lower`` (default 0), ``change`` (default 1) and
    an optional ``upper`` bound; when ``upper`` is omitted it is left
    unset so CDK can infer the boundary itself.
    """
    intervals = []

    for step in policy.get("steps", []):
        kwargs = {
            "lower": step.get("lower", 0),
            "change": step.get("change", 1),
        }
        # Only forward "upper" when explicitly configured.
        if "upper" in step:
            kwargs["upper"] = step["upper"]
        intervals.append(autoscaling.ScalingInterval(**kwargs))

    return intervals
|
|
593
|
-
|
|
594
|
-
def _add_outputs(self, asg_name: str) -> None:
    """Emit CloudFormation outputs for the ASG: name, ARN and — when a
    launch template exists — its id.

    NOTE(review): export names here overlap with ``_export_resources``;
    calling both for the same ASG would collide — confirm only one is used.
    """
    if not self.auto_scaling_group:
        return

    # Auto Scaling Group name.
    cdk.CfnOutput(
        self,
        f"{asg_name}-name",
        value=self.auto_scaling_group.auto_scaling_group_name,
        export_name=f"{self.deployment.build_resource_name(asg_name)}-name",
    )

    # Auto Scaling Group ARN.
    cdk.CfnOutput(
        self,
        f"{asg_name}-arn",
        value=self.auto_scaling_group.auto_scaling_group_arn,
        export_name=f"{self.deployment.build_resource_name(asg_name)}-arn",
    )

    # Launch template id, when one was created.
    if self.launch_template:
        cdk.CfnOutput(
            self,
            f"{asg_name}-launch-template-id",
            value=self.launch_template.launch_template_id,
            export_name=f"{self.deployment.build_resource_name(asg_name)}-launch-template-id",
        )
|
|
621
|
-
|
|
622
|
-
|
|
623
|
-
def _add_scaling_policies(self) -> None:
    """Add scaling policies to the Auto Scaling Group.

    Placeholder: iterates the configured policies but performs no action;
    the active implementation lives in ``_configure_scaling_policies``.
    """
    for policy_config in self.asg_config.scaling_policies:
        # Scaling policy implementation would go here
        pass
|
|
628
|
-
|
|
629
|
-
def _add_scheduled_actions(self) -> None:
    """Add scheduled actions to the Auto Scaling Group.

    Placeholder: iterates the configured actions but performs no action —
    scheduled-action support is not implemented in this (old) stack.
    """
    for action_config in self.asg_config.scheduled_actions:
        # Scheduled action implementation would go here
        pass
|
|
634
|
-
|
|
635
|
-
def _create_ecs_cluster_if_needed(self, asg_name: str):
    """Wire an externally created ECS cluster name into this ASG.

    Cluster creation itself belongs to the dedicated EcsClusterStack
    module; this method only checks the SSM imports for a cluster name
    and, when present, injects it into the instance user data.
    """
    if not self.has_ssm_import("ecs_cluster_name"):
        logger.warning(
            "No ECS cluster name found in SSM imports. "
            "Use the dedicated EcsClusterStack module to create ECS clusters."
        )
        return

    logger.info(f"ECS cluster name available via SSM imports")
    # Only rewrite user data when both the object and the tracked raw
    # command list exist.
    if self.user_data and self.user_data_commands:
        self._inject_cluster_name_into_user_data()
|
|
652
|
-
|
|
653
|
-
def _inject_cluster_name_into_user_data(self) -> None:
|
|
654
|
-
"""Inject the ECS cluster name into user data commands using SSM imports"""
|
|
655
|
-
# Check if ECS cluster name is available via SSM imports
|
|
656
|
-
if self.has_ssm_import("ecs_cluster_name"):
|
|
657
|
-
cluster_name = self.get_ssm_imported_value("ecs_cluster_name")
|
|
658
|
-
logger.info(f"Using ECS cluster name from SSM: {cluster_name}")
|
|
659
|
-
else:
|
|
660
|
-
logger.warning("No ECS cluster name found in SSM imports, skipping cluster name injection")
|
|
661
|
-
return
|
|
662
|
-
|
|
663
|
-
injected_commands = []
|
|
664
|
-
cluster_name_injected = False
|
|
665
|
-
|
|
666
|
-
for command in self.user_data_commands:
|
|
667
|
-
# If this command already sets ECS_CLUSTER, replace it
|
|
668
|
-
if 'ECS_CLUSTER=' in command:
|
|
669
|
-
# Replace existing ECS_CLUSTER setting with our cluster name
|
|
670
|
-
parts = command.split('ECS_CLUSTER=')
|
|
671
|
-
if len(parts) > 1:
|
|
672
|
-
# Keep everything before ECS_CLUSTER=, add our cluster name, then add the rest
|
|
673
|
-
before = parts[0]
|
|
674
|
-
after_parts = parts[1].split(None, 1) # Split on first whitespace
|
|
675
|
-
after = after_parts[1] if len(after_parts) > 1 else ''
|
|
676
|
-
new_command = f"{before}ECS_CLUSTER={cluster_name} {after}".strip()
|
|
677
|
-
injected_commands.append(new_command)
|
|
678
|
-
cluster_name_injected = True
|
|
679
|
-
else:
|
|
680
|
-
injected_commands.append(f"{command}ECS_CLUSTER={cluster_name}")
|
|
681
|
-
cluster_name_injected = True
|
|
682
|
-
else:
|
|
683
|
-
injected_commands.append(command)
|
|
684
|
-
|
|
685
|
-
# If no ECS_CLUSTER was found in existing commands, add it
|
|
686
|
-
if not cluster_name_injected:
|
|
687
|
-
injected_commands.append(f"echo ECS_CLUSTER={cluster_name} >> /etc/ecs/ecs.config")
|
|
688
|
-
|
|
689
|
-
# Update the user data with the injected commands
|
|
690
|
-
self.user_data_commands = injected_commands
|
|
691
|
-
|
|
692
|
-
# If user data object exists, we need to recreate it with the updated commands
|
|
693
|
-
if hasattr(self, 'user_data') and self.user_data:
|
|
694
|
-
self.user_data = self._recreate_user_data_with_commands(injected_commands)
|
|
695
|
-
|
|
696
|
-
def _recreate_user_data_with_commands(self, commands: List[str]) -> ec2.UserData:
    """Build a fresh Linux UserData object containing exactly *commands*."""
    rebuilt = ec2.UserData.for_linux()
    for cmd in commands:
        rebuilt.add_commands(cmd)
    return rebuilt
|
|
704
|
-
|
|
705
|
-
def _export_resources(self, asg_name: str) -> None:
    """Publish CloudFormation exports (name and ARN) for the ASG.

    NOTE(review): construct ids and export names duplicate those created
    by ``_add_outputs``; using both for one ASG would collide — confirm
    callers invoke only one of the two.
    """
    outputs = {
        "name": self.auto_scaling_group.auto_scaling_group_name,
        "arn": self.auto_scaling_group.auto_scaling_group_arn,
    }
    for suffix, value in outputs.items():
        cdk.CfnOutput(
            self,
            f"{asg_name}-{suffix}",
            value=value,
            export_name=f"{self.deployment.build_resource_name(asg_name)}-{suffix}",
        )
|
|
File without changes
|
|
File without changes
|
|
File without changes
|