cdk-factory 0.18.3__py3-none-any.whl → 0.18.5__py3-none-any.whl

This diff shows the content changes between two publicly released versions of the package, as they appear in the supported public registries. It is provided for informational purposes only.
cdk_factory/interfaces/standardized_ssm_mixin.py CHANGED
@@ -24,6 +24,8 @@ import os
24
24
  import re
25
25
  from typing import Dict, Any, Optional, List, Union
26
26
  from aws_cdk import aws_ssm as ssm
27
+ from aws_cdk import aws_ec2 as ec2
28
+ from aws_cdk import aws_ecs as ecs
27
29
  from constructs import Construct
28
30
  from aws_lambda_powertools import Logger
29
31
  from cdk_factory.configurations.deployment import DeploymentConfig
@@ -610,3 +612,40 @@ class SsmStandardValidator:
610
612
  errors.append(f"{context}: SSM path should use template variables: {path}")
611
613
 
612
614
  return errors
615
+
616
+
617
+ def parse_subnet_ids_from_ssm(self, subnet_ids_key: str = "subnet_ids") -> List[str]:
618
+ """
619
+ Helper function to parse subnet IDs from SSM imports.
620
+
621
+ This common pattern handles:
622
+ 1. Comma-separated subnet ID strings from SSM
623
+ 2. List of subnet IDs from SSM
624
+ 3. CDK Token resolution for deployment-time values
625
+
626
+ Args:
627
+ subnet_ids_key: The key used for subnet IDs in SSM imports (default: "subnet_ids")
628
+
629
+ Returns:
630
+ List of subnet IDs (empty list if not found or invalid format)
631
+ """
632
+ ssm_imports = self.get_all_ssm_imports()
633
+
634
+ if subnet_ids_key not in ssm_imports:
635
+ logger.warning(f"No subnet IDs found in SSM imports with key: {subnet_ids_key}")
636
+ return []
637
+
638
+ subnet_ids = ssm_imports[subnet_ids_key]
639
+
640
+ # Handle comma-separated string or list
641
+ if isinstance(subnet_ids, str):
642
+ # Split comma-separated string
643
+ parsed_ids = [sid.strip() for sid in subnet_ids.split(',') if sid.strip()]
644
+ logger.info(f"Parsed {len(parsed_ids)} subnet IDs from comma-separated string")
645
+ return parsed_ids
646
+ elif isinstance(subnet_ids, list):
647
+ logger.info(f"Using {len(subnet_ids)} subnet IDs from list")
648
+ return subnet_ids
649
+ else:
650
+ logger.warning(f"Unexpected subnet_ids type: {type(subnet_ids)}")
651
+ return []
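For orientation, a minimal usage sketch of the new helper follows; the select_imported_subnets wrapper and the fallback subnet group name are illustrative assumptions, not code from the package:

    from aws_cdk import aws_ec2 as ec2

    # Illustrative only: `stack` is assumed to be a construct that mixes in the
    # standardized SSM helpers and has "subnet_ids" configured as an SSM import.
    def select_imported_subnets(stack) -> ec2.SubnetSelection:
        subnet_ids = stack.parse_subnet_ids_from_ssm()  # default key: "subnet_ids"
        if not subnet_ids:
            # Hypothetical fallback when no subnet IDs were imported.
            return ec2.SubnetSelection(subnet_group_name="private")
        return ec2.SubnetSelection(
            subnets=[
                ec2.Subnet.from_subnet_id(stack, f"ImportedSubnet{i}", sid)
                for i, sid in enumerate(subnet_ids)
            ]
        )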
cdk_factory/stack_library/auto_scaling/auto_scaling_old.py ADDED
@@ -0,0 +1,721 @@
1
+ """
2
+ Auto Scaling Group Stack Pattern for CDK-Factory
3
+ Maintainers: Eric Wilson
4
+ MIT License. See Project Root for the license information.
5
+ """
6
+
7
+ from typing import Dict, Any, List, Optional
8
+
9
+ import aws_cdk as cdk
10
+ from aws_cdk import aws_ec2 as ec2
11
+ from aws_cdk import aws_autoscaling as autoscaling
12
+ from aws_cdk import aws_cloudwatch as cloudwatch
13
+ from aws_cdk import aws_iam as iam
14
+ from aws_cdk import aws_ssm as ssm
15
+ from aws_cdk import aws_ecs as ecs
16
+ from aws_cdk import Duration, Stack
17
+ from aws_lambda_powertools import Logger
18
+ from constructs import Construct
19
+
20
+ from cdk_factory.configurations.deployment import DeploymentConfig
21
+ from cdk_factory.configurations.stack import StackConfig
22
+ from cdk_factory.configurations.resources.auto_scaling import AutoScalingConfig
23
+ from cdk_factory.interfaces.istack import IStack
24
+ from cdk_factory.interfaces.vpc_provider_mixin import VPCProviderMixin
25
+ from cdk_factory.stack.stack_module_registry import register_stack
26
+ from cdk_factory.workload.workload_factory import WorkloadConfig
27
+
28
+ logger = Logger(service="AutoScalingStack")
29
+
30
+
31
+ @register_stack("auto_scaling_library_module_old")
32
+ @register_stack("auto_scaling_stack_old")
33
+ class AutoScalingStack(IStack, VPCProviderMixin):
34
+ """
35
+ Reusable stack for AWS Auto Scaling Groups.
36
+ Supports creating EC2 Auto Scaling Groups with customizable configurations.
37
+
38
+ Uses enhanced SsmParameterMixin (via IStack) to eliminate SSM code duplication.
39
+ """
40
+
41
+ def __init__(self, scope: Construct, id: str, **kwargs) -> None:
42
+ # Initialize parent class properly - IStack inherits from enhanced SsmParameterMixin
43
+ super().__init__(scope, id, **kwargs)
44
+
45
+ # Initialize VPC cache from mixin
46
+ self._initialize_vpc_cache()
47
+
48
+ self.asg_config = None
49
+ self.stack_config = None
50
+ self.deployment = None
51
+ self.workload = None
52
+ self.security_groups = []
53
+ self.auto_scaling_group = None
54
+ self.launch_template = None
55
+ self.instance_role = None
56
+ self.user_data = None
57
+ self.user_data_commands = [] # Store raw commands for ECS cluster detection
58
+ self.ecs_cluster = None
59
+
60
+ # SSM imports storage is now handled by the enhanced SsmParameterMixin via IStack
61
+ # VPC caching is now handled by VPCProviderMixin
62
+
63
+ def build(
64
+ self,
65
+ stack_config: StackConfig,
66
+ deployment: DeploymentConfig,
67
+ workload: WorkloadConfig,
68
+ ) -> None:
69
+ """Build the Auto Scaling Group stack"""
70
+ self._build(stack_config, deployment, workload)
71
+
72
+ def _build(
73
+ self,
74
+ stack_config: StackConfig,
75
+ deployment: DeploymentConfig,
76
+ workload: WorkloadConfig,
77
+ ) -> None:
78
+ """Internal build method for the Auto Scaling Group stack"""
79
+ self.stack_config = stack_config
80
+ self.deployment = deployment
81
+ self.workload = workload
82
+
83
+ self.asg_config = AutoScalingConfig(
84
+ stack_config.dictionary.get("auto_scaling", {}), deployment
85
+ )
86
+ asg_name = deployment.build_resource_name(self.asg_config.name)
87
+
88
+ # Process SSM imports using enhanced SsmParameterMixin
89
+ self.process_ssm_imports(self.asg_config, deployment, "Auto Scaling Group")
90
+
91
+ # Get security groups
92
+ self.security_groups = self._get_security_groups()
93
+
94
+ # Create IAM role for instances
95
+ self.instance_role = self._create_instance_role(asg_name)
96
+
97
+ # Create user data
98
+ self.user_data = self._create_user_data()
99
+
100
+ # Create ECS cluster if ECS configuration is detected
101
+ # This must happen before launch template creation so user data can be updated
102
+ self._create_ecs_cluster_if_needed(asg_name)
103
+
104
+ # Create launch template
105
+ self.launch_template = self._create_launch_template(asg_name)
106
+
107
+ # Create Auto Scaling Group
108
+ self.auto_scaling_group = self._create_auto_scaling_group(asg_name)
109
+
110
+ # Add scaling policies
111
+ self._add_scaling_policies()
112
+
113
+ # Add scheduled actions
114
+ self._add_scheduled_actions()
115
+
116
+ # Export resources
117
+ self._export_resources(asg_name)
118
+
119
+ @property
120
+ def vpc(self) -> ec2.IVpc:
121
+ """Get the VPC for the Auto Scaling Group using VPCProviderMixin"""
122
+ if not self.asg_config:
123
+ raise AttributeError("AutoScalingStack not properly initialized. Call build() first.")
124
+
125
+ # Use VPCProviderMixin to resolve VPC with proper subnet handling
126
+ return self.resolve_vpc(
127
+ config=self.asg_config,
128
+ deployment=self.deployment,
129
+ workload=self.workload
130
+ )
131
+
132
+ def _get_target_group_arns(self) -> List[str]:
133
+ """Get target group ARNs from SSM imports using enhanced SsmParameterMixin"""
134
+ target_group_arns = []
135
+
136
+ # Check if we have SSM imports for target groups using enhanced mixin
137
+ if self.has_ssm_import("target_group_arns"):
138
+ imported_tg_arns = self.get_ssm_imported_value("target_group_arns", [])
139
+ if isinstance(imported_tg_arns, list):
140
+ target_group_arns.extend(imported_tg_arns)
141
+ else:
142
+ target_group_arns.append(imported_tg_arns)
143
+
144
+ # see if we have any directly defined in the config
145
+ if self.asg_config.target_group_arns:
146
+ for arn in self.asg_config.target_group_arns:
147
+ logger.info(f"Adding target group ARN: {arn}")
148
+ target_group_arns.append(arn)
149
+
150
+ return target_group_arns
151
+
152
+ def _attach_target_groups(self, asg: autoscaling.AutoScalingGroup) -> None:
153
+ """Attach the Auto Scaling Group to target groups"""
154
+ target_group_arns = self._get_target_group_arns()
155
+
156
+ if not target_group_arns:
157
+ logger.warning("No target group ARNs found for Auto Scaling Group")
158
+ print(
159
+ "⚠️ No target group ARNs found for Auto Scaling Group. Nothing will be attached."
160
+ )
161
+ return
162
+
163
+ # Get the underlying CloudFormation resource to add target group ARNs
164
+ cfn_asg = asg.node.default_child
165
+ cfn_asg.add_property_override("TargetGroupARNs", target_group_arns)
166
+
167
+ def _get_security_groups(self) -> List[ec2.ISecurityGroup]:
168
+ """Get security groups for the Auto Scaling Group"""
169
+ security_groups = []
170
+ for sg_id in self.asg_config.security_group_ids:
171
+ # if the security group id contains a comma, it is a list of security group ids
172
+ if "," in sg_id:
173
+ blocks = sg_id.split(",")
174
+ for block in blocks:
175
+ security_groups.append(
176
+ ec2.SecurityGroup.from_security_group_id(
177
+ self, f"SecurityGroup-{block}", block
178
+ )
179
+ )
180
+ else:
181
+ # TODO: add some additional checks to make it more robust
182
+ security_groups.append(
183
+ ec2.SecurityGroup.from_security_group_id(
184
+ self, f"SecurityGroup-{sg_id}", sg_id
185
+ )
186
+ )
187
+ return security_groups
188
+
189
+ def _create_instance_role(self, asg_name: str) -> iam.Role:
190
+ """Create IAM role for EC2 instances"""
191
+ role = iam.Role(
192
+ self,
193
+ f"{asg_name}-InstanceRole",
194
+ assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"),
195
+ role_name=f"{asg_name}-role",
196
+ )
197
+
198
+ # Add managed policies
199
+ for policy_name in self.asg_config.managed_policies:
200
+ role.add_managed_policy(
201
+ iam.ManagedPolicy.from_aws_managed_policy_name(policy_name)
202
+ )
203
+
204
+ # Add inline policies (for custom permissions like S3 bucket access)
205
+ for policy_config in self.asg_config.iam_inline_policies:
206
+ policy_name = policy_config.get("name", "CustomPolicy")
207
+ statements = policy_config.get("statements", [])
208
+
209
+ if not statements:
210
+ logger.warning(f"No statements found for inline policy {policy_name}, skipping")
211
+ continue
212
+
213
+ # Build policy statements
214
+ policy_statements = []
215
+ for stmt in statements:
216
+ effect = iam.Effect.ALLOW if stmt.get("effect", "Allow") == "Allow" else iam.Effect.DENY
217
+ actions = stmt.get("actions", [])
218
+ resources = stmt.get("resources", [])
219
+
220
+ if not actions or not resources:
221
+ logger.warning(f"Incomplete statement in policy {policy_name}, skipping")
222
+ continue
223
+
224
+ policy_statements.append(
225
+ iam.PolicyStatement(
226
+ effect=effect,
227
+ actions=actions,
228
+ resources=resources
229
+ )
230
+ )
231
+
232
+ if policy_statements:
233
+ role.add_to_principal_policy(policy_statements[0])
234
+ for stmt in policy_statements[1:]:
235
+ role.add_to_principal_policy(stmt)
236
+
237
+ logger.info(f"Added inline policy {policy_name} with {len(policy_statements)} statements")
238
+
239
+ return role
240
+
241
+ def _create_user_data(self) -> ec2.UserData:
242
+ """Create user data for EC2 instances"""
243
+ user_data = ec2.UserData.for_linux()
244
+
245
+ # Store raw commands for ECS cluster detection
246
+ self.user_data_commands = ["set -euxo pipefail"]
247
+
248
+ # Add base commands
249
+ user_data.add_commands("set -euxo pipefail")
250
+
251
+ # Add custom commands from config (with variable substitution)
252
+ for command in self.asg_config.user_data_commands:
253
+ # Perform variable substitution on the command
254
+ substituted_command = self._substitute_variables(command)
255
+ user_data.add_commands(substituted_command)
256
+ self.user_data_commands.append(substituted_command)
257
+
258
+ # Add user data scripts from files (with variable substitution)
259
+ if self.asg_config.user_data_scripts:
260
+ self._add_user_data_scripts_from_files(user_data)
261
+
262
+ # Add container configuration if specified
263
+ container_config = self.asg_config.container_config
264
+ if container_config:
265
+ self._add_container_user_data(user_data, container_config)
266
+
267
+ return user_data
268
+
269
+ def _add_user_data_scripts_from_files(self, user_data: ec2.UserData) -> None:
270
+ """
271
+ Add user data scripts from external files with variable substitution.
272
+ Supports loading shell scripts and injecting them into user data with
273
+ placeholder replacement.
274
+ """
275
+ from pathlib import Path
276
+
277
+ for script_config in self.asg_config.user_data_scripts:
278
+ script_type = script_config.get("type", "file")
279
+
280
+ if script_type == "file":
281
+ # Load script from file
282
+ script_path = script_config.get("path")
283
+ if not script_path:
284
+ logger.warning("Script path not specified, skipping")
285
+ continue
286
+
287
+ # Resolve path (relative to project root or absolute)
288
+ path = Path(script_path)
289
+ if not path.is_absolute():
290
+ # Try relative to current working directory
291
+ path = Path.cwd() / script_path
292
+
293
+ if not path.exists():
294
+ logger.warning(f"Script file not found: {path}, skipping")
295
+ continue
296
+
297
+ # Read script content
298
+ try:
299
+ with open(path, 'r') as f:
300
+ script_content = f.read()
301
+ except Exception as e:
302
+ logger.error(f"Failed to read script file {path}: {e}")
303
+ continue
304
+
305
+ elif script_type == "inline":
306
+ # Use inline script content
307
+ script_content = script_config.get("content", "")
308
+ if not script_content:
309
+ logger.warning("Inline script content is empty, skipping")
310
+ continue
311
+ else:
312
+ logger.warning(f"Unknown script type: {script_type}, skipping")
313
+ continue
314
+
315
+ # Perform variable substitution
316
+ variables = script_config.get("variables", {})
317
+ for var_name, var_value in variables.items():
318
+ placeholder = f"{{{{{var_name}}}}}" # {{VAR_NAME}}
319
+ script_content = script_content.replace(placeholder, str(var_value))
320
+
321
+ # Add script to user data
322
+ # Split by lines and add each line as a command
323
+ for line in script_content.split('\n'):
324
+ if line.strip(): # Skip empty lines
325
+ user_data.add_commands(line)
326
+
327
+ logger.info(f"Added user data script from {script_type}: {script_config.get('path', 'inline')}")
328
+
329
+ def _substitute_variables(self, command: str) -> str:
330
+ """
331
+ Perform variable substitution on a user data command.
332
+ Uses workload and deployment configuration for substitution.
333
+ """
334
+ if not command:
335
+ return command
336
+
337
+ # Start with the original command
338
+ substituted_command = command
339
+
340
+ # Define available variables for substitution
341
+ variables = {}
342
+
343
+ # Add workload variables
344
+ if self.workload:
345
+ variables.update({
346
+ "WORKLOAD_NAME": getattr(self.workload, 'name', ''),
347
+ "ENVIRONMENT": getattr(self.workload, 'environment', ''),
348
+ "WORKLOAD": getattr(self.workload, 'name', ''),
349
+ })
350
+
351
+ # Add deployment variables
352
+ if self.deployment:
353
+ variables.update({
354
+ "DEPLOYMENT_NAME": getattr(self.deployment, 'name', ''),
355
+ "REGION": getattr(self.deployment, 'region', ''),
356
+ "ACCOUNT": getattr(self.deployment, 'account', ''),
357
+ })
358
+
359
+ # Add stack-level variables
360
+ variables.update({
361
+ "STACK_NAME": self.stack_name,
362
+ })
363
+
364
+ # Perform substitution
365
+ for var_name, var_value in variables.items():
366
+ if var_value is not None:
367
+ placeholder = f"{{{{{var_name}}}}}" # {{VAR_NAME}}
368
+ substituted_command = substituted_command.replace(placeholder, str(var_value))
369
+
370
+ return substituted_command
371
+
372
+ def _add_container_user_data(
373
+ self, user_data: ec2.UserData, container_config: Dict[str, Any]
374
+ ) -> None:
375
+ """Add container-specific user data commands"""
376
+ # Install Docker
377
+ user_data.add_commands(
378
+ "dnf -y update", "dnf -y install docker jq", "systemctl enable --now docker"
379
+ )
380
+
381
+ # ECR configuration
382
+ if "ecr" in container_config:
383
+ ecr_config = container_config["ecr"]
384
+ user_data.add_commands(
385
+ f"ACCOUNT_ID={ecr_config.get('account_id', self.account)}",
386
+ f"REGION={ecr_config.get('region', self.region)}",
387
+ f"REPO={ecr_config.get('repo', 'app')}",
388
+ f"TAG={ecr_config.get('tag', 'latest')}",
389
+ "aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin ${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com",
390
+ "docker pull ${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/${REPO}:${TAG}",
391
+ )
392
+
393
+ # Database configuration
394
+ if "database" in container_config:
395
+ db_config = container_config["database"]
396
+ secret_arn = db_config.get("secret_arn", "")
397
+ if secret_arn:
398
+ user_data.add_commands(
399
+ f"DB_SECRET_ARN={secret_arn}",
400
+ 'if [ -n "$DB_SECRET_ARN" ]; then DB_JSON=$(aws secretsmanager get-secret-value --secret-id $DB_SECRET_ARN --query SecretString --output text --region $REGION); fi',
401
+ 'if [ -n "$DB_SECRET_ARN" ]; then DB_HOST=$(echo $DB_JSON | jq -r .host); DB_USER=$(echo $DB_JSON | jq -r .username); DB_PASS=$(echo $DB_JSON | jq -r .password); DB_NAME=$(echo $DB_JSON | jq -r .dbname); fi',
402
+ )
403
+
404
+ # Run container
405
+ if "run_command" in container_config:
406
+ user_data.add_commands(container_config["run_command"])
407
+ elif "ecr" in container_config:
408
+ port = container_config.get("port", 8080)
409
+ user_data.add_commands(
410
+ f"docker run -d --name app -p {port}:{port} "
411
+ '-e DB_HOST="$DB_HOST" -e DB_USER="$DB_USER" -e DB_PASS="$DB_PASS" -e DB_NAME="$DB_NAME" '
412
+ "--restart=always ${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/${REPO}:${TAG}"
413
+ )
414
+
415
+ def _create_launch_template(self, asg_name: str) -> ec2.LaunchTemplate:
416
+ """Create launch template for the Auto Scaling Group"""
417
+ # Get AMI
418
+ ami = None
419
+ if self.asg_config.ami_id:
420
+ ami = ec2.MachineImage.generic_linux({self.region: self.asg_config.ami_id})
421
+ else:
422
+ if self.asg_config.ami_type == "amazon-linux-2023":
423
+ ami = ec2.MachineImage.latest_amazon_linux2023()
424
+ elif self.asg_config.ami_type == "amazon-linux-2":
425
+ ami = ec2.MachineImage.latest_amazon_linux2()
426
+ else:
427
+ ami = ec2.MachineImage.latest_amazon_linux2023()
428
+
429
+ # Parse instance type
430
+ instance_type_str = self.asg_config.instance_type
431
+ instance_type = None
432
+
433
+ if "." in instance_type_str:
434
+ parts = instance_type_str.split(".")
435
+ if len(parts) == 2:
436
+ try:
437
+ instance_class = ec2.InstanceClass[parts[0].upper()]
438
+ instance_size = ec2.InstanceSize[parts[1].upper()]
439
+ instance_type = ec2.InstanceType.of(instance_class, instance_size)
440
+ except (KeyError, ValueError):
441
+ instance_type = ec2.InstanceType(instance_type_str)
442
+ else:
443
+ instance_type = ec2.InstanceType(instance_type_str)
444
+ else:
445
+ instance_type = ec2.InstanceType(instance_type_str)
446
+
447
+ # Create block device mappings
448
+ block_devices = []
449
+ for device in self.asg_config.block_devices:
450
+ block_devices.append(
451
+ ec2.BlockDevice(
452
+ device_name=device.get("device_name", "/dev/xvda"),
453
+ volume=ec2.BlockDeviceVolume.ebs(
454
+ volume_size=device.get("volume_size", 8),
455
+ volume_type=ec2.EbsDeviceVolumeType(
456
+ str(device.get("volume_type", "gp3")).upper()
457
+ ),
458
+ delete_on_termination=device.get("delete_on_termination", True),
459
+ encrypted=device.get("encrypted", True),
460
+ ),
461
+ )
462
+ )
463
+
464
+ # Create launch template
465
+ launch_template = ec2.LaunchTemplate(
466
+ self,
467
+ f"{asg_name}-LaunchTemplate",
468
+ machine_image=ami,
469
+ instance_type=instance_type,
470
+ role=self.instance_role,
471
+ security_group=self.security_groups[0] if self.security_groups else None,
472
+ user_data=self.user_data,
473
+ detailed_monitoring=self.asg_config.detailed_monitoring,
474
+ block_devices=block_devices if block_devices else None,
475
+ )
476
+
477
+ return launch_template
478
+
479
+ def _create_auto_scaling_group(self, asg_name: str) -> autoscaling.AutoScalingGroup:
480
+ """Create the Auto Scaling Group"""
481
+ # Configure subnet selection
482
+ subnet_group_name = self.asg_config.subnet_group_name
483
+ subnets = ec2.SubnetSelection(subnet_group_name=subnet_group_name)
484
+
485
+ # Configure health check
486
+ health_check_type = autoscaling.HealthCheck.ec2()
487
+ if self.asg_config.health_check_type.upper() == "ELB":
488
+ health_check_type = autoscaling.HealthCheck.elb(
489
+ grace=Duration.seconds(self.asg_config.health_check_grace_period)
490
+ )
491
+
492
+ # Create Auto Scaling Group
493
+ asg = autoscaling.AutoScalingGroup(
494
+ self,
495
+ asg_name,
496
+ vpc=self.vpc,
497
+ vpc_subnets=subnets,
498
+ min_capacity=self.asg_config.min_capacity,
499
+ max_capacity=self.asg_config.max_capacity,
500
+ desired_capacity=self.asg_config.desired_capacity,
501
+ launch_template=self.launch_template,
502
+ health_check=health_check_type,
503
+ cooldown=Duration.seconds(self.asg_config.cooldown),
504
+ termination_policies=[
505
+ autoscaling.TerminationPolicy(policy)
506
+ for policy in self.asg_config.termination_policies
507
+ ],
508
+ )
509
+
510
+ # Attach to target groups after ASG creation
511
+ self._attach_target_groups(asg)
512
+
513
+ # Configure update policy
514
+ # Only apply update policy if it was explicitly configured
515
+ if "update_policy" in self.stack_config.dictionary.get("auto_scaling", {}):
516
+ update_policy = self.asg_config.update_policy
517
+ # Apply the update policy to the ASG's CloudFormation resource
518
+ cfn_asg = asg.node.default_child
519
+ cfn_asg.add_override(
520
+ "UpdatePolicy",
521
+ {
522
+ "AutoScalingRollingUpdate": {
523
+ "MinInstancesInService": update_policy.get(
524
+ "min_instances_in_service", 1
525
+ ),
526
+ "MaxBatchSize": update_policy.get("max_batch_size", 1),
527
+ "PauseTime": f"PT{update_policy.get('pause_time', 300) // 60}M",
528
+ }
529
+ },
530
+ )
531
+
532
+ # Add tags
533
+ for key, value in self.asg_config.tags.items():
534
+ cdk.Tags.of(asg).add(key, value)
535
+
536
+ return asg
537
+
538
+ def _configure_scaling_policies(self) -> None:
539
+ """Configure scaling policies for the Auto Scaling Group"""
540
+ for policy in self.asg_config.scaling_policies:
541
+ policy_type = policy.get("type", "target_tracking")
542
+
543
+ if policy_type == "target_tracking":
544
+ self.auto_scaling_group.scale_on_metric(
545
+ f"{self.asg_config.name}-{policy.get('name', 'scaling-policy')}",
546
+ metric=self._get_metric(policy),
547
+ scaling_steps=self._get_scaling_steps(policy),
548
+ adjustment_type=autoscaling.AdjustmentType.CHANGE_IN_CAPACITY,
549
+ )
550
+ elif policy_type == "step":
551
+ self.auto_scaling_group.scale_on_metric(
552
+ f"{self.asg_config.name}-{policy.get('name', 'scaling-policy')}",
553
+ metric=self._get_metric(policy),
554
+ scaling_steps=self._get_scaling_steps(policy),
555
+ adjustment_type=autoscaling.AdjustmentType.CHANGE_IN_CAPACITY,
556
+ )
557
+
558
+ def _get_metric(self, policy: Dict[str, Any]) -> cloudwatch.Metric:
559
+ """Get metric for scaling policy"""
560
+ # Simplified implementation: builds a CloudWatch metric for this ASG
561
+ # from the policy config; the namespace is fixed to AWS/EC2
562
+ return cloudwatch.Metric(
563
+ namespace="AWS/EC2",
564
+ metric_name=policy.get("metric_name", "CPUUtilization"),
565
+ dimensions_map={
566
+ "AutoScalingGroupName": self.auto_scaling_group.auto_scaling_group_name
567
+ },
568
+ statistic=policy.get("statistic", "Average"),
569
+ period=Duration.seconds(policy.get("period", 60)),
570
+ )
571
+
572
+ def _get_scaling_steps(
573
+ self, policy: Dict[str, Any]
574
+ ) -> List[autoscaling.ScalingInterval]:
575
+ """Get scaling steps for scaling policy"""
576
+ steps = policy.get("steps", [])
577
+ scaling_intervals = []
578
+
579
+ for step in steps:
580
+ # Handle upper bound - if not specified, don't set it (let CDK handle it)
581
+ interval_kwargs = {
582
+ "lower": step.get("lower", 0),
583
+ "change": step.get("change", 1),
584
+ }
585
+
586
+ # Only set upper if it's explicitly provided
587
+ if "upper" in step:
588
+ interval_kwargs["upper"] = step["upper"]
589
+
590
+ scaling_intervals.append(autoscaling.ScalingInterval(**interval_kwargs))
591
+
592
+ return scaling_intervals
593
+
594
+ def _add_outputs(self, asg_name: str) -> None:
595
+ """Add CloudFormation outputs for the Auto Scaling Group"""
596
+ if self.auto_scaling_group:
597
+ # Auto Scaling Group Name
598
+ cdk.CfnOutput(
599
+ self,
600
+ f"{asg_name}-name",
601
+ value=self.auto_scaling_group.auto_scaling_group_name,
602
+ export_name=f"{self.deployment.build_resource_name(asg_name)}-name",
603
+ )
604
+
605
+ # Auto Scaling Group ARN
606
+ cdk.CfnOutput(
607
+ self,
608
+ f"{asg_name}-arn",
609
+ value=self.auto_scaling_group.auto_scaling_group_arn,
610
+ export_name=f"{self.deployment.build_resource_name(asg_name)}-arn",
611
+ )
612
+
613
+ # Launch Template ID
614
+ if self.launch_template:
615
+ cdk.CfnOutput(
616
+ self,
617
+ f"{asg_name}-launch-template-id",
618
+ value=self.launch_template.launch_template_id,
619
+ export_name=f"{self.deployment.build_resource_name(asg_name)}-launch-template-id",
620
+ )
621
+
622
+
623
+ def _add_scaling_policies(self) -> None:
624
+ """Add scaling policies to the Auto Scaling Group"""
625
+ for policy_config in self.asg_config.scaling_policies:
626
+ # Scaling policy implementation would go here
627
+ pass
628
+
629
+ def _add_scheduled_actions(self) -> None:
630
+ """Add scheduled actions to the Auto Scaling Group"""
631
+ for action_config in self.asg_config.scheduled_actions:
632
+ # Scheduled action implementation would go here
633
+ pass
634
+
635
+ def _create_ecs_cluster_if_needed(self, asg_name: str):
636
+ """
637
+ ECS cluster creation should be handled by the dedicated EcsClusterStack module.
638
+ This method only handles SSM imports for cluster name injection.
639
+ """
640
+ # Check if ECS cluster name is available via SSM imports
641
+ if self.has_ssm_import("ecs_cluster_name"):
642
+ logger.info("ECS cluster name available via SSM imports")
643
+ # Inject cluster name into user data if available
644
+ if self.user_data and self.user_data_commands:
645
+ self._inject_cluster_name_into_user_data()
646
+ return
647
+
648
+ logger.warning(
649
+ "No ECS cluster name found in SSM imports. "
650
+ "Use the dedicated EcsClusterStack module to create ECS clusters."
651
+ )
652
+
653
+ def _inject_cluster_name_into_user_data(self) -> None:
654
+ """Inject the ECS cluster name into user data commands using SSM imports"""
655
+ # Check if ECS cluster name is available via SSM imports
656
+ if self.has_ssm_import("ecs_cluster_name"):
657
+ cluster_name = self.get_ssm_imported_value("ecs_cluster_name")
658
+ logger.info(f"Using ECS cluster name from SSM: {cluster_name}")
659
+ else:
660
+ logger.warning("No ECS cluster name found in SSM imports, skipping cluster name injection")
661
+ return
662
+
663
+ injected_commands = []
664
+ cluster_name_injected = False
665
+
666
+ for command in self.user_data_commands:
667
+ # If this command already sets ECS_CLUSTER, replace it
668
+ if 'ECS_CLUSTER=' in command:
669
+ # Replace existing ECS_CLUSTER setting with our cluster name
670
+ parts = command.split('ECS_CLUSTER=')
671
+ if len(parts) > 1:
672
+ # Keep everything before ECS_CLUSTER=, add our cluster name, then add the rest
673
+ before = parts[0]
674
+ after_parts = parts[1].split(None, 1) # Split on first whitespace
675
+ after = after_parts[1] if len(after_parts) > 1 else ''
676
+ new_command = f"{before}ECS_CLUSTER={cluster_name} {after}".strip()
677
+ injected_commands.append(new_command)
678
+ cluster_name_injected = True
679
+ else:
680
+ injected_commands.append(f"{command}ECS_CLUSTER={cluster_name}")
681
+ cluster_name_injected = True
682
+ else:
683
+ injected_commands.append(command)
684
+
685
+ # If no ECS_CLUSTER was found in existing commands, add it
686
+ if not cluster_name_injected:
687
+ injected_commands.append(f"echo ECS_CLUSTER={cluster_name} >> /etc/ecs/ecs.config")
688
+
689
+ # Update the user data with the injected commands
690
+ self.user_data_commands = injected_commands
691
+
692
+ # If user data object exists, we need to recreate it with the updated commands
693
+ if hasattr(self, 'user_data') and self.user_data:
694
+ self.user_data = self._recreate_user_data_with_commands(injected_commands)
695
+
696
+ def _recreate_user_data_with_commands(self, commands: List[str]) -> ec2.UserData:
697
+ """Recreate user data with updated commands"""
698
+ user_data = ec2.UserData.for_linux()
699
+
700
+ for command in commands:
701
+ user_data.add_commands(command)
702
+
703
+ return user_data
704
+
705
+ def _export_resources(self, asg_name: str) -> None:
706
+ """Export stack resources to SSM and CloudFormation outputs"""
707
+ # Export ASG name
708
+ cdk.CfnOutput(
709
+ self,
710
+ f"{asg_name}-name",
711
+ value=self.auto_scaling_group.auto_scaling_group_name,
712
+ export_name=f"{self.deployment.build_resource_name(asg_name)}-name",
713
+ )
714
+
715
+ # Export ASG ARN
716
+ cdk.CfnOutput(
717
+ self,
718
+ f"{asg_name}-arn",
719
+ value=self.auto_scaling_group.auto_scaling_group_arn,
720
+ export_name=f"{self.deployment.build_resource_name(asg_name)}-arn",
721
+ )
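For context, the sketch below shows a hypothetical "auto_scaling" configuration fragment (written as a Python dict) that would exercise the module above. The keys mirror the attributes the code reads (instance_type, min_capacity, user_data_commands, user_data_scripts), but the exact AutoScalingConfig schema is not part of this diff, so treat the fragment as an assumption:

    # Hypothetical config fragment; {{WORKLOAD_NAME}} and {{ENVIRONMENT}} are
    # placeholders that _substitute_variables() fills from the workload/deployment configs.
    auto_scaling = {
        "name": "web-asg",
        "instance_type": "t3.small",
        "min_capacity": 1,
        "max_capacity": 3,
        "user_data_commands": [
            "echo 'workload={{WORKLOAD_NAME}} env={{ENVIRONMENT}}' >> /etc/motd",
        ],
        "user_data_scripts": [
            {"type": "file", "path": "scripts/bootstrap.sh",
             "variables": {"APP_PORT": 8080}},
        ],
    }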
cdk_factory/stack_library/auto_scaling/auto_scaling_stack_standardized.py CHANGED
@@ -212,8 +212,20 @@ class AutoScalingStack(IStack, VPCProviderMixin, StandardizedSsmMixin):
212
212
  """
213
213
  # Primary method: Use standardized SSM imports
214
214
  ssm_imports = self._get_ssm_imports()
215
+
215
216
  if "subnet_ids" in ssm_imports:
216
- return ssm_imports["subnet_ids"]
217
+ subnet_ids = ssm_imports["subnet_ids"]
218
+
219
+ # Handle comma-separated string or list
220
+ if isinstance(subnet_ids, str):
221
+ # Split comma-separated string
222
+ parsed_ids = [sid.strip() for sid in subnet_ids.split(',') if sid.strip()]
223
+ return parsed_ids
224
+ elif isinstance(subnet_ids, list):
225
+ return subnet_ids
226
+ else:
227
+ logger.warning(f"Unexpected subnet_ids type: {type(subnet_ids)}")
228
+ return []
217
229
 
218
230
  # Fallback: Use VPC provider mixin (backward compatibility)
219
231
  elif hasattr(self, '_get_subnets_from_provider'):
@@ -497,14 +509,21 @@ class AutoScalingStack(IStack, VPCProviderMixin, StandardizedSsmMixin):
497
509
  # Get the underlying CloudFormation resource to add update policy
498
510
  cfn_asg = self.auto_scaling_group.node.default_child
499
511
 
500
- # Clear any existing update policy and set the rolling update policy
501
- cfn_asg.add_property_override("UpdatePolicy", {
512
+ # Get CDK's default policy first (if any)
513
+ default_policy = getattr(cfn_asg, 'update_policy', {})
514
+
515
+ # Merge with defaults, then use the robust add_override method
516
+ merged_policy = {
517
+ **default_policy, # Preserve CDK defaults
502
518
  "AutoScalingRollingUpdate": {
503
519
  "MinInstancesInService": update_policy.get("min_instances_in_service", 1),
504
520
  "MaxBatchSize": update_policy.get("max_batch_size", 1),
505
521
  "PauseTime": f"PT{update_policy.get('pause_time', 300)}S"
506
522
  }
507
- })
523
+ }
524
+
525
+ # Use the robust CDK-documented approach
526
+ cfn_asg.add_override("UpdatePolicy", merged_policy)
508
527
 
509
528
  logger.info("Added rolling update policy to Auto Scaling Group")
510
529
 
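As a worked illustration of the change above: with the defaults shown (min_instances_in_service=1, max_batch_size=1, pause_time=300) and no pre-existing policy on the construct, the merged override amounts to roughly the following (a sketch, not additional package code):

    cfn_asg.add_override("UpdatePolicy", {
        "AutoScalingRollingUpdate": {
            "MinInstancesInService": 1,
            "MaxBatchSize": 1,
            "PauseTime": "PT300S",
        },
    })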
cdk_factory/version.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.18.3"
1
+ __version__ = "0.18.5"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: cdk_factory
3
- Version: 0.18.3
3
+ Version: 0.18.5
4
4
  Summary: CDK Factory. A QuickStarter and best practices setup for CDK projects
5
5
  Author-email: Eric Wilson <eric.wilson@geekcafe.com>
6
6
  License: MIT License
@@ -2,7 +2,7 @@ cdk_factory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
2
  cdk_factory/app.py,sha256=RnX0-pwdTAPAdKJK_j13Zl8anf9zYKBwboR0KA8K8xM,10346
3
3
  cdk_factory/cdk.json,sha256=SKZKhJ2PBpFH78j-F8S3VDYW-lf76--Q2I3ON-ZIQfw,3106
4
4
  cdk_factory/cli.py,sha256=FGbCTS5dYCNsfp-etshzvFlGDCjC28r6rtzYbe7KoHI,6407
5
- cdk_factory/version.py,sha256=gFZ2jBuhpo2tEeGzLqhbiYFDJrpaFW_VBCOIhFFdh2I,23
5
+ cdk_factory/version.py,sha256=iPUl2pWX4a0MJCvP-avqrH4JfPv3yJGwmx8dAy0QJ40,23
6
6
  cdk_factory/builds/README.md,sha256=9BBWd7bXpyKdMU_g2UljhQwrC9i5O_Tvkb6oPvndoZk,90
7
7
  cdk_factory/commands/command_loader.py,sha256=QbLquuP_AdxtlxlDy-2IWCQ6D-7qa58aphnDPtp_uTs,3744
8
8
  cdk_factory/configurations/base_config.py,sha256=eJ3Pl3GWk1jVr_bYQaaWlw4_-ZiFGaiXllI_fOOX1i0,9323
@@ -66,7 +66,7 @@ cdk_factory/constructs/sqs/policies/sqs_policies.py,sha256=4p0G8G-fqNKSr68I55fvq
66
66
  cdk_factory/interfaces/istack.py,sha256=3xqGw5kNTt_KeLHdMxI7rIR0YORqcWQOqsacmDlTAv0,1167
67
67
  cdk_factory/interfaces/live_ssm_resolver.py,sha256=3FIr9a02SXqZmbFs3RT0WxczWEQR_CF7QSt7kWbDrVE,8163
68
68
  cdk_factory/interfaces/networked_stack_mixin.py,sha256=69pJp4IE1n_tdHh2UZQ08O6ZW-v5P4uJJ_fleNaj6Nw,2897
69
- cdk_factory/interfaces/standardized_ssm_mixin.py,sha256=-BT-K7mro2f3taS7biAm_oaxC7z2lurUfNUpryvahXk,22680
69
+ cdk_factory/interfaces/standardized_ssm_mixin.py,sha256=OVDzKjGWifT7jQuTdeQhEQM_FYkHlt3kcpMLF-fYHkg,24247
70
70
  cdk_factory/interfaces/vpc_provider_mixin.py,sha256=Kj0mmZd54NINprixJLs8zL-WWiSd0AQBtGdwNg8cz14,8207
71
71
  cdk_factory/lambdas/health_handler.py,sha256=dd40ykKMxWCFEIyp2ZdQvAGNjw_ylI9CSm1N24Hp2ME,196
72
72
  cdk_factory/lambdas/edge/ip_gate/handler.py,sha256=gUevgX462mqGYddtQIyJ1-Jk3oXhFmbmd46jlqjai9E,10657
@@ -86,7 +86,8 @@ cdk_factory/stack_library/acm/__init__.py,sha256=4FNRLykblcKZvq_wieYwvv9N_jgrZnJ
86
86
  cdk_factory/stack_library/acm/acm_stack.py,sha256=QJ3GkT17PmWoGkfO5Um02hvrfyJ9HbiPMnclwDP7IbA,5846
87
87
  cdk_factory/stack_library/api_gateway/api_gateway_stack.py,sha256=_wbPBsgh7FHq9cnL44CiuffBj3XCO5ErQx_yclxFsVY,39669
88
88
  cdk_factory/stack_library/auto_scaling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
89
- cdk_factory/stack_library/auto_scaling/auto_scaling_stack_standardized.py,sha256=Den7t4AAEn1slQPRHVq_DdzJSp1NRJgvObJEB8diQ8g,21959
89
+ cdk_factory/stack_library/auto_scaling/auto_scaling_old.py,sha256=WvEwIao4KPOLa8e3zaQjxlTydCIK0GfgjZn4-CNyuF0,30135
90
+ cdk_factory/stack_library/auto_scaling/auto_scaling_stack_standardized.py,sha256=qF_-cs9gAYGCjlO4C0UE79526jpqhoz6SpEIN3h1t48,22720
90
91
  cdk_factory/stack_library/aws_lambdas/lambda_stack.py,sha256=SFbBPvvCopbyiuYtq-O5sQkFCf94Wzua6aDUXiFDSB4,26161
91
92
  cdk_factory/stack_library/buckets/README.md,sha256=XkK3UNVtRLE7NtUvbhCOBBYUYi8hlrrSaI1s3GJVrqI,78
92
93
  cdk_factory/stack_library/buckets/bucket_stack.py,sha256=SLoZqSffAqmeBBEVUQg54D_8Ad5UKdkjEAmKAVgAqQo,1778
@@ -136,8 +137,8 @@ cdk_factory/utilities/os_execute.py,sha256=5Op0LY_8Y-pUm04y1k8MTpNrmQvcLmQHPQITE
136
137
  cdk_factory/utils/api_gateway_utilities.py,sha256=If7Xu5s_UxmuV-kL3JkXxPLBdSVUKoLtohm0IUFoiV8,4378
137
138
  cdk_factory/validation/config_validator.py,sha256=Pb0TkLiPFzUplBOgMorhRCVm08vEzZhRU5xXCDTa5CA,17602
138
139
  cdk_factory/workload/workload_factory.py,sha256=yDI3cRhVI5ELNDcJPLpk9UY54Uind1xQoV3spzT4z7E,6068
139
- cdk_factory-0.18.3.dist-info/METADATA,sha256=bjGa--we56QRn-Mf3pRHKvva3If16v1JH85-uiX3KOc,2451
140
- cdk_factory-0.18.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
141
- cdk_factory-0.18.3.dist-info/entry_points.txt,sha256=S1DPe0ORcdiwEALMN_WIo3UQrW_g4YdQCLEsc_b0Swg,53
142
- cdk_factory-0.18.3.dist-info/licenses/LICENSE,sha256=NOtdOeLwg2il_XBJdXUPFPX8JlV4dqTdDGAd2-khxT8,1066
143
- cdk_factory-0.18.3.dist-info/RECORD,,
140
+ cdk_factory-0.18.5.dist-info/METADATA,sha256=hRJbftyqnC8GtrySAlYFoXmf5AWDQcRXqx9gJQUtYy8,2451
141
+ cdk_factory-0.18.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
142
+ cdk_factory-0.18.5.dist-info/entry_points.txt,sha256=S1DPe0ORcdiwEALMN_WIo3UQrW_g4YdQCLEsc_b0Swg,53
143
+ cdk_factory-0.18.5.dist-info/licenses/LICENSE,sha256=NOtdOeLwg2il_XBJdXUPFPX8JlV4dqTdDGAd2-khxT8,1066
144
+ cdk_factory-0.18.5.dist-info/RECORD,,