cdk-factory 0.19.19__py3-none-any.whl → 0.20.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (20) hide show
  1. cdk_factory/stack_library/auto_scaling/auto_scaling_stack.py +122 -85
  2. cdk_factory/stack_library/ecs/ecs_cluster_stack.py +2 -2
  3. cdk_factory/stack_library/ecs/ecs_service_stack.py +2 -0
  4. cdk_factory/stack_library/lambda_edge/functions/README.md +0 -0
  5. cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/__init__.py +33 -0
  6. cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/app.py +30 -0
  7. cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/edge_log_retention.py +85 -0
  8. cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/requirements.txt +2 -0
  9. cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/test.py +22 -0
  10. cdk_factory/stack_library/lambda_edge/lambda_edge_log_retention_stack.py +0 -0
  11. cdk_factory/stack_library/lambda_edge/lambda_edge_stack.py +94 -8
  12. cdk_factory/stack_library/load_balancer/load_balancer_stack.py +4 -0
  13. cdk_factory/stack_library/route53/route53_stack.py +97 -133
  14. cdk_factory/version.py +1 -1
  15. {cdk_factory-0.19.19.dist-info → cdk_factory-0.20.5.dist-info}/METADATA +1 -1
  16. {cdk_factory-0.19.19.dist-info → cdk_factory-0.20.5.dist-info}/RECORD +19 -13
  17. cdk_factory/stack_library/lambda_edge/EDGE_LOG_RETENTION_TODO.md +0 -226
  18. {cdk_factory-0.19.19.dist-info → cdk_factory-0.20.5.dist-info}/WHEEL +0 -0
  19. {cdk_factory-0.19.19.dist-info → cdk_factory-0.20.5.dist-info}/entry_points.txt +0 -0
  20. {cdk_factory-0.19.19.dist-info → cdk_factory-0.20.5.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,85 @@
#!/usr/bin/env python3
"""Set CloudWatch Logs retention on Lambda@Edge log groups in every region.

Lambda@Edge replicates functions to edge locations, which write log groups
named ``/aws/lambda/us-east-1.<function>`` in whatever region served the
request. This script finds those groups across all regions and applies a
retention policy so the logs do not accumulate forever.
"""
import os

import boto3
from botocore.exceptions import ClientError

# Honor an explicitly selected profile; boto3 falls back to its default
# credential chain when AWS_PROFILE is unset (profile_name=None).
profile_name = os.getenv('AWS_PROFILE')
session = boto3.Session(region_name='us-east-1', profile_name=profile_name)
ec2 = session.client('ec2')


def set_edge_log_retention(retention_days=7, dry_run=True):
    """
    Find Lambda@Edge log groups across all regions and set retention policies.

    Args:
        retention_days (int): Number of days to retain logs
        dry_run (bool): If True, only show what would be changed

    Returns:
        list[dict]: One entry per Lambda@Edge log group found, with keys
            'region', 'name', 'current_retention', and 'stored_bytes'.
    """
    # Enumerate every region visible to the account; Edge logs can land anywhere.
    regions = [region['RegionName'] for region in ec2.describe_regions()['Regions']]

    edge_log_groups = []
    total_changed = 0

    print(f"🔍 Hunting for Lambda@Edge log groups across {len(regions)} regions...")
    print(f"🎯 Target retention: {retention_days} days")
    print(f"🧪 Dry run: {dry_run}")
    print("=" * 60)

    for region in regions:
        try:
            logs = session.client('logs', region_name=region)

            # Lambda@Edge log groups carry the originating region in the name,
            # e.g. /aws/lambda/us-east-1.my-function
            paginator = logs.get_paginator('describe_log_groups')
            for page in paginator.paginate():
                for log_group in page.get('logGroups', []):
                    log_group_name = log_group['logGroupName']

                    # Guard clause: only Lambda@Edge log groups are of interest.
                    if '/aws/lambda/us-east-1.' not in log_group_name:
                        continue

                    current_retention = log_group.get('retentionInDays')

                    edge_log_groups.append({
                        'region': region,
                        'name': log_group_name,
                        'current_retention': current_retention,
                        'stored_bytes': log_group.get('storedBytes', 0)
                    })

                    if current_retention == retention_days:
                        print(f"✓ {region}: {log_group_name} already has {retention_days} days retention")
                        continue

                    if dry_run:
                        print(f"📍 {region}: Would set {log_group_name} to {retention_days} days (current: {current_retention})")
                        continue

                    try:
                        logs.put_retention_policy(
                            logGroupName=log_group_name,
                            retentionInDays=retention_days
                        )
                        print(f"✅ {region}: Set {log_group_name} to {retention_days} days")
                        total_changed += 1
                    except ClientError as e:
                        print(f"❌ {region}: Failed to set {log_group_name} - {e}")

        except ClientError as e:
            # Skip regions where CloudWatch Logs isn't available (e.g. opt-in
            # regions that are disabled), but say why instead of failing silently.
            error_code = e.response.get('Error', {}).get('Code', 'ClientError')
            print(f"⚠️ {region}: skipped ({error_code})")
            continue

    print("=" * 60)
    print("📊 Summary:")
    print(f"   Found {len(edge_log_groups)} Lambda@Edge log groups")
    print(f"   Total storage: {sum(g['stored_bytes'] for g in edge_log_groups) / (1024**3):.2f} GB")
    if not dry_run:
        print(f"   Changed {total_changed} log groups")

    return edge_log_groups


if __name__ == "__main__":
    # Dry run first to see what would be changed
    edge_logs = set_edge_log_retention(retention_days=7, dry_run=True)

    # Uncomment the line below to actually make changes
    # set_edge_log_retention(retention_days=7, dry_run=False)
@@ -0,0 +1,2 @@
1
+ # AWS Lambda Powertools: structured logging utilities used by the handler
2
+ aws-lambda-powertools
@@ -0,0 +1,22 @@
from app import lambda_handler


def _sample_scheduled_event() -> dict:
    """Return a canned EventBridge scheduled-event payload for a local run."""
    rule_arn = "arn:aws:events:us-east-1:123456789012:rule/LogRetentionManager"
    return {
        "version": "0",
        "id": "12345678-1234-1234-1234-123456789012",
        "detail-type": "Scheduled Event",
        "source": "aws.events",
        "account": "123456789012",
        "time": "2024-01-15T10:00:00Z",
        "region": "us-east-1",
        "resources": [rule_arn],
        "detail": {"days": 7, "dry_run": False},
    }


if __name__ == "__main__":
    # Invoke the handler directly, exactly as the scheduled rule would.
    lambda_handler(_sample_scheduled_event(), None)
@@ -134,7 +134,12 @@ class LambdaEdgeStack(IStack, StandardizedSsmMixin):
134
134
 
135
135
  resolved_env = {}
136
136
 
137
- for key, value in self.edge_config.environment.items():
137
+ # Use the new simplified configuration structure
138
+ configuration = self.edge_config.dictionary.get("configuration", {})
139
+ runtime_config = configuration.get("runtime", {})
140
+ ui_config = configuration.get("ui", {})
141
+
142
+ for key, value in runtime_config.items():
138
143
  # Check if value is an SSM parameter reference
139
144
  if isinstance(value, str) and value.startswith("{{ssm:") and value.endswith("}}"):
140
145
  # Extract SSM parameter path
@@ -216,11 +221,23 @@ class LambdaEdgeStack(IStack, StandardizedSsmMixin):
216
221
  # Since Lambda@Edge doesn't support environment variables, we bundle a config file
217
222
  # Use the full function_name (e.g., "tech-talk-dev-ip-gate") not just the base name
218
223
  resolved_env = self._resolve_environment_variables()
224
+
225
+ # Get the UI configuration
226
+ configuration = self.edge_config.dictionary.get("configuration", {})
227
+ ui_config = configuration.get("ui", {})
228
+
229
+
230
+ workload_name = self.deployment.workload.get("name")
231
+
232
+ if not workload_name:
233
+ raise ValueError("Workload name is required for Lambda@Edge function")
219
234
  runtime_config = {
220
235
  'environment': self.deployment.environment,
236
+ 'workload': workload_name,
221
237
  'function_name': function_name,
222
238
  'region': self.deployment.region,
223
- 'environment_variables': resolved_env # Add actual environment variables
239
+ 'runtime': resolved_env, # Runtime variables (SSM, etc.)
240
+ 'ui': ui_config # UI configuration (colors, messages, etc.)
224
241
  }
225
242
 
226
243
  runtime_config_path = temp_code_dir / 'runtime_config.json'
@@ -250,12 +267,15 @@ class LambdaEdgeStack(IStack, StandardizedSsmMixin):
250
267
  )
251
268
 
252
269
  # Log warning if environment variables are configured
253
- if self.edge_config.environment:
270
+ configuration = self.edge_config.dictionary.get("configuration", {})
271
+ runtime_config = configuration.get("runtime", {})
272
+
273
+ if runtime_config:
254
274
  logger.warning(
255
275
  f"Lambda@Edge function '{function_name}' has environment variables configured, "
256
276
  "but Lambda@Edge does not support environment variables. The function must fetch these values from SSM Parameter Store at runtime."
257
277
  )
258
- for key, value in self.edge_config.environment.items():
278
+ for key, value in runtime_config.items():
259
279
  logger.warning(f" - {key}: {value}")
260
280
 
261
281
  # Create execution role with CloudWatch Logs and SSM permissions
@@ -276,7 +296,7 @@ class LambdaEdgeStack(IStack, StandardizedSsmMixin):
276
296
  )
277
297
 
278
298
  # Add SSM read permissions if environment variables reference SSM parameters
279
- if self.edge_config.environment:
299
+ if runtime_config:
280
300
  execution_role.add_to_policy(
281
301
  iam.PolicyStatement(
282
302
  effect=iam.Effect.ALLOW,
@@ -286,7 +306,7 @@ class LambdaEdgeStack(IStack, StandardizedSsmMixin):
286
306
  "ssm:GetParametersByPath"
287
307
  ],
288
308
  resources=[
289
- f"arn:aws:ssm:*:{cdk.Aws.ACCOUNT_ID}:parameter/*"
309
+ f"arn:aws:ssm:*:{self.deployment.account}:parameter/*"
290
310
  ]
291
311
  )
292
312
  )
@@ -300,7 +320,70 @@ class LambdaEdgeStack(IStack, StandardizedSsmMixin):
300
320
  "secretsmanager:DescribeSecret"
301
321
  ],
302
322
  resources=[
303
- f"arn:aws:secretsmanager:*:{cdk.Aws.ACCOUNT_ID}:secret:{self.deployment.environment}/{self.workload.name}/origin-secret*"
323
+ f"arn:aws:secretsmanager:*:{self.deployment.account}:secret:{self.deployment.environment}/{self.workload.name}/origin-secret*"
324
+ ]
325
+ )
326
+ )
327
+
328
+ # Add ELB permissions for target health API access
329
+ execution_role.add_to_policy(
330
+ iam.PolicyStatement(
331
+ effect=iam.Effect.ALLOW,
332
+ actions=[
333
+ "elasticloadbalancing:DescribeTargetHealth",
334
+ "elasticloadbalancing:DescribeTargetGroups",
335
+ "elasticloadbalancing:DescribeLoadBalancers",
336
+ "elasticloadbalancing:DescribeListeners",
337
+ "elasticloadbalancing:DescribeTags"
338
+ ],
339
+ resources=[
340
+ "*"
341
+ ]
342
+ )
343
+ )
344
+
345
+ # Add ACM permissions for certificate validation
346
+ execution_role.add_to_policy(
347
+ iam.PolicyStatement(
348
+ effect=iam.Effect.ALLOW,
349
+ actions=[
350
+ "acm:DescribeCertificate",
351
+ "acm:ListCertificates"
352
+ ],
353
+ resources=[
354
+ f"arn:aws:acm:*:{self.deployment.account}:certificate/*"
355
+ ]
356
+ )
357
+ )
358
+
359
+ # Add Route 53 permissions for health check access
360
+ execution_role.add_to_policy(
361
+ iam.PolicyStatement(
362
+ effect=iam.Effect.ALLOW,
363
+ actions=[
364
+ "route53:GetHealthCheckStatus",
365
+ "route53:ListHealthChecks",
366
+ "route53:GetHealthCheck"
367
+ ],
368
+ resources=[
369
+ f"arn:aws:route53:::{self.deployment.account}:health-check/*"
370
+ ]
371
+ )
372
+ )
373
+
374
+ # Add CloudWatch permissions for enhanced logging and metrics
375
+ execution_role.add_to_policy(
376
+ iam.PolicyStatement(
377
+ effect=iam.Effect.ALLOW,
378
+ actions=[
379
+ "logs:CreateLogGroup",
380
+ "logs:CreateLogStream",
381
+ "logs:PutLogEvents",
382
+ "cloudwatch:PutMetricData"
383
+ ],
384
+ resources=[
385
+ f"arn:aws:logs:*:{self.deployment.account}:log-group:/aws/lambda/*",
386
+ f"arn:aws:cloudwatch:*:{self.deployment.account}:metric:*"
304
387
  ]
305
388
  )
306
389
  )
@@ -437,8 +520,11 @@ class LambdaEdgeStack(IStack, StandardizedSsmMixin):
437
520
  configuration = self.edge_config.dictionary.get("configuration", {})
438
521
  environment_variables = configuration.get("environment_variables", {})
439
522
 
523
+ # Build full configuration that Lambda@Edge expects
440
524
  full_config = {
441
- "environment_variables": environment_variables
525
+ "environment_variables": environment_variables,
526
+ "runtime": configuration.get("runtime", {}),
527
+ "ui": configuration.get("ui", {})
442
528
  }
443
529
 
444
530
  self.export_ssm_parameter(
@@ -459,6 +459,10 @@ class LoadBalancerStack(IStack, VPCProviderMixin, StandardizedSsmMixin):
459
459
  # Parse AWS ALB conditions format
460
460
  aws_conditions = rule_config.get("conditions", [])
461
461
  for condition in aws_conditions:
462
+ enabled = str(condition.get("enabled", True)).lower()
463
+ if enabled != "true":
464
+ continue
465
+
462
466
  field = condition.get("field")
463
467
  if field == "http-header" and "http_header_config" in condition:
464
468
  header_config = condition["http_header_config"]
@@ -27,6 +27,7 @@ from cdk_factory.interfaces.standardized_ssm_mixin import StandardizedSsmMixin
27
27
  from cdk_factory.stack.stack_module_registry import register_stack
28
28
  from cdk_factory.workload.workload_factory import WorkloadConfig
29
29
 
30
+
30
31
  logger = Logger(service="Route53Stack")
31
32
 
32
33
 
@@ -48,7 +49,8 @@ class Route53Stack(IStack, StandardizedSsmMixin):
48
49
  self.hosted_zone = None
49
50
  self.certificate = None
50
51
  self.records = {}
51
- self._distribution_cache = {} # Cache for reusing distributions
52
+ self._local_cache = {} # Cache for reusing distributions
53
+ self._missing_configurations = []
52
54
 
53
55
  def build(self, stack_config: StackConfig, deployment: DeploymentConfig, workload: WorkloadConfig) -> None:
54
56
  """Build the Route53 stack"""
@@ -119,7 +121,7 @@ class Route53Stack(IStack, StandardizedSsmMixin):
119
121
  return certificate
120
122
 
121
123
  def _create_dns_records(self) -> None:
122
- self._create_dns_records_old()
124
+ # self._create_dns_records_old()
123
125
  self._create_dns_records_new()
124
126
 
125
127
 
@@ -128,7 +130,7 @@ class Route53Stack(IStack, StandardizedSsmMixin):
128
130
  # Create a unique cache key from distribution domain and ID
129
131
  cache_key = f"{distribution_domain}-{distribution_id}"
130
132
 
131
- if cache_key not in self._distribution_cache:
133
+ if cache_key not in self._local_cache:
132
134
  # Create the distribution construct with a unique ID
133
135
  unique_id = f"CF-{distribution_domain.replace('.', '-').replace('*', 'wildcard')}-{hash(cache_key) % 10000}"
134
136
  distribution = cloudfront.Distribution.from_distribution_attributes(
@@ -136,104 +138,108 @@ class Route53Stack(IStack, StandardizedSsmMixin):
136
138
  domain_name=distribution_domain,
137
139
  distribution_id=distribution_id
138
140
  )
139
- self._distribution_cache[cache_key] = distribution
141
+ self._local_cache[cache_key] = distribution
140
142
  logger.info(f"Created CloudFront distribution construct for {distribution_domain}")
141
143
 
142
- return self._distribution_cache[cache_key]
144
+ return self._local_cache[cache_key]
145
+
146
+ def _get_or_create_alb_target(self, record_name: str, target_value: str, load_balancer_zone_id: str, security_group_id: str, load_balancer_dns_name: str) -> targets.LoadBalancerTarget:
147
+ """Get or create an ALB alias target for the record, reusing it if already created"""
148
+ # Create a unique cache key from the record name
149
+ cache_key = f"{record_name}-alb"
150
+
151
+ if cache_key not in self._local_cache:
152
+ # Create the imported ALB target construct with a unique ID
153
+ target = targets.LoadBalancerTarget(
154
+ elbv2.ApplicationLoadBalancer.from_application_load_balancer_attributes(
155
+ self, f"ALB-{record_name}",
156
+ load_balancer_arn=target_value,
157
+ load_balancer_canonical_hosted_zone_id=load_balancer_zone_id,
158
+ security_group_id=security_group_id,
159
+ load_balancer_dns_name=load_balancer_dns_name,
160
+
161
+ )
162
+ )
163
+ self._local_cache[cache_key] = target
164
+ logger.info(f"Created ALB target construct for ALB-{record_name}")
165
+
166
+ return self._local_cache[cache_key]
143
167
 
144
168
  def _create_dns_records_new(self) -> None:
145
169
  """Create DNS records based on configuration - generic implementation"""
146
170
 
147
- missing_configurations = []
171
+
148
172
 
149
173
  for record in self.route53_config.records:
150
- record_name = record.get("name", "")
151
- record_type = record.get("type", "")
174
+ t = record.get("type")
175
+ record_name = self._get_resolved_value(config=record, key="name", record_type=t)
176
+ record_type = self._get_resolved_value(config=record, key="type", record_type=t)
177
+
152
178
 
153
- if not record_name or not record_type:
154
- message = f"Record missing name or type: {record}"
155
- logger.warning(message)
156
- missing_configurations.append(message)
157
- continue
158
179
 
159
180
  # Handle alias records
160
181
  if "alias" in record:
161
182
  alias_config = record["alias"]
162
- target_type = alias_config.get("target_type", "")
163
- target_value = alias_config.get("target_value", "")
164
- hosted_zone_id = alias_config.get("hosted_zone_id", "")
165
183
 
166
- unique_id = f"{record_name}-{record_type}"
167
- # Handle SSM parameter references in target_value
168
- target_value = self.resolve_ssm_value(self, target_value, unique_id=unique_id)
184
+ target_type = self._get_resolved_value(config=alias_config, key="target_type", record_type=record_type)
185
+ target_value = self._get_resolved_value(config=alias_config, key="target_value", record_type=record_type)
186
+
187
+
169
188
 
170
- if not target_type or not target_value:
171
- message = f"Alias record missing target_type or target_value: {record}"
172
- logger.warning(message)
173
- missing_configurations.append(message)
174
- continue
175
189
 
176
190
  # Create appropriate target based on type
177
191
  alias_target = None
178
192
  if target_type == "cloudfront":
179
193
  # CloudFront distribution target
180
194
  distribution_domain = target_value
181
- distribution_id = alias_config.get("distribution_id", "")
182
- if not distribution_id:
183
- message = f"Alias record missing distribution_id: {record}"
184
- logger.warning(message)
185
- missing_configurations.append(message)
186
- continue
195
+ distribution_id = self._get_resolved_value(config=alias_config, key="distribution_id", record_type=record_type)
196
+
187
197
 
188
198
  # Get or create the distribution (reuses if already created)
189
199
  distribution = self._get_or_create_cloudfront_distribution(distribution_domain, distribution_id)
190
200
  alias_target = route53.RecordTarget.from_alias(
191
201
  targets.CloudFrontTarget(distribution)
192
202
  )
193
- elif target_type == "loadbalancer" or target_type == "alb":
194
- # Load Balancer target
195
- alias_target = route53.RecordTarget.from_alias(
196
- targets.LoadBalancerTarget(
197
- elbv2.ApplicationLoadBalancer.from_load_balancer_attributes(
198
- self, f"ALB-{record_name}",
199
- load_balancer_dns_name=target_value,
200
- load_balancer_canonical_hosted_zone_id=hosted_zone_id
201
- )
202
- )
203
- )
204
- elif target_type == "elbv2":
205
- # Generic ELBv2 target
206
- alias_target = route53.RecordTarget.from_alias(
207
- targets.LoadBalancerTarget(
208
- elbv2.ApplicationLoadBalancer.from_load_balancer_attributes(
209
- self, f"ELB-{record_name}",
210
- load_balancer_dns_name=target_value,
211
- load_balancer_canonical_hosted_zone_id=hosted_zone_id
212
- )
213
- )
214
- )
203
+ elif target_type == "loadbalancer" or target_type == "alb" or target_type == "elbv2":
204
+ # ALB alias target using imported load balancer attributes
205
+
206
+ security_group_id=self._get_resolved_value(config=alias_config, key="security_group_id", record_type=record_type)
207
+ load_balancer_dns_name = self._get_resolved_value(config=alias_config, key="load_balancer_dns_name", record_type=record_type)
208
+ load_balancer_zone_id = self._get_resolved_value(config=alias_config, key="load_balancer_zone_id", record_type=record_type)
209
+
210
+
211
+
212
+ target = self._get_or_create_alb_target(record_name, target_value, load_balancer_zone_id, security_group_id, load_balancer_dns_name)
213
+
214
+ alias_target = route53.RecordTarget.from_alias(target)
215
+
215
216
  else:
216
217
  message = f"Unsupported alias target type: {target_type}"
217
218
  logger.warning(message)
218
219
  missing_configurations.append(message)
219
220
  continue
220
221
 
221
- # Create the alias record
222
- route53.ARecord(
223
- self,
224
- f"AliasRecord-{record_name}-{record_type}",
225
- zone=self.hosted_zone,
226
- record_name=record_name,
227
- target=alias_target,
228
- ttl=cdk.Duration.seconds(record.get("ttl", 300))
229
- ) if record_type == "A" else route53.AaaaRecord(
230
- self,
231
- f"AliasRecord-{record_name}-{record_type}",
232
- zone=self.hosted_zone,
222
+ route_53_record = None
223
+ id = f"AliasRecord-{record_name}-{record_type}"
224
+ print(f"creating record {id}")
225
+ if record_type == "A":
226
+ route_53_record = route53.ARecord(
227
+ self,
228
+ id,
229
+ zone=self.hosted_zone,
233
230
  record_name=record_name,
234
231
  target=alias_target,
235
232
  ttl=cdk.Duration.seconds(record.get("ttl", 300))
236
233
  )
234
+ elif record_type == "AAAA":
235
+ route_53_record = route53.AaaaRecord(
236
+ self,
237
+ id,
238
+ zone=self.hosted_zone,
239
+ record_name=record_name,
240
+ target=alias_target,
241
+ ttl=cdk.Duration.seconds(record.get("ttl", 300))
242
+ )
237
243
 
238
244
  # Handle standard records with values
239
245
  elif "values" in record:
@@ -326,89 +332,47 @@ class Route53Stack(IStack, StandardizedSsmMixin):
326
332
  else:
327
333
  message = f"Unsupported record type: {record_type}"
328
334
  logger.warning(message)
329
- missing_configurations.append(message)
335
+ self._missing_configurations.append(message)
330
336
  continue
331
337
 
332
338
  else:
333
339
  message = f"Record missing 'alias' or 'values' configuration: {record}"
334
340
  logger.warning(message)
335
- missing_configurations.append(message)
341
+ self._missing_configurations.append(message)
336
342
  continue
337
343
 
338
- if missing_configurations and len(missing_configurations) > 0:
344
+ if self._missing_configurations and len(self._missing_configurations) > 0:
339
345
  # print all missing configurations
340
346
  print("Missing configurations:")
341
- for message in missing_configurations:
347
+ for message in self._missing_configurations:
342
348
  print(message)
343
349
 
344
- messages = "\n".join(missing_configurations)
350
+ messages = "\n".join(self._missing_configurations)
345
351
  raise ValueError(f"Missing Configurations:\n{messages}")
346
352
 
347
- def _create_dns_records_old(self) -> None:
348
- """Create DNS records based on configuration"""
349
- # Create alias records
350
- for alias_record in self.route53_config.aliases:
351
- record_name = alias_record.get("name", "")
352
- target_type = alias_record.get("target_type", "")
353
- target_value = alias_record.get("target_value", "")
354
-
355
- # target value needs to handle SSM parameters
356
- if "{{ssm:" in target_value and "}}" in target_value:
357
- # Extract SSM parameter path from template like {{ssm:/path/to/parameter}}
358
- ssm_path = target_value.split("{{ssm:")[1].split("}}")[0]
359
- target_value = self.get_ssm_imported_value(ssm_path)
353
+ def _get_resolved_value(self, *, config: dict, key: str, required: bool = True, record_type: str = "" ) -> str:
354
+
355
+ value = config.get(key, "")
356
+ x = str(value).replace("{", "").replace("}", "").replace(":", "")
357
+ unique_id = f"{key}-id-{record_type}-{x}"
360
358
 
361
- if not record_name or not target_type or not target_value:
362
- continue
363
-
364
- # Determine the alias target
365
- alias_target = None
366
- if target_type == "alb":
367
- # Get the ALB from the workload if available
368
- if hasattr(self.workload, "load_balancer"):
369
- alb = self.workload.load_balancer
370
- alias_target = route53.RecordTarget.from_alias(targets.LoadBalancerTarget(alb))
371
- else:
372
- # Try to get ALB from target value
373
- alb = elbv2.ApplicationLoadBalancer.from_lookup(
374
- self,
375
- f"ALB-{record_name}",
376
- load_balancer_arn=target_value
377
- )
378
- alias_target = route53.RecordTarget.from_alias(targets.LoadBalancerTarget(alb))
379
- elif target_type == "cloudfront":
380
- # For CloudFront, we would need the distribution
381
- # This is a simplified implementation
382
- pass
383
-
384
- if alias_target:
385
- record = route53.ARecord(
386
- self,
387
- f"AliasRecord-{record_name}",
388
- zone=self.hosted_zone,
389
- record_name=record_name,
390
- target=alias_target
391
- )
392
- self.records[record_name] = record
393
-
394
- # Create CNAME records
395
- for cname_record in self.route53_config.cname_records:
396
- record_name = cname_record.get("name", "")
397
- target_domain = cname_record.get("target_domain", "")
398
- ttl = cname_record.get("ttl", 300)
399
-
400
- if not record_name or not target_domain:
401
- continue
402
-
403
- record = route53.CnameRecord(
404
- self,
405
- f"CnameRecord-{record_name}",
406
- zone=self.hosted_zone,
407
- record_name=record_name,
408
- domain_name=target_domain,
409
- ttl=cdk.Duration.seconds(ttl)
410
- )
411
- self.records[record_name] = record
359
+ if unique_id in self._local_cache:
360
+ return self._local_cache[unique_id]
361
+
362
+
363
+
364
+ # Handle SSM parameter references in target_value
365
+ value = self.resolve_ssm_value(self, value, unique_id=unique_id)
366
+
367
+ if required and not value:
368
+ self._missing_configurations.append(f"Missing required value for key: {key}")
369
+
370
+ self._local_cache[unique_id] = value
371
+
372
+
373
+ return value
374
+
375
+
412
376
 
413
377
  def _add_outputs(self) -> None:
414
378
  """Add CloudFormation outputs for the Route53 resources"""
cdk_factory/version.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.19.19"
1
+ __version__ = "0.20.5"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: cdk_factory
3
- Version: 0.19.19
3
+ Version: 0.20.5
4
4
  Summary: CDK Factory. A QuickStarter and best practices setup for CDK projects
5
5
  Author-email: Eric Wilson <eric.wilson@geekcafe.com>
6
6
  License: MIT License