cdk-factory 0.19.13__py3-none-any.whl → 0.20.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cdk_factory/stack_library/auto_scaling/auto_scaling_stack.py +122 -85
- cdk_factory/stack_library/ecs/ecs_cluster_stack.py +2 -2
- cdk_factory/stack_library/ecs/ecs_service_stack.py +2 -0
- cdk_factory/stack_library/lambda_edge/functions/README.md +0 -0
- cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/__init__.py +33 -0
- cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/app.py +30 -0
- cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/edge_log_retention.py +85 -0
- cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/requirements.txt +2 -0
- cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/test.py +22 -0
- cdk_factory/stack_library/lambda_edge/lambda_edge_log_retention_stack.py +0 -0
- cdk_factory/stack_library/lambda_edge/lambda_edge_stack.py +166 -97
- cdk_factory/stack_library/load_balancer/load_balancer_stack.py +4 -0
- cdk_factory/stack_library/route53/route53_stack.py +97 -133
- cdk_factory/version.py +1 -1
- {cdk_factory-0.19.13.dist-info → cdk_factory-0.20.5.dist-info}/METADATA +1 -1
- {cdk_factory-0.19.13.dist-info → cdk_factory-0.20.5.dist-info}/RECORD +19 -12
- {cdk_factory-0.19.13.dist-info → cdk_factory-0.20.5.dist-info}/WHEEL +0 -0
- {cdk_factory-0.19.13.dist-info → cdk_factory-0.20.5.dist-info}/entry_points.txt +0 -0
- {cdk_factory-0.19.13.dist-info → cdk_factory-0.20.5.dist-info}/licenses/LICENSE +0 -0
cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/edge_log_retention.py

@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+import boto3
+from botocore.exceptions import ClientError
+import os
+
+profile_name = os.getenv('AWS_PROFILE')
+session = boto3.Session(region_name='us-east-1', profile_name=profile_name)
+ec2 = session.client('ec2')
+
+def set_edge_log_retention(retention_days=7, dry_run=True):
+    """
+    Find Lambda@Edge log groups across all regions and set retention policies.
+
+    Args:
+        retention_days (int): Number of days to retain logs
+        dry_run (bool): If True, only show what would be changed
+    """
+    # Get all AWS regions
+    regions = [region['RegionName'] for region in ec2.describe_regions()['Regions']]
+
+    edge_log_groups = []
+    total_changed = 0
+
+    print(f"🔍 Hunting for Lambda@Edge log groups across {len(regions)} regions...")
+    print(f"🎯 Target retention: {retention_days} days")
+    print(f"🧪 Dry run: {dry_run}")
+    print("=" * 60)
+
+    for region in regions:
+        try:
+            logs = session.client('logs', region_name=region)
+
+            # Find log groups with us-east-1 prefix (indicating Edge functions)
+            paginator = logs.get_paginator('describe_log_groups')
+            for page in paginator.paginate():
+                for log_group in page.get('logGroups', []):
+                    log_group_name = log_group['logGroupName']
+
+                    # Check if it's a Lambda@Edge log group
+                    if '/aws/lambda/us-east-1.' in log_group_name:
+                        current_retention = log_group.get('retentionInDays')
+
+                        edge_log_groups.append({
+                            'region': region,
+                            'name': log_group_name,
+                            'current_retention': current_retention,
+                            'stored_bytes': log_group.get('storedBytes', 0)
+                        })
+
+                        # Set retention if needed
+                        if current_retention != retention_days:
+                            if dry_run:
+                                print(f"📍 {region}: Would set {log_group_name} to {retention_days} days (current: {current_retention})")
+                            else:
+                                try:
+                                    logs.put_retention_policy(
+                                        logGroupName=log_group_name,
+                                        retentionInDays=retention_days
+                                    )
+                                    print(f"✅ {region}: Set {log_group_name} to {retention_days} days")
+                                    total_changed += 1
+                                except ClientError as e:
+                                    print(f"❌ {region}: Failed to set {log_group_name} - {e}")
+                        else:
+                            print(f"✓ {region}: {log_group_name} already has {retention_days} days retention")
+
+        except ClientError as e:
+            # Skip regions where CloudWatch Logs isn't available
+            continue
+
+    print("=" * 60)
+    print(f"📊 Summary:")
+    print(f" Found {len(edge_log_groups)} Lambda@Edge log groups")
+    print(f" Total storage: {sum(g['stored_bytes'] for g in edge_log_groups) / (1024**3):.2f} GB")
+    if not dry_run:
+        print(f" Changed {total_changed} log groups")
+
+    return edge_log_groups
+
+if __name__ == "__main__":
+    # Dry run first to see what would be changed
+    edge_logs = set_edge_log_retention(retention_days=7, dry_run=True)
+
+    # Uncomment the line below to actually make changes
+    # set_edge_log_retention(retention_days=7, dry_run=False)
cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/test.py

@@ -0,0 +1,22 @@
+from app import lambda_handler
+
+if __name__ == "__main__":
+
+    event = {
+        "version": "0",
+        "id": "12345678-1234-1234-1234-123456789012",
+        "detail-type": "Scheduled Event",
+        "source": "aws.events",
+        "account": "123456789012",
+        "time": "2024-01-15T10:00:00Z",
+        "region": "us-east-1",
+        "resources": [
+            "arn:aws:events:us-east-1:123456789012:rule/LogRetentionManager"
+        ],
+        "detail": {
+            "days": 7,
+            "dry_run": False
+        }
+    }
+
+    lambda_handler(event, None)
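The test harness above simulates an EventBridge scheduled invocation with `days` and `dry_run` under `detail`. The diff does not show how the schedule is actually wired; a minimal CDK sketch of one plausible wiring (the rule id, rate, and payload shape are assumptions, not the packaged stack's code) is:

    # Illustrative only: schedule the log retention manager once a day with a
    # payload mirroring the shape that test.py simulates. Names are assumed.
    from aws_cdk import Duration, aws_events as events, aws_events_targets as targets


    def schedule_log_retention(scope, manager_fn) -> events.Rule:
        """Invoke manager_fn daily with a detail payload of days/dry_run."""
        rule = events.Rule(
            scope,
            "LogRetentionManagerSchedule",  # hypothetical construct id
            schedule=events.Schedule.rate(Duration.days(1)),
        )
        rule.add_target(
            targets.LambdaFunction(
                manager_fn,
                event=events.RuleTargetInput.from_object(
                    {"detail": {"days": 7, "dry_run": False}}
                ),
            )
        )
        return rule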
cdk_factory/stack_library/lambda_edge/lambda_edge_log_retention_stack.py: File without changes
cdk_factory/stack_library/lambda_edge/lambda_edge_stack.py

@@ -134,7 +134,12 @@ class LambdaEdgeStack(IStack, StandardizedSsmMixin):
 
         resolved_env = {}
 
-
+        # Use the new simplified configuration structure
+        configuration = self.edge_config.dictionary.get("configuration", {})
+        runtime_config = configuration.get("runtime", {})
+        ui_config = configuration.get("ui", {})
+
+        for key, value in runtime_config.items():
             # Check if value is an SSM parameter reference
             if isinstance(value, str) and value.startswith("{{ssm:") and value.endswith("}}"):
                 # Extract SSM parameter path
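The `{{ssm:...}}` check above only shows how references are detected. As a rough illustration of what resolving a single reference could involve (this is not the package's actual helper, and fetching with boto3 is an assumption):

    # Illustrative only: extract the parameter path implied by the
    # startswith/endswith check above and fetch its value with boto3.
    import boto3


    def resolve_ssm_reference(value):
        """Return the value behind a "{{ssm:/path}}" reference, else the value itself."""
        if isinstance(value, str) and value.startswith("{{ssm:") and value.endswith("}}"):
            parameter_path = value[len("{{ssm:"):-2].strip()  # e.g. "/dev/my-app/api-url" (hypothetical)
            ssm = boto3.client("ssm")
            return ssm.get_parameter(Name=parameter_path)["Parameter"]["Value"]
        return value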
@@ -216,11 +221,23 @@ class LambdaEdgeStack(IStack, StandardizedSsmMixin):
         # Since Lambda@Edge doesn't support environment variables, we bundle a config file
         # Use the full function_name (e.g., "tech-talk-dev-ip-gate") not just the base name
         resolved_env = self._resolve_environment_variables()
+
+        # Get the UI configuration
+        configuration = self.edge_config.dictionary.get("configuration", {})
+        ui_config = configuration.get("ui", {})
+
+
+        workload_name = self.deployment.workload.get("name")
+
+        if not workload_name:
+            raise ValueError("Workload name is required for Lambda@Edge function")
         runtime_config = {
             'environment': self.deployment.environment,
+            'workload': workload_name,
             'function_name': function_name,
             'region': self.deployment.region,
-            '
+            'runtime': resolved_env,  # Runtime variables (SSM, etc.)
+            'ui': ui_config  # UI configuration (colors, messages, etc.)
         }
 
         runtime_config_path = temp_code_dir / 'runtime_config.json'
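With these additions the bundled `runtime_config.json` carries `workload`, `runtime`, and `ui` keys alongside the existing ones. A hypothetical payload for the "tech-talk-dev-ip-gate" example named in the comment above, with assumed values:

    # Illustrative only: what the bundled runtime_config.json could contain.
    # The top-level keys mirror the dict built above; the inner values are assumptions.
    example_runtime_config = {
        "environment": "dev",
        "workload": "tech-talk",
        "function_name": "tech-talk-dev-ip-gate",
        "region": "us-east-1",
        "runtime": {"ALLOWED_CIDRS": "10.0.0.0/8"},    # resolved runtime/SSM values (assumed)
        "ui": {"maintenance_message": "Back soon"},    # UI settings (assumed)
    }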
@@ -248,21 +265,17 @@ class LambdaEdgeStack(IStack, StandardizedSsmMixin):
             self.edge_config.runtime,
             _lambda.Runtime.PYTHON_3_11
         )
-
-        # Lambda@Edge does NOT support environment variables
-        # Configuration must be handled via:
-        # 1. Hardcoded in the function code
-        # 2. Fetched from SSM Parameter Store at runtime
-        # 3. Other configuration mechanisms
-
+
         # Log warning if environment variables are configured
-
+        configuration = self.edge_config.dictionary.get("configuration", {})
+        runtime_config = configuration.get("runtime", {})
+
+        if runtime_config:
             logger.warning(
                 f"Lambda@Edge function '{function_name}' has environment variables configured, "
-                "but Lambda@Edge does not support environment variables. "
-                "The function must fetch these values from SSM Parameter Store at runtime."
+                "but Lambda@Edge does not support environment variables. The function must fetch these values from SSM Parameter Store at runtime."
             )
-            for key, value in
+            for key, value in runtime_config.items():
                 logger.warning(f" - {key}: {value}")
 
         # Create execution role with CloudWatch Logs and SSM permissions
@@ -283,7 +296,7 @@ class LambdaEdgeStack(IStack, StandardizedSsmMixin):
         )
 
         # Add SSM read permissions if environment variables reference SSM parameters
-        if
+        if runtime_config:
             execution_role.add_to_policy(
                 iam.PolicyStatement(
                     effect=iam.Effect.ALLOW,
@@ -293,12 +306,88 @@ class LambdaEdgeStack(IStack, StandardizedSsmMixin):
                         "ssm:GetParametersByPath"
                     ],
                     resources=[
-                        f"arn:aws:ssm:*:{
+                        f"arn:aws:ssm:*:{self.deployment.account}:parameter/*"
                     ]
                 )
             )
 
-        #
+        # Add Secrets Manager permissions for origin secret access
+        execution_role.add_to_policy(
+            iam.PolicyStatement(
+                effect=iam.Effect.ALLOW,
+                actions=[
+                    "secretsmanager:GetSecretValue",
+                    "secretsmanager:DescribeSecret"
+                ],
+                resources=[
+                    f"arn:aws:secretsmanager:*:{self.deployment.account}:secret:{self.deployment.environment}/{self.workload.name}/origin-secret*"
+                ]
+            )
+        )
+
+        # Add ELB permissions for target health API access
+        execution_role.add_to_policy(
+            iam.PolicyStatement(
+                effect=iam.Effect.ALLOW,
+                actions=[
+                    "elasticloadbalancing:DescribeTargetHealth",
+                    "elasticloadbalancing:DescribeTargetGroups",
+                    "elasticloadbalancing:DescribeLoadBalancers",
+                    "elasticloadbalancing:DescribeListeners",
+                    "elasticloadbalancing:DescribeTags"
+                ],
+                resources=[
+                    "*"
+                ]
+            )
+        )
+
+        # Add ACM permissions for certificate validation
+        execution_role.add_to_policy(
+            iam.PolicyStatement(
+                effect=iam.Effect.ALLOW,
+                actions=[
+                    "acm:DescribeCertificate",
+                    "acm:ListCertificates"
+                ],
+                resources=[
+                    f"arn:aws:acm:*:{self.deployment.account}:certificate/*"
+                ]
+            )
+        )
+
+        # Add Route 53 permissions for health check access
+        execution_role.add_to_policy(
+            iam.PolicyStatement(
+                effect=iam.Effect.ALLOW,
+                actions=[
+                    "route53:GetHealthCheckStatus",
+                    "route53:ListHealthChecks",
+                    "route53:GetHealthCheck"
+                ],
+                resources=[
+                    f"arn:aws:route53:::{self.deployment.account}:health-check/*"
+                ]
+            )
+        )
+
+        # Add CloudWatch permissions for enhanced logging and metrics
+        execution_role.add_to_policy(
+            iam.PolicyStatement(
+                effect=iam.Effect.ALLOW,
+                actions=[
+                    "logs:CreateLogGroup",
+                    "logs:CreateLogStream",
+                    "logs:PutLogEvents",
+                    "cloudwatch:PutMetricData"
+                ],
+                resources=[
+                    f"arn:aws:logs:*:{self.deployment.account}:log-group:/aws/lambda/*",
+                    f"arn:aws:cloudwatch:*:{self.deployment.account}:metric:*"
+                ]
+            )
+        )
+
         self.function = _lambda.Function(
             self,
             function_name,
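These statements describe what the deployed edge function is allowed to call at runtime. For example, the Secrets Manager grant above would cover a lookup like the following sketch (the secret id pattern comes from the ARN in the policy; how the packaged function actually uses the secret is not shown in this diff):

    # Illustrative only: read the origin secret that the policy above permits.
    import json
    import boto3


    def get_origin_secret(environment, workload):
        """Fetch '<environment>/<workload>/origin-secret' from Secrets Manager."""
        client = boto3.client("secretsmanager", region_name="us-east-1")
        response = client.get_secret_value(
            SecretId=f"{environment}/{workload}/origin-secret"
        )
        return json.loads(response["SecretString"])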
@@ -311,6 +400,7 @@ class LambdaEdgeStack(IStack, StandardizedSsmMixin):
             description=self.edge_config.description,
             role=execution_role,
             # Lambda@Edge does NOT support environment variables
+            # Configuration must be fetched from SSM at runtime
             log_retention=logs.RetentionDays.ONE_WEEK,
         )
 
@@ -365,60 +455,45 @@ class LambdaEdgeStack(IStack, StandardizedSsmMixin):
 
     def _configure_edge_log_retention(self, function_name: str) -> None:
         """
-        Configure log retention for Lambda@Edge
+        Configure log retention for Lambda@Edge log groups in all edge regions
 
-
-
+        TODO: IMPLEMENT POST-DEPLOYMENT SOLUTION
+        --------------------------------------
+        Lambda@Edge log groups are created on-demand when the function is invoked
+        at edge locations, not during deployment. This means we cannot set retention
+        policies during CloudFormation deployment.
+
+        Possible solutions to implement:
+        1. EventBridge rule that triggers on log group creation
+        2. Custom Lambda function that runs periodically to set retention
+        3. Post-deployment script that waits for log groups to appear
+        4. CloudWatch Logs subscription filter that handles new log groups
+
+        Current behavior: DISABLED to prevent deployment failures
         """
-        from aws_cdk import custom_resources as cr
 
-        #
+        # DISABLED: Edge log groups don't exist during deployment
+        # Lambda@Edge creates log groups on-demand at edge locations
+        # Setting retention policies during deployment fails with "log group does not exist"
+
         edge_retention_days = self.edge_config.dictionary.get("edge_log_retention_days", 7)
+        logger.warning(
+            f"Edge log retention configuration disabled - log groups are created on-demand. "
+            f"Desired retention: {edge_retention_days} days. "
+            f"See TODO in _configure_edge_log_retention() for implementation approach."
+        )
 
-        #
-
-
-
-            'ca-central-1', 'sa-east-1'
-        ]
-
-        # Create custom resource to set log retention for each region
-        for region in edge_regions:
-            log_group_name = f"/aws/lambda/{region}.{function_name}"
-
-            # Use AwsCustomResource to set log retention
-            cr.AwsCustomResource(
-                self, f"EdgeLogRetention-{region}",
-                on_update={
-                    "service": "Logs",
-                    "action": "putRetentionPolicy",
-                    "parameters": {
-                        "logGroupName": log_group_name,
-                        "retentionInDays": edge_retention_days
-                    },
-                    "physical_resource_id": cr.PhysicalResourceId.from_response("logGroupName")
-                },
-                on_delete={
-                    "service": "Logs",
-                    "action": "deleteRetentionPolicy",
-                    "parameters": {
-                        "logGroupName": log_group_name
-                    },
-                    "physical_resource_id": cr.PhysicalResourceId.from_response("logGroupName")
-                },
-                policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
-                    resources=[f"arn:aws:logs:{region}:*:log-group:{log_group_name}*"]
-                )
-            )
+        # TODO: Implement one of these solutions:
+        # 1. EventBridge + Lambda: Trigger on log group creation and set retention
+        # 2. Periodic Lambda: Scan for edge log groups and apply retention policies
+        # 3. Post-deployment script: Wait for log groups to appear after edge replication
+        # 4. CloudWatch Logs subscription: Process new log group events
 
-
+        return
 
     def _add_outputs(self, function_name: str) -> None:
         """Add CloudFormation outputs and SSM exports"""
 
-
-
         # SSM Parameter Store exports (if configured)
         ssm_exports = self.edge_config.dictionary.get("ssm", {}).get("exports", {})
         if ssm_exports:
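Option 1 in the TODO above (react to log group creation) could look roughly like the following standalone handler, assuming an EventBridge rule on the CloudTrail `CreateLogGroup` event. This is a sketch, not the packaged log_retention_manager, and the 7-day default simply mirrors `edge_log_retention_days`:

    # Illustrative only: apply retention when CloudTrail reports a new
    # Lambda@Edge log group via EventBridge.
    import boto3


    def lambda_handler(event, context):
        detail = event.get("detail", {})
        log_group_name = detail.get("requestParameters", {}).get("logGroupName", "")

        # Lambda@Edge log groups look like /aws/lambda/us-east-1.<function-name>
        if "/aws/lambda/us-east-1." not in log_group_name:
            return {"skipped": log_group_name}

        region = event.get("region", "us-east-1")
        logs = boto3.client("logs", region_name=region)
        logs.put_retention_policy(logGroupName=log_group_name, retentionInDays=7)
        return {"updated": log_group_name}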
@@ -440,40 +515,34 @@ class LambdaEdgeStack(IStack, StandardizedSsmMixin):
                 description=f"{key} for Lambda@Edge function {function_name}"
             )
 
-        # Export
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        for
-
-
-
-
-
-
-
-
-
-
-
-
-            self,
-            f"env-{env_key}-param",
-            env_value,
-            ssm_path,
-            description=f"Configuration for Lambda@Edge: {env_key}"
-        )
+        # Export the complete configuration as a single SSM parameter
+        config_ssm_path = f"/{self.deployment.environment}/{self.workload.name}/lambda-edge/config"
+        configuration = self.edge_config.dictionary.get("configuration", {})
+        environment_variables = configuration.get("environment_variables", {})
+
+        # Build full configuration that Lambda@Edge expects
+        full_config = {
+            "environment_variables": environment_variables,
+            "runtime": configuration.get("runtime", {}),
+            "ui": configuration.get("ui", {})
+        }
+
+        self.export_ssm_parameter(
+            self,
+            "full-config-param",
+            json.dumps(full_config),
+            config_ssm_path,
+            description=f"Complete Lambda@Edge configuration for {function_name} - update this for dynamic changes"
+        )
+
+        # Export cache TTL parameter for dynamic cache control
+        cache_ttl_ssm_path = f"/{self.deployment.environment}/{self.workload.name}/lambda-edge/cache-ttl"
+        default_cache_ttl = self.edge_config.dictionary.get("cache_ttl_seconds", 300)  # Default 5 minutes
+
+        self.export_ssm_parameter(
+            self,
+            "cache-ttl-param",
+            str(default_cache_ttl),
+            cache_ttl_ssm_path,
+            description=f"Lambda@Edge configuration cache TTL in seconds for {function_name} - adjust for maintenance windows (30-3600)"
+        )
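The two exports above are intended to be read back by the edge function at runtime. A minimal sketch of that read path with in-memory caching, assuming the same SSM paths (the caching strategy itself is not part of this diff):

    # Illustrative only: fetch the exported config and honor the cache-ttl
    # parameter, keeping the result in module scope between invocations.
    import json
    import time
    import boto3

    _ssm = boto3.client("ssm", region_name="us-east-1")
    _cache = {"config": None, "fetched_at": 0.0, "ttl": 300}


    def get_edge_config(environment, workload):
        """Return the cached Lambda@Edge config, refreshing after the configured TTL."""
        now = time.time()
        if _cache["config"] is None or now - _cache["fetched_at"] > _cache["ttl"]:
            base = f"/{environment}/{workload}/lambda-edge"
            _cache["ttl"] = int(
                _ssm.get_parameter(Name=f"{base}/cache-ttl")["Parameter"]["Value"]
            )
            _cache["config"] = json.loads(
                _ssm.get_parameter(Name=f"{base}/config")["Parameter"]["Value"]
            )
            _cache["fetched_at"] = now
        return _cache["config"]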
cdk_factory/stack_library/load_balancer/load_balancer_stack.py

@@ -459,6 +459,10 @@ class LoadBalancerStack(IStack, VPCProviderMixin, StandardizedSsmMixin):
         # Parse AWS ALB conditions format
         aws_conditions = rule_config.get("conditions", [])
         for condition in aws_conditions:
+            enabled = str(condition.get("enabled", True)).lower()
+            if enabled != "true":
+                continue
+
             field = condition.get("field")
             if field == "http-header" and "http_header_config" in condition:
                 header_config = condition["http_header_config"]