cdk-factory 0.19.19__py3-none-any.whl → 0.21.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cdk_factory/configurations/resources/rds.py +17 -8
- cdk_factory/stack_library/auto_scaling/auto_scaling_stack.py +122 -85
- cdk_factory/stack_library/ecs/ecs_cluster_stack.py +2 -2
- cdk_factory/stack_library/ecs/ecs_service_stack.py +2 -0
- cdk_factory/stack_library/lambda_edge/functions/README.md +0 -0
- cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/__init__.py +33 -0
- cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/app.py +30 -0
- cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/edge_log_retention.py +85 -0
- cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/requirements.txt +2 -0
- cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/test.py +22 -0
- cdk_factory/stack_library/lambda_edge/lambda_edge_log_retention_stack.py +0 -0
- cdk_factory/stack_library/lambda_edge/lambda_edge_stack.py +94 -8
- cdk_factory/stack_library/load_balancer/load_balancer_stack.py +4 -0
- cdk_factory/stack_library/rds/rds_stack.py +15 -13
- cdk_factory/stack_library/route53/route53_stack.py +97 -133
- cdk_factory/version.py +1 -1
- {cdk_factory-0.19.19.dist-info → cdk_factory-0.21.1.dist-info}/METADATA +1 -1
- {cdk_factory-0.19.19.dist-info → cdk_factory-0.21.1.dist-info}/RECORD +21 -15
- cdk_factory/stack_library/lambda_edge/EDGE_LOG_RETENTION_TODO.md +0 -226
- {cdk_factory-0.19.19.dist-info → cdk_factory-0.21.1.dist-info}/WHEEL +0 -0
- {cdk_factory-0.19.19.dist-info → cdk_factory-0.21.1.dist-info}/entry_points.txt +0 -0
- {cdk_factory-0.19.19.dist-info → cdk_factory-0.21.1.dist-info}/licenses/LICENSE +0 -0
cdk_factory/stack_library/route53/route53_stack.py CHANGED

```diff
@@ -27,6 +27,7 @@ from cdk_factory.interfaces.standardized_ssm_mixin import StandardizedSsmMixin
 from cdk_factory.stack.stack_module_registry import register_stack
 from cdk_factory.workload.workload_factory import WorkloadConfig
 
+
 logger = Logger(service="Route53Stack")
 
 
@@ -48,7 +49,8 @@ class Route53Stack(IStack, StandardizedSsmMixin):
         self.hosted_zone = None
         self.certificate = None
         self.records = {}
-        self.
+        self._local_cache = {}  # Cache for reusing distributions
+        self._missing_configurations = []
 
     def build(self, stack_config: StackConfig, deployment: DeploymentConfig, workload: WorkloadConfig) -> None:
         """Build the Route53 stack"""
@@ -119,7 +121,7 @@ class Route53Stack(IStack, StandardizedSsmMixin):
         return certificate
 
     def _create_dns_records(self) -> None:
-        self._create_dns_records_old()
+        # self._create_dns_records_old()
         self._create_dns_records_new()
 
 
@@ -128,7 +130,7 @@ class Route53Stack(IStack, StandardizedSsmMixin):
         # Create a unique cache key from distribution domain and ID
         cache_key = f"{distribution_domain}-{distribution_id}"
 
-        if cache_key not in self.
+        if cache_key not in self._local_cache:
             # Create the distribution construct with a unique ID
             unique_id = f"CF-{distribution_domain.replace('.', '-').replace('*', 'wildcard')}-{hash(cache_key) % 10000}"
             distribution = cloudfront.Distribution.from_distribution_attributes(
@@ -136,104 +138,108 @@ class Route53Stack(IStack, StandardizedSsmMixin):
                 domain_name=distribution_domain,
                 distribution_id=distribution_id
             )
-            self.
+            self._local_cache[cache_key] = distribution
             logger.info(f"Created CloudFront distribution construct for {distribution_domain}")
 
-        return self.
+        return self._local_cache[cache_key]
+
+    def _get_or_create_alb_target(self, record_name: str, target_value: str, load_balancer_zone_id: str, security_group_id: str, load_balancer_dns_name: str) -> targets.LoadBalancerTarget:
+        """Get or create a CloudFront distribution, reusing if already created"""
+        # Create a unique cache key from distribution domain and ID
+        cache_key = f"{record_name}-alb"
+
+        if cache_key not in self._local_cache:
+            # Create the distribution construct with a unique ID
+            target = targets.LoadBalancerTarget(
+                elbv2.ApplicationLoadBalancer.from_application_load_balancer_attributes(
+                    self, f"ALB-{record_name}",
+                    load_balancer_arn=target_value,
+                    load_balancer_canonical_hosted_zone_id=load_balancer_zone_id,
+                    security_group_id=security_group_id,
+                    load_balancer_dns_name=load_balancer_dns_name,
+
+                )
+            )
+            self._local_cache[cache_key] = target
+            logger.info(f"Created ALB target construct for ALB-{record_name}")
+
+        return self._local_cache[cache_key]
 
     def _create_dns_records_new(self) -> None:
         """Create DNS records based on configuration - generic implementation"""
 
-
+
 
         for record in self.route53_config.records:
-
-
+            t = record.get("type")
+            record_name = self._get_resolved_value(config=record, key="name", record_type=t)
+            record_type = self._get_resolved_value(config=record, key="type", record_type=t)
+
 
-            if not record_name or not record_type:
-                message = f"Record missing name or type: {record}"
-                logger.warning(message)
-                missing_configurations.append(message)
-                continue
 
             # Handle alias records
             if "alias" in record:
                 alias_config = record["alias"]
-                target_type = alias_config.get("target_type", "")
-                target_value = alias_config.get("target_value", "")
-                hosted_zone_id = alias_config.get("hosted_zone_id", "")
 
-
-
-
+                target_type = self._get_resolved_value(config=alias_config, key="target_type", record_type=record_type)
+                target_value = self._get_resolved_value(config=alias_config, key="target_value", record_type=record_type)
+
+
 
-                if not target_type or not target_value:
-                    message = f"Alias record missing target_type or target_value: {record}"
-                    logger.warning(message)
-                    missing_configurations.append(message)
-                    continue
 
                 # Create appropriate target based on type
                 alias_target = None
                 if target_type == "cloudfront":
                     # CloudFront distribution target
                     distribution_domain = target_value
-                    distribution_id =
-
-                        message = f"Alias record missing distribution_id: {record}"
-                        logger.warning(message)
-                        missing_configurations.append(message)
-                        continue
+                    distribution_id = self._get_resolved_value(config=alias_config, key="distribution_id", record_type=record_type)
+
 
                     # Get or create the distribution (reuses if already created)
                     distribution = self._get_or_create_cloudfront_distribution(distribution_domain, distribution_id)
                     alias_target = route53.RecordTarget.from_alias(
                         targets.CloudFrontTarget(distribution)
                     )
-                elif target_type == "loadbalancer" or target_type == "alb":
-                    #
-
-
-
-
-
-
-
-
-
-
-
-                    alias_target = route53.RecordTarget.from_alias(
-                        targets.LoadBalancerTarget(
-                            elbv2.ApplicationLoadBalancer.from_load_balancer_attributes(
-                                self, f"ELB-{record_name}",
-                                load_balancer_dns_name=target_value,
-                                load_balancer_canonical_hosted_zone_id=hosted_zone_id
-                            )
-                        )
-                    )
+                elif target_type == "loadbalancer" or target_type == "alb" or target_type == "elbv2":
+                    # ALB alias target using imported load balancer attributes
+
+                    security_group_id=self._get_resolved_value(config=alias_config, key="security_group_id", record_type=record_type)
+                    load_balancer_dns_name = self._get_resolved_value(config=alias_config, key="load_balancer_dns_name", record_type=record_type)
+                    load_balancer_zone_id = self._get_resolved_value(config=alias_config, key="load_balancer_zone_id", record_type=record_type)
+
+
+
+                    target = self._get_or_create_alb_target(record_name, target_value, load_balancer_zone_id, security_group_id, load_balancer_dns_name)
+
+                    alias_target = route53.RecordTarget.from_alias(target)
+
                 else:
                     message = f"Unsupported alias target type: {target_type}"
                     logger.warning(message)
                     missing_configurations.append(message)
                     continue
 
-
-
-
-
-
-
-
-
-                ) if record_type == "A" else route53.AaaaRecord(
-                    self,
-                    f"AliasRecord-{record_name}-{record_type}",
-                    zone=self.hosted_zone,
+                route_53_record = None
+                id = f"AliasRecord-{record_name}-{record_type}"
+                print(f"creating record {id}")
+                if record_type == "A":
+                    route_53_record = route53.ARecord(
+                        self,
+                        id,
+                        zone=self.hosted_zone,
                     record_name=record_name,
                     target=alias_target,
                     ttl=cdk.Duration.seconds(record.get("ttl", 300))
                 )
+                elif record_type == "AAAA":
+                    route_53_record = route53.AaaaRecord(
+                        self,
+                        id,
+                        zone=self.hosted_zone,
+                        record_name=record_name,
+                        target=alias_target,
+                        ttl=cdk.Duration.seconds(record.get("ttl", 300))
+                    )
 
             # Handle standard records with values
             elif "values" in record:
@@ -326,89 +332,47 @@ class Route53Stack(IStack, StandardizedSsmMixin):
                 else:
                     message = f"Unsupported record type: {record_type}"
                     logger.warning(message)
-
+                    self._missing_configurations.append(message)
                     continue
 
            else:
                message = f"Record missing 'alias' or 'values' configuration: {record}"
                logger.warning(message)
-
+                self._missing_configurations.append(message)
                continue
 
-        if
+        if self._missing_configurations and len(self._missing_configurations) > 0:
            # print all missing configurations
            print("Missing configurations:")
-            for message in
+            for message in self._missing_configurations:
                print(message)
 
-            messages = "\n".join(
+            messages = "\n".join(self._missing_configurations)
            raise ValueError(f"Missing Configurations:\n{messages}")
 
-    def
-
-
-
-
-        target_type = alias_record.get("target_type", "")
-        target_value = alias_record.get("target_value", "")
-
-        # target value needs to handle SSM parameters
-        if "{{ssm:" in target_value and "}}" in target_value:
-            # Extract SSM parameter path from template like {{ssm:/path/to/parameter}}
-            ssm_path = target_value.split("{{ssm:")[1].split("}}")[0]
-            target_value = self.get_ssm_imported_value(ssm_path)
+    def _get_resolved_value(self, *, config: dict, key: str, required: bool = True, record_type: str = "" ) -> str:
+
+        value = config.get(key, "")
+        x = str(value).replace("{", "").replace("}", "").replace(":", "")
+        unique_id = f"{key}-id-{record_type}-{x}"
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        alias_target = route53.RecordTarget.from_alias(targets.LoadBalancerTarget(alb))
-        elif target_type == "cloudfront":
-            # For CloudFront, we would need the distribution
-            # This is a simplified implementation
-            pass
-
-        if alias_target:
-            record = route53.ARecord(
-                self,
-                f"AliasRecord-{record_name}",
-                zone=self.hosted_zone,
-                record_name=record_name,
-                target=alias_target
-            )
-            self.records[record_name] = record
-
-        # Create CNAME records
-        for cname_record in self.route53_config.cname_records:
-            record_name = cname_record.get("name", "")
-            target_domain = cname_record.get("target_domain", "")
-            ttl = cname_record.get("ttl", 300)
-
-            if not record_name or not target_domain:
-                continue
-
-            record = route53.CnameRecord(
-                self,
-                f"CnameRecord-{record_name}",
-                zone=self.hosted_zone,
-                record_name=record_name,
-                domain_name=target_domain,
-                ttl=cdk.Duration.seconds(ttl)
-            )
-            self.records[record_name] = record
+        if unique_id in self._local_cache:
+            return self._local_cache[unique_id]
+
+
+
+        # Handle SSM parameter references in target_value
+        value = self.resolve_ssm_value(self, value, unique_id=unique_id)
+
+        if required and not value:
+            self._missing_configurations.append(f"Missing required value for key: {key}")
+
+        self._local_cache[unique_id] = value
+
+
+        return value
+
+
 
     def _add_outputs(self) -> None:
         """Add CloudFormation outputs for the Route53 resources"""
```
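Taken together, these hunks replace per-field validation with a single resolver: every key now goes through the new `_get_resolved_value`, which memoizes resolved values in `self._local_cache` and records absent required keys in `self._missing_configurations` instead of short-circuiting. A minimal sketch of a records configuration that this generic implementation appears to consume (key names are taken from the diff above; the values and the `{{ssm:...}}` placeholder syntax, carried over from the removed implementation, are assumptions about the new `resolve_ssm_value` path):

```python
# Hypothetical route53 stack configuration for _create_dns_records_new().
# Key names come from the diff; the concrete values are illustrative only.
records = [
    {
        "name": "www.example.com",  # A-record alias to a CloudFront distribution
        "type": "A",
        "ttl": 300,
        "alias": {
            "target_type": "cloudfront",
            "target_value": "d1234abcd.cloudfront.net",  # distribution domain
            "distribution_id": "{{ssm:/my-app/cloudfront/distribution-id}}",
        },
    },
    {
        "name": "api.example.com",  # A-record alias to an imported ALB
        "type": "A",
        "alias": {
            "target_type": "alb",  # "loadbalancer" and "elbv2" are also accepted
            "target_value": "arn:aws:elasticloadbalancing:...",  # load balancer ARN
            "security_group_id": "{{ssm:/my-app/alb/security-group-id}}",
            "load_balancer_dns_name": "{{ssm:/my-app/alb/dns-name}}",
            "load_balancer_zone_id": "{{ssm:/my-app/alb/zone-id}}",
        },
    },
]
```

Any record that resolves to an unsupported type or is missing a required key lands in `self._missing_configurations`, and the build fails at the end of the pass with a single `ValueError` listing every problem.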
cdk_factory/version.py CHANGED

```diff
@@ -1 +1 @@
-__version__ = "0.19.19"
+__version__ = "0.21.1"
```
{cdk_factory-0.19.19.dist-info → cdk_factory-0.21.1.dist-info}/RECORD CHANGED

```diff
@@ -2,7 +2,7 @@ cdk_factory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 cdk_factory/app.py,sha256=RnX0-pwdTAPAdKJK_j13Zl8anf9zYKBwboR0KA8K8xM,10346
 cdk_factory/cdk.json,sha256=SKZKhJ2PBpFH78j-F8S3VDYW-lf76--Q2I3ON-ZIQfw,3106
 cdk_factory/cli.py,sha256=FGbCTS5dYCNsfp-etshzvFlGDCjC28r6rtzYbe7KoHI,6407
-cdk_factory/version.py,sha256=
+cdk_factory/version.py,sha256=SgjjYIjRx1j_wfbQTv5G4w3woiZ-Jp6WCRh6Z-HHPxc,23
 cdk_factory/builds/README.md,sha256=9BBWd7bXpyKdMU_g2UljhQwrC9i5O_Tvkb6oPvndoZk,90
 cdk_factory/commands/command_loader.py,sha256=QbLquuP_AdxtlxlDy-2IWCQ6D-7qa58aphnDPtp_uTs,3744
 cdk_factory/configurations/base_config.py,sha256=eJ3Pl3GWk1jVr_bYQaaWlw4_-ZiFGaiXllI_fOOX1i0,9323
@@ -40,7 +40,7 @@ cdk_factory/configurations/resources/lambda_layers.py,sha256=gVeP_-LC3Eq0lkPaG_J
 cdk_factory/configurations/resources/lambda_triggers.py,sha256=MD7cdMNKEulNBhtMLIFnWJuJ5R-yyIqa0LHUgbSQerA,834
 cdk_factory/configurations/resources/load_balancer.py,sha256=P-jKemIjIWWqScmQKspmRy1m3BrwNkRtTNHDStOAJds,5617
 cdk_factory/configurations/resources/monitoring.py,sha256=CPYWbUbWQzoPqDhdPiB4Vahq-pPi6BEkavkVohadSIo,2422
-cdk_factory/configurations/resources/rds.py,sha256=
+cdk_factory/configurations/resources/rds.py,sha256=fK2_GBIlVEF_I0Q73EaCz5AIuaTDIn2YQk-WcV-vf6s,16329
 cdk_factory/configurations/resources/resource_mapping.py,sha256=cwv3n63RJ6E59ErsmSTdkW4i-g8huhHtKI0ExbRhJxA,2182
 cdk_factory/configurations/resources/resource_naming.py,sha256=VE9S2cpzp11qqPL2z1sX79wXH0o1SntO2OG74nEmWC8,5508
 cdk_factory/configurations/resources/resource_types.py,sha256=1WQHyDoErb-M-tETZZzyLDtbq_jdC85-I403dM48pgE,2317
@@ -86,7 +86,7 @@ cdk_factory/stack_library/acm/__init__.py,sha256=4FNRLykblcKZvq_wieYwvv9N_jgrZnJ
 cdk_factory/stack_library/acm/acm_stack.py,sha256=LW4QgzcMDvtSpqwfc4ykgpzDGvXe4udvWVE_DtBN4Zg,5414
 cdk_factory/stack_library/api_gateway/api_gateway_stack.py,sha256=PvLdGvcopGpLP0FwpfUcfXNiTIfYLTXqrG-TniE38yc,39643
 cdk_factory/stack_library/auto_scaling/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-cdk_factory/stack_library/auto_scaling/auto_scaling_stack.py,sha256=
+cdk_factory/stack_library/auto_scaling/auto_scaling_stack.py,sha256=gjDxYWC4QZEZ4UOhGjgziggP7391dEMEe3vaslHSZ0c,25884
 cdk_factory/stack_library/aws_lambdas/lambda_stack.py,sha256=SFbBPvvCopbyiuYtq-O5sQkFCf94Wzua6aDUXiFDSB4,26161
 cdk_factory/stack_library/buckets/README.md,sha256=XkK3UNVtRLE7NtUvbhCOBBYUYi8hlrrSaI1s3GJVrqI,78
 cdk_factory/stack_library/buckets/bucket_stack.py,sha256=SLoZqSffAqmeBBEVUQg54D_8Ad5UKdkjEAmKAVgAqQo,1778
@@ -98,19 +98,25 @@ cdk_factory/stack_library/dynamodb/dynamodb_stack.py,sha256=-_Ij1zXIxUuZIWgdevam
 cdk_factory/stack_library/ecr/README.md,sha256=xw2wPx9WN03Y4BBwqvbi9lAFGNyaD1FUNpqxVJX14Oo,179
 cdk_factory/stack_library/ecr/ecr_stack.py,sha256=KLbd5WN5-ZiojsS5wJ4PX-tIL0cCylCSvXjO6sVrgWY,2102
 cdk_factory/stack_library/ecs/__init__.py,sha256=o5vGDtD_h-gVXb3-Ysr8xUNpEcMsnmMVgZv2Pupcdow,219
-cdk_factory/stack_library/ecs/ecs_cluster_stack.py,sha256=
-cdk_factory/stack_library/ecs/ecs_service_stack.py,sha256=
-cdk_factory/stack_library/lambda_edge/EDGE_LOG_RETENTION_TODO.md,sha256=nD49nLm5OyrZUvcGNFBy9H1MfSUOuZ7sasHNI-IO0Zk,6635
+cdk_factory/stack_library/ecs/ecs_cluster_stack.py,sha256=j0Cc7CyTK8MQDGCeZzo7XYrj1AM1hQBLH9DGXs7k-hQ,12165
+cdk_factory/stack_library/ecs/ecs_service_stack.py,sha256=fm3Q2oeMN5JULatYFrQDfyTs58k283weQjPEuhpz1Sc,28297
 cdk_factory/stack_library/lambda_edge/__init__.py,sha256=ByBJ_CWdc4UtTmFBZH-6pzBMNkjkdtE65AmnB0Fs6lM,156
-cdk_factory/stack_library/lambda_edge/
+cdk_factory/stack_library/lambda_edge/lambda_edge_log_retention_stack.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+cdk_factory/stack_library/lambda_edge/lambda_edge_stack.py,sha256=QxuyJ1mR8018HRqDXERkmsFMuEHd5YsVIACmXitYZDY,23344
+cdk_factory/stack_library/lambda_edge/functions/README.md,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/__init__.py,sha256=0JJIk47ubXY4QddDJ6hyyDMXBGYd0IzQyd3-SPqVikU,964
+cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/app.py,sha256=i77H3f0KATiNhnPo0Ca8-B5OWPtDRT8def4HHiCp8Zw,858
+cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/edge_log_retention.py,sha256=a-i0N_44RehzHA_hSx0Z9NFLfy4T-RZKA1sMS-CcYAw,3744
+cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/requirements.txt,sha256=nJ0ZsOr7cuZEKNQd6e3EPStDJI7Y5Jnshi-FoydLE7o,46
+cdk_factory/stack_library/lambda_edge/functions/log_retention_manager/test.py,sha256=CwjyMIjQ5kbeYicMUnAIK3eflBYbobEaM1noq-7Ck1Q,567
 cdk_factory/stack_library/load_balancer/__init__.py,sha256=wZpKw2OecLJGdF5mPayCYAEhu2H3c2gJFFIxwXftGDU,52
-cdk_factory/stack_library/load_balancer/load_balancer_stack.py,sha256=
+cdk_factory/stack_library/load_balancer/load_balancer_stack.py,sha256=iR-l5ujLS4zUReI-deWs8sDIUbmFYoWKt9kEZri1z2A,28441
 cdk_factory/stack_library/monitoring/__init__.py,sha256=k1G_KDx47Aw0UugaL99PN_TKlyLK4nkJVApCaAK7GJg,153
 cdk_factory/stack_library/monitoring/monitoring_stack.py,sha256=N_1YvEXE7fboH_S3kv_dSKZsufxMuPdFMjGzlNFpuSo,19283
 cdk_factory/stack_library/rds/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-cdk_factory/stack_library/rds/rds_stack.py,sha256=
+cdk_factory/stack_library/rds/rds_stack.py,sha256=UwLaDpyNNvPnMV8DfZon8fyusuGkvcpJiUB5V3Gjrt4,14748
 cdk_factory/stack_library/route53/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-cdk_factory/stack_library/route53/route53_stack.py,sha256=
+cdk_factory/stack_library/route53/route53_stack.py,sha256=PaYpVVYC_jZ-pbsmK0vW_OVleJ4En0L2O_a92jlWi18,16601
 cdk_factory/stack_library/rum/__init__.py,sha256=gUrWQdzd4rZ2J0YzAQC8PsEGAS7QgyYjB2ZCUKWasy4,90
 cdk_factory/stack_library/rum/rum_stack.py,sha256=c67m0Jbyx8hx9TTx9TBBhZMDqtSK7QCqKx_Ec1t8LgY,14067
 cdk_factory/stack_library/security_group/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -137,8 +143,8 @@ cdk_factory/utilities/os_execute.py,sha256=5Op0LY_8Y-pUm04y1k8MTpNrmQvcLmQHPQITE
 cdk_factory/utils/api_gateway_utilities.py,sha256=If7Xu5s_UxmuV-kL3JkXxPLBdSVUKoLtohm0IUFoiV8,4378
 cdk_factory/validation/config_validator.py,sha256=Pb0TkLiPFzUplBOgMorhRCVm08vEzZhRU5xXCDTa5CA,17602
 cdk_factory/workload/workload_factory.py,sha256=yDI3cRhVI5ELNDcJPLpk9UY54Uind1xQoV3spzT4z7E,6068
-cdk_factory-0.
-cdk_factory-0.
-cdk_factory-0.
-cdk_factory-0.
-cdk_factory-0.
+cdk_factory-0.21.1.dist-info/METADATA,sha256=9FoDlo8hg9_wGFmPmaVmnEQUO_Os-We2cYJgCiWSKZ4,2451
+cdk_factory-0.21.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+cdk_factory-0.21.1.dist-info/entry_points.txt,sha256=S1DPe0ORcdiwEALMN_WIo3UQrW_g4YdQCLEsc_b0Swg,53
+cdk_factory-0.21.1.dist-info/licenses/LICENSE,sha256=NOtdOeLwg2il_XBJdXUPFPX8JlV4dqTdDGAd2-khxT8,1066
+cdk_factory-0.21.1.dist-info/RECORD,,
```
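The RECORD entries above show that the deleted EDGE_LOG_RETENTION_TODO.md plan (below) has been superseded by a shipped `functions/log_retention_manager` package. Its `app.py` is not reproduced in this diff, so the following is only a minimal sketch of an EventBridge-triggered handler in the spirit of the TODO's Solution 1; the handler body, environment variables, and name filtering are assumptions borrowed from that plan and may differ from the released code:

```python
import fnmatch
import os

import boto3

logs = boto3.client("logs")


def lambda_handler(event, context):
    """Apply a retention policy to the log group named in a CloudTrail CreateLogGroup event."""
    retention_days = int(os.environ.get("RETENTION_DAYS", "7"))
    name_pattern = os.environ.get("FUNCTION_NAME_PATTERN", "*edge-function")

    # EventBridge wraps the CloudTrail record in "detail"; CreateLogGroup
    # carries the new log group's name in requestParameters.logGroupName.
    log_group = (
        event.get("detail", {})
        .get("requestParameters", {})
        .get("logGroupName", "")
    )

    # Edge log groups look like /aws/lambda/{edge-region}.{function-name};
    # match the trailing segment against the configured pattern.
    if not log_group or not fnmatch.fnmatch(log_group.rsplit("/", 1)[-1], name_pattern):
        return {"skipped": log_group}

    logs.put_retention_policy(logGroupName=log_group, retentionInDays=retention_days)
    return {"log_group": log_group, "retention_in_days": retention_days}
```

Wired to the CloudTrail `CreateLogGroup` rule sketched in the TODO, a handler like this would set retention as soon as an edge log group appears in us-east-1, which is the gap the deployment-time approach could not close.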
cdk_factory/stack_library/lambda_edge/EDGE_LOG_RETENTION_TODO.md DELETED

````diff
@@ -1,226 +0,0 @@
-# Lambda@Edge Log Retention - Implementation Plan
-
-## 🚨 Current Status: DISABLED
-
-Lambda@Edge log retention configuration has been **disabled** because edge log groups are created on-demand when the function is invoked at edge locations, not during CloudFormation deployment.
-
-## 🔍 Problem Analysis
-
-### Why Deployment-Time Configuration Fails
-1. **On-Demand Creation**: Lambda@Edge log groups are created only when the function is actually invoked at edge locations
-2. **Timing Issue**: CloudFormation deployment happens before any edge invocations occur
-3. **Error**: `The specified log group does not exist` when trying to set retention policies
-
-### Log Group Naming Pattern
-```
-Pattern: /aws/lambda/{edge-region}.{function-name}
-Example: /aws/lambda/eu-central-1.trav-talks-blue-green-edge-function
-Location: All edge log groups are created in us-east-1
-```
-
-## 💡 Proposed Solutions
-
-### Solution 1: EventBridge + Lambda (Recommended)
-```yaml
-# EventBridge rule to detect log group creation
-EventPattern:
-  source: ["aws.logs"]
-  detail-type: ["AWS API Call via CloudTrail"]
-  detail:
-    eventSource: ["logs.amazonaws.com"]
-    eventName: ["CreateLogGroup"]
-    requestParameters:
-      logGroupName: ["/aws/lambda/*.edge-function"]
-```
-
-**Implementation:**
-1. Create EventBridge rule that triggers on log group creation
-2. Lambda function receives event and sets retention policy
-3. Automatic handling of new edge log groups
-
-**Pros:**
-- Automatic and real-time
-- No manual intervention required
-- Handles all edge regions
-
-**Cons:**
-- Additional Lambda function to maintain
-- Requires CloudTrail enabled for CloudWatch Logs
-
-### Solution 2: Periodic Lambda Function
-```python
-def lambda_handler(event, context):
-    # Scan for edge log groups
-    log_groups = logs.describe_log_groups(
-        logGroupNamePrefix='/aws/lambda/eu-central-1.trav-talks-blue-green-edge-function'
-    )
-
-    # Apply retention policy
-    for log_group in log_groups['logGroups']:
-        logs.put_retention_policy(
-            logGroupName=log_group['logGroupName'],
-            retentionInDays=7
-        )
-```
-
-**Implementation:**
-1. Create Lambda function on schedule (e.g., every hour)
-2. Scan for edge log groups with function name pattern
-3. Apply retention policy if not already set
-
-**Pros:**
-- Simple to implement
-- No CloudTrail dependency
-- Can handle existing log groups
-
-**Cons:**
-- Not real-time (delayed retention)
-- Runs periodically even when not needed
-
-### Solution 3: Post-Deployment Script
-```bash
-#!/bin/bash
-# Wait for edge log groups to appear
-function_name="trav-talks-blue-green-edge-function"
-edge_regions=("eu-central-1" "eu-west-1" "ap-southeast-1")
-
-for region in "${edge_regions[@]}"; do
-    log_group="/aws/lambda/${region}.${function_name}"
-
-    # Wait for log group to exist
-    until aws logs describe-log-groups --log-group-name-prefix "$log_group" --region us-east-1; do
-        echo "Waiting for log group: $log_group"
-        sleep 30
-    done
-
-    # Set retention policy
-    aws logs put-retention-policy --log-group-name "$log_group" --retention-in-days 7 --region us-east-1
-done
-```
-
-**Implementation:**
-1. Script runs after Lambda@Edge deployment
-2. Waits for edge log groups to be created
-3. Sets retention policies when they appear
-
-**Pros:**
-- Direct control over timing
-- No additional AWS resources needed
-
-**Cons:**
-- Manual process
-- Hard to determine when log groups will appear
-- Not automated
-
-### Solution 4: CloudWatch Logs Subscription
-```python
-# Lambda triggered by log group creation via subscription filter
-def lambda_handler(event, context):
-    for record in event['Records']:
-        log_group = record['logGroup']
-        if 'edge-function' in log_group:
-            # Set retention policy
-            logs.put_retention_policy(
-                logGroupName=log_group,
-                retentionInDays=7
-            )
-```
-
-**Implementation:**
-1. Create subscription filter on log group pattern
-2. Lambda function triggered by log events
-3. Set retention policy on first log event
-
-**Pros:**
-- Event-driven
-- No CloudTrail needed
-
-**Cons:**
-- Requires log group to exist first
-- Complex subscription filter setup
-
-## 🎯 Recommended Implementation
-
-### Phase 1: Quick Win (Solution 2)
-Implement periodic Lambda function as temporary solution:
-- Easy to implement quickly
-- Solves immediate problem
-- Can be replaced later with better solution
-
-### Phase 2: Production Solution (Solution 1)
-Implement EventBridge + Lambda for production:
-- Real-time response
-- Automatic handling
-- Best long-term solution
-
-## 📋 Implementation Steps for Solution 1
-
-### 1. Create EventBridge Rule
-```python
-event_rule = events.Rule(
-    self, "EdgeLogGroupRule",
-    event_pattern=events.EventPattern(
-        source=["aws.logs"],
-        detail_type=["AWS API Call via CloudTrail"],
-        detail={
-            "eventSource": ["logs.amazonaws.com"],
-            "eventName": ["CreateLogGroup"],
-            "requestParameters": {
-                "logGroupName": [{"prefix": "/aws/lambda/"}]
-            }
-        }
-    )
-)
-```
-
-### 2. Create Lambda Function
-```python
-retention_handler = _lambda.Function(
-    self, "EdgeLogRetentionHandler",
-    runtime=_lambda.Runtime.PYTHON_3_9,
-    handler="handler.lambda_handler",
-    code=_lambda.Code.from_asset("lambda/edge_log_retention"),
-    environment={
-        "RETENTION_DAYS": "7",
-        "FUNCTION_NAME_PATTERN": "*edge-function"
-    }
-)
-```
-
-### 3. Add Permissions
-```python
-retention_handler.add_to_role_policy(
-    iam.PolicyStatement(
-        actions=["logs:PutRetentionPolicy", "logs:DescribeLogGroups"],
-        resources=["*"]
-    )
-)
-```
-
-### 4. Connect EventBridge to Lambda
-```python
-event_rule.add_target(targets.LambdaFunction(retention_handler))
-```
-
-## 🔧 Current Configuration
-
-The edge log retention configuration is currently **disabled** in the Lambda Edge stack:
-
-```python
-def _configure_edge_log_retention(self, function_name: str) -> None:
-    # DISABLED: See implementation plan above
-    logger.warning("Edge log retention disabled - see TODO for implementation")
-    return
-```
-
-## 📊 Configuration Impact
-
-| Setting | Current Behavior | Target Behavior |
-|---------|------------------|-----------------|
-| `edge_log_retention_days` | Warning logged, no action applied | Retention policy set on all edge log groups |
-| Edge log groups | Created with default retention (never expire) | Created with specified retention (e.g., 7 days) |
-| Cost impact | Potential high log storage costs | Controlled log storage costs |
-
----
-
-**Status**: Ready for implementation when edge log retention is required.
````

{cdk_factory-0.19.19.dist-info → cdk_factory-0.21.1.dist-info}/WHEEL: File without changes
{cdk_factory-0.19.19.dist-info → cdk_factory-0.21.1.dist-info}/entry_points.txt: File without changes
{cdk_factory-0.19.19.dist-info → cdk_factory-0.21.1.dist-info}/licenses/LICENSE: File without changes