aws-cdk-lib 2.183.0__py3-none-any.whl → 2.184.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in their respective public registries.

Potentially problematic release.


This version of aws-cdk-lib might be problematic; consult the registry's advisory page for this release for more details.

aws_cdk/_jsii/__init__.py CHANGED
@@ -34,7 +34,7 @@ import aws_cdk.cloud_assembly_schema._jsii
34
34
  import constructs._jsii
35
35
 
36
36
  __jsii_assembly__ = jsii.JSIIAssembly.load(
37
- "aws-cdk-lib", "2.183.0", __name__[0:-6], "aws-cdk-lib@2.183.0.jsii.tgz"
37
+ "aws-cdk-lib", "2.184.1", __name__[0:-6], "aws-cdk-lib@2.184.1.jsii.tgz"
38
38
  )
39
39
 
40
40
  __all__ = [
@@ -418,7 +418,7 @@ class EdgeFunction(
418
418
  :param max_concurrency: The maximum concurrency setting limits the number of concurrent instances of the function that an Amazon SQS event source can invoke. Default: - No specific limit.
419
419
  :param max_record_age: The maximum age of a record that Lambda sends to a function for processing. Valid Range: - Minimum value of 60 seconds - Maximum value of 7 days Default: - infinite or until the record expires.
420
420
  :param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
421
- :param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
421
+ :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
422
422
  :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
423
423
  :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
424
424
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
@@ -426,7 +426,7 @@ class EdgeFunction(
426
426
  :param source_access_configurations: Specific settings like the authentication protocol or the VPC components to secure access to your event source. Default: - none
427
427
  :param starting_position: The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should start reading. Default: - no starting position
428
428
  :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
429
- :param support_s3_on_failure_destination: Check if support S3 onfailure destination(ODF). Currently only MSK and self managed kafka event support S3 ODF Default: false
429
+ :param support_s3_on_failure_destination: Check if support S3 onfailure destination(OFD). Kinesis, DynamoDB, MSK and self managed kafka event support S3 OFD Default: false
430
430
  :param tumbling_window: The size of the tumbling windows to group records sent to DynamoDB or Kinesis. Default: - None
431
431
  '''
432
432
  if __debug__:
@@ -8890,7 +8890,7 @@ class EventField(
8890
8890
 
8891
8891
 
8892
8892
  rule.add_target(targets.CloudWatchLogGroup(log_group,
8893
- log_event=targets.LogGroupTargetInput.from_object(
8893
+ log_event=targets.LogGroupTargetInput.from_object_v2(
8894
8894
  timestamp=events.EventField.from_path("$.time"),
8895
8895
  message=events.EventField.from_path("$.detail-type")
8896
8896
  )
@@ -11230,6 +11230,8 @@ class RuleTargetInput(
11230
11230
  matched event.
11231
11231
 
11232
11232
  :param obj: -
11233
+
11234
+ :return: RuleTargetInput
11233
11235
  '''
11234
11236
  if __debug__:
11235
11237
  type_hints = typing.get_type_hints(_typecheckingstub__8091475ef36fdd615b50f3492db20e93a218f280b559ea557d766f61df68f0d0)
@@ -103,7 +103,7 @@ import aws_cdk.aws_logs as logs
103
103
 
104
104
 
105
105
  rule.add_target(targets.CloudWatchLogGroup(log_group,
106
- log_event=targets.LogGroupTargetInput.from_object(
106
+ log_event=targets.LogGroupTargetInput.from_object_v2(
107
107
  timestamp=events.EventField.from_path("$.time"),
108
108
  message=events.EventField.from_path("$.detail-type")
109
109
  )
@@ -120,7 +120,7 @@ import aws_cdk.aws_logs as logs
120
120
 
121
121
 
122
122
  rule.add_target(targets.CloudWatchLogGroup(log_group,
123
- log_event=targets.LogGroupTargetInput.from_object(
123
+ log_event=targets.LogGroupTargetInput.from_object_v2(
124
124
  message=JSON.stringify({
125
125
  "CustomField": "CustomValue"
126
126
  })
@@ -1493,7 +1493,7 @@ class CloudWatchLogGroup(
1493
1493
 
1494
1494
 
1495
1495
  rule.add_target(targets.CloudWatchLogGroup(log_group,
1496
- log_event=targets.LogGroupTargetInput.from_object(
1496
+ log_event=targets.LogGroupTargetInput.from_object_v2(
1497
1497
  message=JSON.stringify({
1498
1498
  "CustomField": "CustomValue"
1499
1499
  })
@@ -2632,7 +2632,7 @@ class LogGroupTargetInput(
2632
2632
 
2633
2633
 
2634
2634
  rule.add_target(targets.CloudWatchLogGroup(log_group,
2635
- log_event=targets.LogGroupTargetInput.from_object(
2635
+ log_event=targets.LogGroupTargetInput.from_object_v2(
2636
2636
  timestamp=events.EventField.from_path("$.time"),
2637
2637
  message=events.EventField.from_path("$.detail-type")
2638
2638
  )
@@ -2650,18 +2650,42 @@ class LogGroupTargetInput(
2650
2650
  message: typing.Any = None,
2651
2651
  timestamp: typing.Any = None,
2652
2652
  ) -> _RuleTargetInput_6beca786:
2653
- '''Pass a JSON object to the the log group event target.
2653
+ '''(deprecated) Pass a JSON object to the the log group event target.
2654
2654
 
2655
2655
  May contain strings returned by ``EventField.from()`` to substitute in parts of the
2656
2656
  matched event.
2657
2657
 
2658
2658
  :param message: The value provided here will be used in the Log "message" field. This field must be a string. If an object is passed (e.g. JSON data) it will not throw an error, but the message that makes it to CloudWatch logs will be incorrect. This is a likely scenario if doing something like: EventField.fromPath('$.detail') since in most cases the ``detail`` field contains JSON data. Default: EventField.detailType
2659
2659
  :param timestamp: The timestamp that will appear in the CloudWatch Logs record. Default: EventField.time
2660
+
2661
+ :deprecated: use fromObjectV2
2662
+
2663
+ :stability: deprecated
2660
2664
  '''
2661
2665
  options = LogGroupTargetInputOptions(message=message, timestamp=timestamp)
2662
2666
 
2663
2667
  return typing.cast(_RuleTargetInput_6beca786, jsii.sinvoke(cls, "fromObject", [options]))
2664
2668
 
2669
+ @jsii.member(jsii_name="fromObjectV2")
2670
+ @builtins.classmethod
2671
+ def from_object_v2(
2672
+ cls,
2673
+ *,
2674
+ message: typing.Any = None,
2675
+ timestamp: typing.Any = None,
2676
+ ) -> "LogGroupTargetInput":
2677
+ '''Pass a JSON object to the the log group event target.
2678
+
2679
+ May contain strings returned by ``EventField.from()`` to substitute in parts of the
2680
+ matched event.
2681
+
2682
+ :param message: The value provided here will be used in the Log "message" field. This field must be a string. If an object is passed (e.g. JSON data) it will not throw an error, but the message that makes it to CloudWatch logs will be incorrect. This is a likely scenario if doing something like: EventField.fromPath('$.detail') since in most cases the ``detail`` field contains JSON data. Default: EventField.detailType
2683
+ :param timestamp: The timestamp that will appear in the CloudWatch Logs record. Default: EventField.time
2684
+ '''
2685
+ options = LogGroupTargetInputOptions(message=message, timestamp=timestamp)
2686
+
2687
+ return typing.cast("LogGroupTargetInput", jsii.sinvoke(cls, "fromObjectV2", [options]))
2688
+
2665
2689
  @jsii.member(jsii_name="bind")
2666
2690
  @abc.abstractmethod
2667
2691
  def bind(self, rule: _IRule_af9e3d28) -> _RuleTargetInputProperties_38e7b0db:
@@ -2715,7 +2739,7 @@ class LogGroupTargetInputOptions:
2715
2739
 
2716
2740
 
2717
2741
  rule.add_target(targets.CloudWatchLogGroup(log_group,
2718
- log_event=targets.LogGroupTargetInput.from_object(
2742
+ log_event=targets.LogGroupTargetInput.from_object_v2(
2719
2743
  timestamp=events.EventField.from_path("$.time"),
2720
2744
  message=events.EventField.from_path("$.detail-type")
2721
2745
  )
@@ -5326,7 +5350,7 @@ class LogGroupProps(TargetBaseProps):
5326
5350
 
5327
5351
 
5328
5352
  rule.add_target(targets.CloudWatchLogGroup(log_group,
5329
- log_event=targets.LogGroupTargetInput.from_object(
5353
+ log_event=targets.LogGroupTargetInput.from_object_v2(
5330
5354
  message=JSON.stringify({
5331
5355
  "CustomField": "CustomValue"
5332
5356
  })
@@ -8371,7 +8371,7 @@ class IPrincipal(IGrantable, typing_extensions.Protocol):
8371
8371
  Notifications Service).
8372
8372
 
8373
8373
  A single logical Principal may also map to a set of physical principals.
8374
- For example, ``new OrganizationPrincipal('o-12345abcde')`` represents all
8374
+ For example, ``new OrganizationPrincipal('o-1234')`` represents all
8375
8375
  identities that are part of the given AWS Organization.
8376
8376
  '''
8377
8377
 
@@ -8427,7 +8427,7 @@ class _IPrincipalProxy(
8427
8427
  Notifications Service).
8428
8428
 
8429
8429
  A single logical Principal may also map to a set of physical principals.
8430
- For example, ``new OrganizationPrincipal('o-12345abcde')`` represents all
8430
+ For example, ``new OrganizationPrincipal('o-1234')`` represents all
8431
8431
  identities that are part of the given AWS Organization.
8432
8432
  '''
8433
8433
 
@@ -14477,9 +14477,6 @@ class OrganizationPrincipal(
14477
14477
  ):
14478
14478
  '''A principal that represents an AWS Organization.
14479
14479
 
14480
- Property organizationId must match regex pattern ^o-[a-z0-9]{10,32}$
14481
-
14482
- :see: https://docs.aws.amazon.com/organizations/latest/APIReference/API_Organization.html
14483
14480
  :exampleMetadata: infused
14484
14481
 
14485
14482
  Example::
@@ -10180,24 +10180,22 @@ class DeliveryStreamProps:
10180
10180
 
10181
10181
  Example::
10182
10182
 
10183
- # bucket: s3.Bucket
10184
- # Provide a Lambda function that will transform records before delivery, with custom
10185
- # buffering and retry configuration
10186
- lambda_function = lambda_.Function(self, "Processor",
10187
- runtime=lambda_.Runtime.NODEJS_LATEST,
10188
- handler="index.handler",
10189
- code=lambda_.Code.from_asset(path.join(__dirname, "process-records"))
10190
- )
10191
- lambda_processor = firehose.LambdaFunctionProcessor(lambda_function,
10192
- buffer_interval=Duration.minutes(5),
10193
- buffer_size=Size.mebibytes(5),
10194
- retries=5
10195
- )
10196
- s3_destination = firehose.S3Bucket(bucket,
10197
- processor=lambda_processor
10183
+ import aws_cdk.aws_kinesisfirehose as firehose
10184
+
10185
+
10186
+ bucket = s3.Bucket(self, "MyBucket")
10187
+ stream = firehose.DeliveryStream(self, "MyStream",
10188
+ destination=firehose.S3Bucket(bucket)
10198
10189
  )
10199
- firehose.DeliveryStream(self, "Delivery Stream",
10200
- destination=s3_destination
10190
+
10191
+ topic_rule = iot.TopicRule(self, "TopicRule",
10192
+ sql=iot.IotSql.from_string_as_ver20160323("SELECT * FROM 'device/+/data'"),
10193
+ actions=[
10194
+ actions.FirehosePutRecordAction(stream,
10195
+ batch_mode=True,
10196
+ record_separator=actions.FirehoseRecordSeparator.NEWLINE
10197
+ )
10198
+ ]
10201
10199
  )
10202
10200
  '''
10203
10201
  if __debug__:
@@ -11653,24 +11651,22 @@ class S3Bucket(
11653
11651
 
11654
11652
  Example::
11655
11653
 
11656
- # bucket: s3.Bucket
11657
- # Provide a Lambda function that will transform records before delivery, with custom
11658
- # buffering and retry configuration
11659
- lambda_function = lambda_.Function(self, "Processor",
11660
- runtime=lambda_.Runtime.NODEJS_LATEST,
11661
- handler="index.handler",
11662
- code=lambda_.Code.from_asset(path.join(__dirname, "process-records"))
11663
- )
11664
- lambda_processor = firehose.LambdaFunctionProcessor(lambda_function,
11665
- buffer_interval=Duration.minutes(5),
11666
- buffer_size=Size.mebibytes(5),
11667
- retries=5
11668
- )
11669
- s3_destination = firehose.S3Bucket(bucket,
11670
- processor=lambda_processor
11654
+ import aws_cdk.aws_kinesisfirehose as firehose
11655
+
11656
+
11657
+ bucket = s3.Bucket(self, "MyBucket")
11658
+ stream = firehose.DeliveryStream(self, "MyStream",
11659
+ destination=firehose.S3Bucket(bucket)
11671
11660
  )
11672
- firehose.DeliveryStream(self, "Delivery Stream",
11673
- destination=s3_destination
11661
+
11662
+ topic_rule = iot.TopicRule(self, "TopicRule",
11663
+ sql=iot.IotSql.from_string_as_ver20160323("SELECT * FROM 'device/+/data'"),
11664
+ actions=[
11665
+ actions.FirehosePutRecordAction(stream,
11666
+ batch_mode=True,
11667
+ record_separator=actions.FirehoseRecordSeparator.NEWLINE
11668
+ )
11669
+ ]
11674
11670
  )
11675
11671
  '''
11676
11672
 
@@ -12065,24 +12061,22 @@ class DeliveryStream(
12065
12061
 
12066
12062
  Example::
12067
12063
 
12068
- # bucket: s3.Bucket
12069
- # Provide a Lambda function that will transform records before delivery, with custom
12070
- # buffering and retry configuration
12071
- lambda_function = lambda_.Function(self, "Processor",
12072
- runtime=lambda_.Runtime.NODEJS_LATEST,
12073
- handler="index.handler",
12074
- code=lambda_.Code.from_asset(path.join(__dirname, "process-records"))
12075
- )
12076
- lambda_processor = firehose.LambdaFunctionProcessor(lambda_function,
12077
- buffer_interval=Duration.minutes(5),
12078
- buffer_size=Size.mebibytes(5),
12079
- retries=5
12080
- )
12081
- s3_destination = firehose.S3Bucket(bucket,
12082
- processor=lambda_processor
12064
+ import aws_cdk.aws_kinesisfirehose as firehose
12065
+
12066
+
12067
+ bucket = s3.Bucket(self, "MyBucket")
12068
+ stream = firehose.DeliveryStream(self, "MyStream",
12069
+ destination=firehose.S3Bucket(bucket)
12083
12070
  )
12084
- firehose.DeliveryStream(self, "Delivery Stream",
12085
- destination=s3_destination
12071
+
12072
+ topic_rule = iot.TopicRule(self, "TopicRule",
12073
+ sql=iot.IotSql.from_string_as_ver20160323("SELECT * FROM 'device/+/data'"),
12074
+ actions=[
12075
+ actions.FirehosePutRecordAction(stream,
12076
+ batch_mode=True,
12077
+ record_separator=actions.FirehoseRecordSeparator.NEWLINE
12078
+ )
12079
+ ]
12086
12080
  )
12087
12081
  '''
12088
12082
 
@@ -13464,7 +13464,7 @@ class EventSourceMappingOptions:
13464
13464
  :param max_concurrency: The maximum concurrency setting limits the number of concurrent instances of the function that an Amazon SQS event source can invoke. Default: - No specific limit.
13465
13465
  :param max_record_age: The maximum age of a record that Lambda sends to a function for processing. Valid Range: - Minimum value of 60 seconds - Maximum value of 7 days Default: - infinite or until the record expires.
13466
13466
  :param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
13467
- :param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
13467
+ :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
13468
13468
  :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
13469
13469
  :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
13470
13470
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
@@ -13472,7 +13472,7 @@ class EventSourceMappingOptions:
13472
13472
  :param source_access_configurations: Specific settings like the authentication protocol or the VPC components to secure access to your event source. Default: - none
13473
13473
  :param starting_position: The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should start reading. Default: - no starting position
13474
13474
  :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
13475
- :param support_s3_on_failure_destination: Check if support S3 onfailure destination(ODF). Currently only MSK and self managed kafka event support S3 ODF Default: false
13475
+ :param support_s3_on_failure_destination: Check if support S3 onfailure destination(OFD). Kinesis, DynamoDB, MSK and self managed kafka event support S3 OFD Default: false
13476
13476
  :param tumbling_window: The size of the tumbling windows to group records sent to DynamoDB or Kinesis. Default: - None
13477
13477
 
13478
13478
  :exampleMetadata: fixture=_generated
@@ -13758,7 +13758,7 @@ class EventSourceMappingOptions:
13758
13758
 
13759
13759
  @builtins.property
13760
13760
  def on_failure(self) -> typing.Optional["IEventSourceDlq"]:
13761
- '''An Amazon SQS queue or Amazon SNS topic destination for discarded records.
13761
+ '''An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records.
13762
13762
 
13763
13763
  :default: discarded records are ignored
13764
13764
  '''
@@ -13854,9 +13854,9 @@ class EventSourceMappingOptions:
13854
13854
 
13855
13855
  @builtins.property
13856
13856
  def support_s3_on_failure_destination(self) -> typing.Optional[builtins.bool]:
13857
- '''Check if support S3 onfailure destination(ODF).
13857
+ '''Check if support S3 onfailure destination(OFD).
13858
13858
 
13859
- Currently only MSK and self managed kafka event support S3 ODF
13859
+ Kinesis, DynamoDB, MSK and self managed kafka event support S3 OFD
13860
13860
 
13861
13861
  :default: false
13862
13862
  '''
@@ -13964,7 +13964,7 @@ class EventSourceMappingProps(EventSourceMappingOptions):
13964
13964
  :param max_concurrency: The maximum concurrency setting limits the number of concurrent instances of the function that an Amazon SQS event source can invoke. Default: - No specific limit.
13965
13965
  :param max_record_age: The maximum age of a record that Lambda sends to a function for processing. Valid Range: - Minimum value of 60 seconds - Maximum value of 7 days Default: - infinite or until the record expires.
13966
13966
  :param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
13967
- :param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
13967
+ :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
13968
13968
  :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
13969
13969
  :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
13970
13970
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
@@ -13972,7 +13972,7 @@ class EventSourceMappingProps(EventSourceMappingOptions):
13972
13972
  :param source_access_configurations: Specific settings like the authentication protocol or the VPC components to secure access to your event source. Default: - none
13973
13973
  :param starting_position: The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should start reading. Default: - no starting position
13974
13974
  :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
13975
- :param support_s3_on_failure_destination: Check if support S3 onfailure destination(ODF). Currently only MSK and self managed kafka event support S3 ODF Default: false
13975
+ :param support_s3_on_failure_destination: Check if support S3 onfailure destination(OFD). Kinesis, DynamoDB, MSK and self managed kafka event support S3 OFD Default: false
13976
13976
  :param tumbling_window: The size of the tumbling windows to group records sent to DynamoDB or Kinesis. Default: - None
13977
13977
  :param target: The target AWS Lambda function.
13978
13978
 
@@ -14266,7 +14266,7 @@ class EventSourceMappingProps(EventSourceMappingOptions):
14266
14266
 
14267
14267
  @builtins.property
14268
14268
  def on_failure(self) -> typing.Optional["IEventSourceDlq"]:
14269
- '''An Amazon SQS queue or Amazon SNS topic destination for discarded records.
14269
+ '''An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records.
14270
14270
 
14271
14271
  :default: discarded records are ignored
14272
14272
  '''
@@ -14362,9 +14362,9 @@ class EventSourceMappingProps(EventSourceMappingOptions):
14362
14362
 
14363
14363
  @builtins.property
14364
14364
  def support_s3_on_failure_destination(self) -> typing.Optional[builtins.bool]:
14365
- '''Check if support S3 onfailure destination(ODF).
14365
+ '''Check if support S3 onfailure destination(OFD).
14366
14366
 
14367
- Currently only MSK and self managed kafka event support S3 ODF
14367
+ Kinesis, DynamoDB, MSK and self managed kafka event support S3 OFD
14368
14368
 
14369
14369
  :default: false
14370
14370
  '''
@@ -17838,7 +17838,7 @@ class IFunction(
17838
17838
  :param max_concurrency: The maximum concurrency setting limits the number of concurrent instances of the function that an Amazon SQS event source can invoke. Default: - No specific limit.
17839
17839
  :param max_record_age: The maximum age of a record that Lambda sends to a function for processing. Valid Range: - Minimum value of 60 seconds - Maximum value of 7 days Default: - infinite or until the record expires.
17840
17840
  :param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
17841
- :param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
17841
+ :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
17842
17842
  :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
17843
17843
  :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
17844
17844
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
@@ -17846,7 +17846,7 @@ class IFunction(
17846
17846
  :param source_access_configurations: Specific settings like the authentication protocol or the VPC components to secure access to your event source. Default: - none
17847
17847
  :param starting_position: The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should start reading. Default: - no starting position
17848
17848
  :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
17849
- :param support_s3_on_failure_destination: Check if support S3 onfailure destination(ODF). Currently only MSK and self managed kafka event support S3 ODF Default: false
17849
+ :param support_s3_on_failure_destination: Check if support S3 onfailure destination(OFD). Kinesis, DynamoDB, MSK and self managed kafka event support S3 OFD Default: false
17850
17850
  :param tumbling_window: The size of the tumbling windows to group records sent to DynamoDB or Kinesis. Default: - None
17851
17851
  '''
17852
17852
  ...
@@ -18280,7 +18280,7 @@ class _IFunctionProxy(
18280
18280
  :param max_concurrency: The maximum concurrency setting limits the number of concurrent instances of the function that an Amazon SQS event source can invoke. Default: - No specific limit.
18281
18281
  :param max_record_age: The maximum age of a record that Lambda sends to a function for processing. Valid Range: - Minimum value of 60 seconds - Maximum value of 7 days Default: - infinite or until the record expires.
18282
18282
  :param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
18283
- :param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
18283
+ :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
18284
18284
  :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
18285
18285
  :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
18286
18286
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
@@ -18288,7 +18288,7 @@ class _IFunctionProxy(
18288
18288
  :param source_access_configurations: Specific settings like the authentication protocol or the VPC components to secure access to your event source. Default: - none
18289
18289
  :param starting_position: The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should start reading. Default: - no starting position
18290
18290
  :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
18291
- :param support_s3_on_failure_destination: Check if support S3 onfailure destination(ODF). Currently only MSK and self managed kafka event support S3 ODF Default: false
18291
+ :param support_s3_on_failure_destination: Check if support S3 onfailure destination(OFD). Kinesis, DynamoDB, MSK and self managed kafka event support S3 OFD Default: false
18292
18292
  :param tumbling_window: The size of the tumbling windows to group records sent to DynamoDB or Kinesis. Default: - None
18293
18293
  '''
18294
18294
  if __debug__:
@@ -25867,7 +25867,7 @@ class EventSourceMapping(
25867
25867
  :param max_concurrency: The maximum concurrency setting limits the number of concurrent instances of the function that an Amazon SQS event source can invoke. Default: - No specific limit.
25868
25868
  :param max_record_age: The maximum age of a record that Lambda sends to a function for processing. Valid Range: - Minimum value of 60 seconds - Maximum value of 7 days Default: - infinite or until the record expires.
25869
25869
  :param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
25870
- :param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
25870
+ :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
25871
25871
  :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
25872
25872
  :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
25873
25873
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
@@ -25875,7 +25875,7 @@ class EventSourceMapping(
25875
25875
  :param source_access_configurations: Specific settings like the authentication protocol or the VPC components to secure access to your event source. Default: - none
25876
25876
  :param starting_position: The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should start reading. Default: - no starting position
25877
25877
  :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
25878
- :param support_s3_on_failure_destination: Check if support S3 onfailure destination(ODF). Currently only MSK and self managed kafka event support S3 ODF Default: false
25878
+ :param support_s3_on_failure_destination: Check if support S3 onfailure destination(OFD). Kinesis, DynamoDB, MSK and self managed kafka event support S3 OFD Default: false
25879
25879
  :param tumbling_window: The size of the tumbling windows to group records sent to DynamoDB or Kinesis. Default: - None
25880
25880
  '''
25881
25881
  if __debug__:
@@ -26045,7 +26045,7 @@ class FunctionBase(
26045
26045
  :param max_concurrency: The maximum concurrency setting limits the number of concurrent instances of the function that an Amazon SQS event source can invoke. Default: - No specific limit.
26046
26046
  :param max_record_age: The maximum age of a record that Lambda sends to a function for processing. Valid Range: - Minimum value of 60 seconds - Maximum value of 7 days Default: - infinite or until the record expires.
26047
26047
  :param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
26048
- :param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
26048
+ :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
26049
26049
  :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
26050
26050
  :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
26051
26051
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
@@ -26053,7 +26053,7 @@ class FunctionBase(
26053
26053
  :param source_access_configurations: Specific settings like the authentication protocol or the VPC components to secure access to your event source. Default: - none
26054
26054
  :param starting_position: The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should start reading. Default: - no starting position
26055
26055
  :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
26056
- :param support_s3_on_failure_destination: Check if support S3 onfailure destination(ODF). Currently only MSK and self managed kafka event support S3 ODF Default: false
26056
+ :param support_s3_on_failure_destination: Check if support S3 onfailure destination(OFD). Kinesis, DynamoDB, MSK and self managed kafka event support S3 OFD Default: false
26057
26057
  :param tumbling_window: The size of the tumbling windows to group records sent to DynamoDB or Kinesis. Default: - None
26058
26058
  '''
26059
26059
  if __debug__:
@@ -169,7 +169,7 @@ and add it to your Lambda function. The following parameters will impact Amazon
169
169
  * **reportBatchItemFailures**: Allow functions to return partially successful responses for a batch of records.
170
170
  * **maxBatchingWindow**: The maximum amount of time to gather records before invoking the lambda. This increases the likelihood of a full batch at the cost of delayed processing.
171
171
  * **maxRecordAge**: The maximum age of a record that will be sent to the function for processing. Records that exceed the max age will be treated as failures.
172
- * **onFailure**: In the event a record fails after all retries or if the record age has exceeded the configured value, the record will be sent to SQS queue or SNS topic that is specified here
172
+ * **onFailure**: In the event a record fails after all retries or if the record age has exceeded the configured value, the record will be sent to S3 bucket, SQS queue or SNS topic that is specified here
173
173
  * **parallelizationFactor**: The number of batches to concurrently process on each shard.
174
174
  * **retryAttempts**: The maximum number of times a record should be retried in the event of failure.
175
175
  * **startingPosition**: Will determine where to being consumption, either at the most recent ('LATEST') record or the oldest record ('TRIM_HORIZON'). 'TRIM_HORIZON' will ensure you process all available data, while 'LATEST' will ignore all records that arrived prior to attaching the event source.
@@ -238,7 +238,7 @@ behavior:
238
238
  * **reportBatchItemFailures**: Allow functions to return partially successful responses for a batch of records.
239
239
  * **maxBatchingWindow**: The maximum amount of time to gather records before invoking the lambda. This increases the likelihood of a full batch at the cost of possibly delaying processing.
240
240
  * **maxRecordAge**: The maximum age of a record that will be sent to the function for processing. Records that exceed the max age will be treated as failures.
241
- * **onFailure**: In the event a record fails and consumes all retries, the record will be sent to SQS queue or SNS topic that is specified here
241
+ * **onFailure**: In the event a record fails and consumes all retries, the record will be sent to S3 bucket, SQS queue or SNS topic that is specified here
242
242
  * **parallelizationFactor**: The number of batches to concurrently process on each shard.
243
243
  * **retryAttempts**: The maximum number of times a record should be retried in the event of failure.
244
244
  * **startingPosition**: Will determine where to begin consumption. 'LATEST' will start at the most recent record and ignore all records that arrived prior to attaching the event source, 'TRIM_HORIZON' will start at the oldest record and ensure you process all available data, while 'AT_TIMESTAMP' will start reading records from a specified time stamp. Note that 'AT_TIMESTAMP' is only supported for Amazon Kinesis streams.
@@ -2592,7 +2592,7 @@ class StreamEventSource(
2592
2592
  :param filters: Add filter criteria option. Default: - None
2593
2593
  :param max_record_age: The maximum age of a record that Lambda sends to a function for processing. Valid Range: - Minimum value of 60 seconds - Maximum value of 7 days The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records. Record are valid until it expires in the event source. Default: -1
2594
2594
  :param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
2595
- :param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
2595
+ :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
2596
2596
  :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
2597
2597
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
2598
2598
  :param retry_attempts: Maximum number of retry attempts Valid Range: * Minimum value of 0 * Maximum value of 10000. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source. Default: -1
@@ -2674,7 +2674,7 @@ class StreamEventSource(
2674
2674
  :param max_concurrency: The maximum concurrency setting limits the number of concurrent instances of the function that an Amazon SQS event source can invoke. Default: - No specific limit.
2675
2675
  :param max_record_age: The maximum age of a record that Lambda sends to a function for processing. Valid Range: - Minimum value of 60 seconds - Maximum value of 7 days Default: - infinite or until the record expires.
2676
2676
  :param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
2677
- :param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
2677
+ :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: discarded records are ignored
2678
2678
  :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
2679
2679
  :param provisioned_poller_config: Configuration for provisioned pollers that read from the event source. When specified, allows control over the minimum and maximum number of pollers that can be provisioned to process events from the source. Default: - no provisioned pollers
2680
2680
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
@@ -2682,7 +2682,7 @@ class StreamEventSource(
2682
2682
  :param source_access_configurations: Specific settings like the authentication protocol or the VPC components to secure access to your event source. Default: - none
2683
2683
  :param starting_position: The position in the DynamoDB, Kinesis or MSK stream where AWS Lambda should start reading. Default: - no starting position
2684
2684
  :param starting_position_timestamp: The time from which to start reading, in Unix time seconds. Default: - no timestamp
2685
- :param support_s3_on_failure_destination: Check if support S3 onfailure destination(ODF). Currently only MSK and self managed kafka event support S3 ODF Default: false
2685
+ :param support_s3_on_failure_destination: Check if support S3 onfailure destination(OFD). Kinesis, DynamoDB, MSK and self managed kafka event support S3 OFD Default: false
2686
2686
  :param tumbling_window: The size of the tumbling windows to group records sent to DynamoDB or Kinesis. Default: - None
2687
2687
  '''
2688
2688
  options = _EventSourceMappingOptions_b3f2bb85(
@@ -2788,7 +2788,7 @@ class StreamEventSourceProps(BaseStreamEventSourceProps):
2788
2788
  :param filters: Add filter criteria option. Default: - None
2789
2789
  :param max_record_age: The maximum age of a record that Lambda sends to a function for processing. Valid Range: - Minimum value of 60 seconds - Maximum value of 7 days The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records. Record are valid until it expires in the event source. Default: -1
2790
2790
  :param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
2791
- :param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
2791
+ :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
2792
2792
  :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
2793
2793
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
2794
2794
  :param retry_attempts: Maximum number of retry attempts Valid Range: * Minimum value of 0 * Maximum value of 10000. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source. Default: -1
@@ -3010,7 +3010,7 @@ class StreamEventSourceProps(BaseStreamEventSourceProps):
3010
3010
 
3011
3011
  @builtins.property
3012
3012
  def on_failure(self) -> typing.Optional[_IEventSourceDlq_5e2c6ad9]:
3013
- '''An Amazon SQS queue or Amazon SNS topic destination for discarded records.
3013
+ '''An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records.
3014
3014
 
3015
3015
  :default: - discarded records are ignored
3016
3016
  '''
@@ -3135,7 +3135,7 @@ class DynamoEventSource(
3135
3135
  :param filters: Add filter criteria option. Default: - None
3136
3136
  :param max_record_age: The maximum age of a record that Lambda sends to a function for processing. Valid Range: - Minimum value of 60 seconds - Maximum value of 7 days The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records. Record are valid until it expires in the event source. Default: -1
3137
3137
  :param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
3138
- :param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
3138
+ :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
3139
3139
  :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
3140
3140
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
3141
3141
  :param retry_attempts: Maximum number of retry attempts Valid Range: * Minimum value of 0 * Maximum value of 10000. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source. Default: -1
@@ -3245,7 +3245,7 @@ class DynamoEventSourceProps(StreamEventSourceProps):
3245
3245
  :param filters: Add filter criteria option. Default: - None
3246
3246
  :param max_record_age: The maximum age of a record that Lambda sends to a function for processing. Valid Range: - Minimum value of 60 seconds - Maximum value of 7 days The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records. Record are valid until it expires in the event source. Default: -1
3247
3247
  :param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
3248
- :param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
3248
+ :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
3249
3249
  :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
3250
3250
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
3251
3251
  :param retry_attempts: Maximum number of retry attempts Valid Range: * Minimum value of 0 * Maximum value of 10000. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source. Default: -1
@@ -3449,7 +3449,7 @@ class DynamoEventSourceProps(StreamEventSourceProps):
3449
3449
 
3450
3450
  @builtins.property
3451
3451
  def on_failure(self) -> typing.Optional[_IEventSourceDlq_5e2c6ad9]:
3452
- '''An Amazon SQS queue or Amazon SNS topic destination for discarded records.
3452
+ '''An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records.
3453
3453
 
3454
3454
  :default: - discarded records are ignored
3455
3455
  '''
@@ -3572,7 +3572,7 @@ class KinesisConsumerEventSource(
3572
3572
  :param filters: Add filter criteria option. Default: - None
3573
3573
  :param max_record_age: The maximum age of a record that Lambda sends to a function for processing. Valid Range: - Minimum value of 60 seconds - Maximum value of 7 days The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records. Record are valid until it expires in the event source. Default: -1
3574
3574
  :param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
3575
- :param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
3575
+ :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
3576
3576
  :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
3577
3577
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
3578
3578
  :param retry_attempts: Maximum number of retry attempts Valid Range: * Minimum value of 0 * Maximum value of 10000. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source. Default: -1
@@ -3689,7 +3689,7 @@ class KinesisEventSource(
3689
3689
  :param filters: Add filter criteria option. Default: - None
3690
3690
  :param max_record_age: The maximum age of a record that Lambda sends to a function for processing. Valid Range: - Minimum value of 60 seconds - Maximum value of 7 days The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records. Record are valid until it expires in the event source. Default: -1
3691
3691
  :param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
3692
- :param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
3692
+ :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
3693
3693
  :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
3694
3694
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
3695
3695
  :param retry_attempts: Maximum number of retry attempts Valid Range: * Minimum value of 0 * Maximum value of 10000. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source. Default: -1
@@ -3807,7 +3807,7 @@ class KinesisEventSourceProps(StreamEventSourceProps):
3807
3807
  :param filters: Add filter criteria option. Default: - None
3808
3808
  :param max_record_age: The maximum age of a record that Lambda sends to a function for processing. Valid Range: - Minimum value of 60 seconds - Maximum value of 7 days The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records. Record are valid until it expires in the event source. Default: -1
3809
3809
  :param metrics_config: Configuration for enhanced monitoring metrics collection When specified, enables collection of additional metrics for the stream event source. Default: - Enhanced monitoring is disabled
3810
- :param on_failure: An Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
3810
+ :param on_failure: An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records. Default: - discarded records are ignored
3811
3811
  :param parallelization_factor: The number of batches to process from each shard concurrently. Valid Range: - Minimum value of 1 - Maximum value of 10 Default: 1
3812
3812
  :param report_batch_item_failures: Allow functions to return partially successful responses for a batch of records. Default: false
3813
3813
  :param retry_attempts: Maximum number of retry attempts Valid Range: * Minimum value of 0 * Maximum value of 10000. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source. Default: -1
@@ -4007,7 +4007,7 @@ class KinesisEventSourceProps(StreamEventSourceProps):
4007
4007
 
4008
4008
  @builtins.property
4009
4009
  def on_failure(self) -> typing.Optional[_IEventSourceDlq_5e2c6ad9]:
4010
- '''An Amazon SQS queue or Amazon SNS topic destination for discarded records.
4010
+ '''An Amazon S3, Amazon SQS queue or Amazon SNS topic destination for discarded records.
4011
4011
 
4012
4012
  :default: - discarded records are ignored
4013
4013
  '''
@@ -68,6 +68,22 @@ By default, the master password will be generated and stored in AWS Secrets Mana
68
68
  Your cluster will be empty by default. To add a default database upon construction, specify the
69
69
  `defaultDatabaseName` attribute.
70
70
 
71
+ When you create a DB instance in your cluster, Aurora automatically chooses an appropriate AZ for that instance if you don't specify an AZ.
72
+ You can place each instance in fixed availability zone by specifying `availabilityZone` property.
73
+ For details, see [Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html).
74
+
75
+ ```python
76
+ # vpc: ec2.Vpc
77
+
78
+ cluster = rds.DatabaseCluster(self, "Database",
79
+ engine=rds.DatabaseClusterEngine.aurora_mysql(version=rds.AuroraMysqlEngineVersion.VER_3_02_1),
80
+ writer=rds.ClusterInstance.provisioned("writer",
81
+ availability_zone="us-east-1a"
82
+ ),
83
+ vpc=vpc
84
+ )
85
+ ```
86
+
71
87
  To use dual-stack mode, specify `NetworkType.DUAL` on the `networkType` property:
72
88
 
73
89
  ```python
@@ -532,6 +548,19 @@ instance = rds.DatabaseInstance(self, "Instance",
532
548
  )
533
549
  ```
534
550
 
551
+ When you create a DB instance, you can choose an Availability Zone or have Amazon RDS choose one for you randomly.
552
+ For details, see [Regions, Availability Zones, and Local Zones](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html).
553
+
554
+ ```python
555
+ # vpc: ec2.Vpc
556
+
557
+ instance = rds.DatabaseInstance(self, "Instance",
558
+ engine=rds.DatabaseInstanceEngine.postgres(version=rds.PostgresEngineVersion.VER_16_3),
559
+ vpc=vpc,
560
+ availability_zone="us-east-1a"
561
+ )
562
+ ```
563
+
535
564
  To use dual-stack mode, specify `NetworkType.DUAL` on the `networkType` property:
536
565
 
537
566
  ```python
@@ -2259,11 +2288,14 @@ class AuroraMysqlClusterEngineProps:
2259
2288
 
2260
2289
  cluster = rds.DatabaseCluster(self, "Database",
2261
2290
  engine=rds.DatabaseClusterEngine.aurora_mysql(version=rds.AuroraMysqlEngineVersion.VER_3_01_0),
2262
- writer=rds.ClusterInstance.provisioned("Instance",
2263
- instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.SMALL)
2291
+ writer=rds.ClusterInstance.provisioned("writer",
2292
+ ca_certificate=rds.CaCertificate.RDS_CA_RSA2048_G1
2264
2293
  ),
2265
- readers=[rds.ClusterInstance.provisioned("reader")],
2266
- instance_update_behaviour=rds.InstanceUpdateBehaviour.ROLLING, # Optional - defaults to rds.InstanceUpdateBehaviour.BULK
2294
+ readers=[
2295
+ rds.ClusterInstance.serverless_v2("reader",
2296
+ ca_certificate=rds.CaCertificate.of("custom-ca")
2297
+ )
2298
+ ],
2267
2299
  vpc=vpc
2268
2300
  )
2269
2301
  '''
@@ -18992,6 +19024,7 @@ class ClusterInstanceBindOptions:
18992
19024
  "allow_major_version_upgrade": "allowMajorVersionUpgrade",
18993
19025
  "apply_immediately": "applyImmediately",
18994
19026
  "auto_minor_version_upgrade": "autoMinorVersionUpgrade",
19027
+ "availability_zone": "availabilityZone",
18995
19028
  "ca_certificate": "caCertificate",
18996
19029
  "enable_performance_insights": "enablePerformanceInsights",
18997
19030
  "instance_identifier": "instanceIdentifier",
@@ -19011,6 +19044,7 @@ class ClusterInstanceOptions:
19011
19044
  allow_major_version_upgrade: typing.Optional[builtins.bool] = None,
19012
19045
  apply_immediately: typing.Optional[builtins.bool] = None,
19013
19046
  auto_minor_version_upgrade: typing.Optional[builtins.bool] = None,
19047
+ availability_zone: typing.Optional[builtins.str] = None,
19014
19048
  ca_certificate: typing.Optional[CaCertificate] = None,
19015
19049
  enable_performance_insights: typing.Optional[builtins.bool] = None,
19016
19050
  instance_identifier: typing.Optional[builtins.str] = None,
@@ -19027,6 +19061,7 @@ class ClusterInstanceOptions:
19027
19061
  :param allow_major_version_upgrade: Whether to allow upgrade of major version for the DB instance. Default: - false
19028
19062
  :param apply_immediately: Specifies whether changes to the DB instance and any pending modifications are applied immediately, regardless of the ``preferredMaintenanceWindow`` setting. If set to ``false``, changes are applied during the next maintenance window. Until RDS applies the changes, the DB instance remains in a drift state. As a result, the configuration doesn't fully reflect the requested modifications and temporarily diverges from the intended state. This property also determines whether the DB instance reboots when a static parameter is modified in the associated DB parameter group. Default: - Changes will be applied immediately
19029
19063
  :param auto_minor_version_upgrade: Whether to enable automatic upgrade of minor version for the DB instance. Default: - true
19064
+ :param availability_zone: The Availability Zone (AZ) where the database will be created. For Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one. Default: - A random, system-chosen Availability Zone in the endpointʼs AWS Region.
19030
19065
  :param ca_certificate: The identifier of the CA certificate for this DB cluster's instances. Specifying or updating this property triggers a reboot. For RDS DB engines: Default: - RDS will choose a certificate authority
19031
19066
  :param enable_performance_insights: Whether to enable Performance Insights for the DB instance. Default: - false, unless ``performanceInsightRetention`` or ``performanceInsightEncryptionKey`` is set.
19032
19067
  :param instance_identifier: The identifier for the database instance. Default: - CloudFormation generated identifier
@@ -19055,6 +19090,7 @@ class ClusterInstanceOptions:
19055
19090
  allow_major_version_upgrade=False,
19056
19091
  apply_immediately=False,
19057
19092
  auto_minor_version_upgrade=False,
19093
+ availability_zone="availabilityZone",
19058
19094
  ca_certificate=ca_certificate,
19059
19095
  enable_performance_insights=False,
19060
19096
  instance_identifier="instanceIdentifier",
@@ -19074,6 +19110,7 @@ class ClusterInstanceOptions:
19074
19110
  check_type(argname="argument allow_major_version_upgrade", value=allow_major_version_upgrade, expected_type=type_hints["allow_major_version_upgrade"])
19075
19111
  check_type(argname="argument apply_immediately", value=apply_immediately, expected_type=type_hints["apply_immediately"])
19076
19112
  check_type(argname="argument auto_minor_version_upgrade", value=auto_minor_version_upgrade, expected_type=type_hints["auto_minor_version_upgrade"])
19113
+ check_type(argname="argument availability_zone", value=availability_zone, expected_type=type_hints["availability_zone"])
19077
19114
  check_type(argname="argument ca_certificate", value=ca_certificate, expected_type=type_hints["ca_certificate"])
19078
19115
  check_type(argname="argument enable_performance_insights", value=enable_performance_insights, expected_type=type_hints["enable_performance_insights"])
19079
19116
  check_type(argname="argument instance_identifier", value=instance_identifier, expected_type=type_hints["instance_identifier"])
@@ -19091,6 +19128,8 @@ class ClusterInstanceOptions:
19091
19128
  self._values["apply_immediately"] = apply_immediately
19092
19129
  if auto_minor_version_upgrade is not None:
19093
19130
  self._values["auto_minor_version_upgrade"] = auto_minor_version_upgrade
19131
+ if availability_zone is not None:
19132
+ self._values["availability_zone"] = availability_zone
19094
19133
  if ca_certificate is not None:
19095
19134
  self._values["ca_certificate"] = ca_certificate
19096
19135
  if enable_performance_insights is not None:
@@ -19148,6 +19187,20 @@ class ClusterInstanceOptions:
19148
19187
  result = self._values.get("auto_minor_version_upgrade")
19149
19188
  return typing.cast(typing.Optional[builtins.bool], result)
19150
19189
 
19190
+ @builtins.property
19191
+ def availability_zone(self) -> typing.Optional[builtins.str]:
19192
+ '''The Availability Zone (AZ) where the database will be created.
19193
+
19194
+ For Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones.
19195
+ Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one.
19196
+
19197
+ :default: - A random, system-chosen Availability Zone in the endpointʼs AWS Region.
19198
+
19199
+ :see: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html
19200
+ '''
19201
+ result = self._values.get("availability_zone")
19202
+ return typing.cast(typing.Optional[builtins.str], result)
19203
+
19151
19204
  @builtins.property
19152
19205
  def ca_certificate(self) -> typing.Optional[CaCertificate]:
19153
19206
  '''The identifier of the CA certificate for this DB cluster's instances.
@@ -19325,6 +19378,7 @@ class ClusterInstanceOptions:
19325
19378
  "allow_major_version_upgrade": "allowMajorVersionUpgrade",
19326
19379
  "apply_immediately": "applyImmediately",
19327
19380
  "auto_minor_version_upgrade": "autoMinorVersionUpgrade",
19381
+ "availability_zone": "availabilityZone",
19328
19382
  "ca_certificate": "caCertificate",
19329
19383
  "enable_performance_insights": "enablePerformanceInsights",
19330
19384
  "instance_identifier": "instanceIdentifier",
@@ -19346,6 +19400,7 @@ class ClusterInstanceProps(ClusterInstanceOptions):
19346
19400
  allow_major_version_upgrade: typing.Optional[builtins.bool] = None,
19347
19401
  apply_immediately: typing.Optional[builtins.bool] = None,
19348
19402
  auto_minor_version_upgrade: typing.Optional[builtins.bool] = None,
19403
+ availability_zone: typing.Optional[builtins.str] = None,
19349
19404
  ca_certificate: typing.Optional[CaCertificate] = None,
19350
19405
  enable_performance_insights: typing.Optional[builtins.bool] = None,
19351
19406
  instance_identifier: typing.Optional[builtins.str] = None,
@@ -19364,6 +19419,7 @@ class ClusterInstanceProps(ClusterInstanceOptions):
19364
19419
  :param allow_major_version_upgrade: Whether to allow upgrade of major version for the DB instance. Default: - false
19365
19420
  :param apply_immediately: Specifies whether changes to the DB instance and any pending modifications are applied immediately, regardless of the ``preferredMaintenanceWindow`` setting. If set to ``false``, changes are applied during the next maintenance window. Until RDS applies the changes, the DB instance remains in a drift state. As a result, the configuration doesn't fully reflect the requested modifications and temporarily diverges from the intended state. This property also determines whether the DB instance reboots when a static parameter is modified in the associated DB parameter group. Default: - Changes will be applied immediately
19366
19421
  :param auto_minor_version_upgrade: Whether to enable automatic upgrade of minor version for the DB instance. Default: - true
19422
+ :param availability_zone: The Availability Zone (AZ) where the database will be created. For Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one. Default: - A random, system-chosen Availability Zone in the endpointʼs AWS Region.
19367
19423
  :param ca_certificate: The identifier of the CA certificate for this DB cluster's instances. Specifying or updating this property triggers a reboot. For RDS DB engines: Default: - RDS will choose a certificate authority
19368
19424
  :param enable_performance_insights: Whether to enable Performance Insights for the DB instance. Default: - false, unless ``performanceInsightRetention`` or ``performanceInsightEncryptionKey`` is set.
19369
19425
  :param instance_identifier: The identifier for the database instance. Default: - CloudFormation generated identifier
@@ -19398,6 +19454,7 @@ class ClusterInstanceProps(ClusterInstanceOptions):
19398
19454
  allow_major_version_upgrade=False,
19399
19455
  apply_immediately=False,
19400
19456
  auto_minor_version_upgrade=False,
19457
+ availability_zone="availabilityZone",
19401
19458
  ca_certificate=ca_certificate,
19402
19459
  enable_performance_insights=False,
19403
19460
  instance_identifier="instanceIdentifier",
@@ -19418,6 +19475,7 @@ class ClusterInstanceProps(ClusterInstanceOptions):
19418
19475
  check_type(argname="argument allow_major_version_upgrade", value=allow_major_version_upgrade, expected_type=type_hints["allow_major_version_upgrade"])
19419
19476
  check_type(argname="argument apply_immediately", value=apply_immediately, expected_type=type_hints["apply_immediately"])
19420
19477
  check_type(argname="argument auto_minor_version_upgrade", value=auto_minor_version_upgrade, expected_type=type_hints["auto_minor_version_upgrade"])
19478
+ check_type(argname="argument availability_zone", value=availability_zone, expected_type=type_hints["availability_zone"])
19421
19479
  check_type(argname="argument ca_certificate", value=ca_certificate, expected_type=type_hints["ca_certificate"])
19422
19480
  check_type(argname="argument enable_performance_insights", value=enable_performance_insights, expected_type=type_hints["enable_performance_insights"])
19423
19481
  check_type(argname="argument instance_identifier", value=instance_identifier, expected_type=type_hints["instance_identifier"])
@@ -19439,6 +19497,8 @@ class ClusterInstanceProps(ClusterInstanceOptions):
19439
19497
  self._values["apply_immediately"] = apply_immediately
19440
19498
  if auto_minor_version_upgrade is not None:
19441
19499
  self._values["auto_minor_version_upgrade"] = auto_minor_version_upgrade
19500
+ if availability_zone is not None:
19501
+ self._values["availability_zone"] = availability_zone
19442
19502
  if ca_certificate is not None:
19443
19503
  self._values["ca_certificate"] = ca_certificate
19444
19504
  if enable_performance_insights is not None:
@@ -19498,6 +19558,20 @@ class ClusterInstanceProps(ClusterInstanceOptions):
19498
19558
  result = self._values.get("auto_minor_version_upgrade")
19499
19559
  return typing.cast(typing.Optional[builtins.bool], result)
19500
19560
 
19561
+ @builtins.property
19562
+ def availability_zone(self) -> typing.Optional[builtins.str]:
19563
+ '''The Availability Zone (AZ) where the database will be created.
19564
+
19565
+ For Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones.
19566
+ Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one.
19567
+
19568
+ :default: - A random, system-chosen Availability Zone in the endpointʼs AWS Region.
19569
+
19570
+ :see: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html
19571
+ '''
19572
+ result = self._values.get("availability_zone")
19573
+ return typing.cast(typing.Optional[builtins.str], result)
19574
+
19501
19575
  @builtins.property
19502
19576
  def ca_certificate(self) -> typing.Optional[CaCertificate]:
19503
19577
  '''The identifier of the CA certificate for this DB cluster's instances.
@@ -20849,20 +20923,19 @@ class DatabaseClusterEngine(
20849
20923
  # vpc: ec2.Vpc
20850
20924
 
20851
20925
  cluster = rds.DatabaseCluster(self, "Database",
20852
- engine=rds.DatabaseClusterEngine.aurora_mysql(version=rds.AuroraMysqlEngineVersion.VER_3_01_0),
20853
- credentials=rds.Credentials.from_generated_secret("clusteradmin"), # Optional - will default to 'admin' username and generated password
20854
- writer=rds.ClusterInstance.provisioned("writer",
20855
- publicly_accessible=False
20856
- ),
20857
- readers=[
20858
- rds.ClusterInstance.provisioned("reader1", promotion_tier=1),
20859
- rds.ClusterInstance.serverless_v2("reader2")
20860
- ],
20861
- vpc_subnets=ec2.SubnetSelection(
20862
- subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS
20926
+ engine=rds.DatabaseClusterEngine.aurora_mysql(
20927
+ version=rds.AuroraMysqlEngineVersion.VER_3_03_0
20863
20928
  ),
20929
+ writer=rds.ClusterInstance.provisioned("writer"),
20864
20930
  vpc=vpc
20865
20931
  )
20932
+
20933
+ proxy = rds.DatabaseProxy(self, "Proxy",
20934
+ proxy_target=rds.ProxyTarget.from_cluster(cluster),
20935
+ secrets=[cluster.secret],
20936
+ vpc=vpc,
20937
+ client_password_auth_type=rds.ClientPasswordAuthType.MYSQL_NATIVE_PASSWORD
20938
+ )
20866
20939
  '''
20867
20940
 
20868
20941
  def __init__(self) -> None:
@@ -36403,6 +36476,7 @@ class ProcessorFeatures:
36403
36476
  "allow_major_version_upgrade": "allowMajorVersionUpgrade",
36404
36477
  "apply_immediately": "applyImmediately",
36405
36478
  "auto_minor_version_upgrade": "autoMinorVersionUpgrade",
36479
+ "availability_zone": "availabilityZone",
36406
36480
  "ca_certificate": "caCertificate",
36407
36481
  "enable_performance_insights": "enablePerformanceInsights",
36408
36482
  "instance_identifier": "instanceIdentifier",
@@ -36424,6 +36498,7 @@ class ProvisionedClusterInstanceProps(ClusterInstanceOptions):
36424
36498
  allow_major_version_upgrade: typing.Optional[builtins.bool] = None,
36425
36499
  apply_immediately: typing.Optional[builtins.bool] = None,
36426
36500
  auto_minor_version_upgrade: typing.Optional[builtins.bool] = None,
36501
+ availability_zone: typing.Optional[builtins.str] = None,
36427
36502
  ca_certificate: typing.Optional[CaCertificate] = None,
36428
36503
  enable_performance_insights: typing.Optional[builtins.bool] = None,
36429
36504
  instance_identifier: typing.Optional[builtins.str] = None,
@@ -36442,6 +36517,7 @@ class ProvisionedClusterInstanceProps(ClusterInstanceOptions):
36442
36517
  :param allow_major_version_upgrade: Whether to allow upgrade of major version for the DB instance. Default: - false
36443
36518
  :param apply_immediately: Specifies whether changes to the DB instance and any pending modifications are applied immediately, regardless of the ``preferredMaintenanceWindow`` setting. If set to ``false``, changes are applied during the next maintenance window. Until RDS applies the changes, the DB instance remains in a drift state. As a result, the configuration doesn't fully reflect the requested modifications and temporarily diverges from the intended state. This property also determines whether the DB instance reboots when a static parameter is modified in the associated DB parameter group. Default: - Changes will be applied immediately
36444
36519
  :param auto_minor_version_upgrade: Whether to enable automatic upgrade of minor version for the DB instance. Default: - true
36520
+ :param availability_zone: The Availability Zone (AZ) where the database will be created. For Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one. Default: - A random, system-chosen Availability Zone in the endpointʼs AWS Region.
36445
36521
  :param ca_certificate: The identifier of the CA certificate for this DB cluster's instances. Specifying or updating this property triggers a reboot. For RDS DB engines: Default: - RDS will choose a certificate authority
36446
36522
  :param enable_performance_insights: Whether to enable Performance Insights for the DB instance. Default: - false, unless ``performanceInsightRetention`` or ``performanceInsightEncryptionKey`` is set.
36447
36523
  :param instance_identifier: The identifier for the database instance. Default: - CloudFormation generated identifier
@@ -36476,6 +36552,7 @@ class ProvisionedClusterInstanceProps(ClusterInstanceOptions):
36476
36552
  check_type(argname="argument allow_major_version_upgrade", value=allow_major_version_upgrade, expected_type=type_hints["allow_major_version_upgrade"])
36477
36553
  check_type(argname="argument apply_immediately", value=apply_immediately, expected_type=type_hints["apply_immediately"])
36478
36554
  check_type(argname="argument auto_minor_version_upgrade", value=auto_minor_version_upgrade, expected_type=type_hints["auto_minor_version_upgrade"])
36555
+ check_type(argname="argument availability_zone", value=availability_zone, expected_type=type_hints["availability_zone"])
36479
36556
  check_type(argname="argument ca_certificate", value=ca_certificate, expected_type=type_hints["ca_certificate"])
36480
36557
  check_type(argname="argument enable_performance_insights", value=enable_performance_insights, expected_type=type_hints["enable_performance_insights"])
36481
36558
  check_type(argname="argument instance_identifier", value=instance_identifier, expected_type=type_hints["instance_identifier"])
@@ -36495,6 +36572,8 @@ class ProvisionedClusterInstanceProps(ClusterInstanceOptions):
36495
36572
  self._values["apply_immediately"] = apply_immediately
36496
36573
  if auto_minor_version_upgrade is not None:
36497
36574
  self._values["auto_minor_version_upgrade"] = auto_minor_version_upgrade
36575
+ if availability_zone is not None:
36576
+ self._values["availability_zone"] = availability_zone
36498
36577
  if ca_certificate is not None:
36499
36578
  self._values["ca_certificate"] = ca_certificate
36500
36579
  if enable_performance_insights is not None:
@@ -36556,6 +36635,20 @@ class ProvisionedClusterInstanceProps(ClusterInstanceOptions):
36556
36635
  result = self._values.get("auto_minor_version_upgrade")
36557
36636
  return typing.cast(typing.Optional[builtins.bool], result)
36558
36637
 
36638
+ @builtins.property
36639
+ def availability_zone(self) -> typing.Optional[builtins.str]:
36640
+ '''The Availability Zone (AZ) where the database will be created.
36641
+
36642
+ For Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones.
36643
+ Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one.
36644
+
36645
+ :default: - A random, system-chosen Availability Zone in the endpointʼs AWS Region.
36646
+
36647
+ :see: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html
36648
+ '''
36649
+ result = self._values.get("availability_zone")
36650
+ return typing.cast(typing.Optional[builtins.str], result)
36651
+
36559
36652
  @builtins.property
36560
36653
  def ca_certificate(self) -> typing.Optional[CaCertificate]:
36561
36654
  '''The identifier of the CA certificate for this DB cluster's instances.
@@ -38650,6 +38743,7 @@ class ServerlessScalingOptions:
38650
38743
  "allow_major_version_upgrade": "allowMajorVersionUpgrade",
38651
38744
  "apply_immediately": "applyImmediately",
38652
38745
  "auto_minor_version_upgrade": "autoMinorVersionUpgrade",
38746
+ "availability_zone": "availabilityZone",
38653
38747
  "ca_certificate": "caCertificate",
38654
38748
  "enable_performance_insights": "enablePerformanceInsights",
38655
38749
  "instance_identifier": "instanceIdentifier",
@@ -38670,6 +38764,7 @@ class ServerlessV2ClusterInstanceProps(ClusterInstanceOptions):
38670
38764
  allow_major_version_upgrade: typing.Optional[builtins.bool] = None,
38671
38765
  apply_immediately: typing.Optional[builtins.bool] = None,
38672
38766
  auto_minor_version_upgrade: typing.Optional[builtins.bool] = None,
38767
+ availability_zone: typing.Optional[builtins.str] = None,
38673
38768
  ca_certificate: typing.Optional[CaCertificate] = None,
38674
38769
  enable_performance_insights: typing.Optional[builtins.bool] = None,
38675
38770
  instance_identifier: typing.Optional[builtins.str] = None,
@@ -38687,6 +38782,7 @@ class ServerlessV2ClusterInstanceProps(ClusterInstanceOptions):
38687
38782
  :param allow_major_version_upgrade: Whether to allow upgrade of major version for the DB instance. Default: - false
38688
38783
  :param apply_immediately: Specifies whether changes to the DB instance and any pending modifications are applied immediately, regardless of the ``preferredMaintenanceWindow`` setting. If set to ``false``, changes are applied during the next maintenance window. Until RDS applies the changes, the DB instance remains in a drift state. As a result, the configuration doesn't fully reflect the requested modifications and temporarily diverges from the intended state. This property also determines whether the DB instance reboots when a static parameter is modified in the associated DB parameter group. Default: - Changes will be applied immediately
38689
38784
  :param auto_minor_version_upgrade: Whether to enable automatic upgrade of minor version for the DB instance. Default: - true
38785
+ :param availability_zone: The Availability Zone (AZ) where the database will be created. For Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one. Default: - A random, system-chosen Availability Zone in the endpointʼs AWS Region.
38690
38786
  :param ca_certificate: The identifier of the CA certificate for this DB cluster's instances. Specifying or updating this property triggers a reboot. For RDS DB engines: Default: - RDS will choose a certificate authority
38691
38787
  :param enable_performance_insights: Whether to enable Performance Insights for the DB instance. Default: - false, unless ``performanceInsightRetention`` or ``performanceInsightEncryptionKey`` is set.
38692
38788
  :param instance_identifier: The identifier for the database instance. Default: - CloudFormation generated identifier
@@ -38723,6 +38819,7 @@ class ServerlessV2ClusterInstanceProps(ClusterInstanceOptions):
38723
38819
  check_type(argname="argument allow_major_version_upgrade", value=allow_major_version_upgrade, expected_type=type_hints["allow_major_version_upgrade"])
38724
38820
  check_type(argname="argument apply_immediately", value=apply_immediately, expected_type=type_hints["apply_immediately"])
38725
38821
  check_type(argname="argument auto_minor_version_upgrade", value=auto_minor_version_upgrade, expected_type=type_hints["auto_minor_version_upgrade"])
38822
+ check_type(argname="argument availability_zone", value=availability_zone, expected_type=type_hints["availability_zone"])
38726
38823
  check_type(argname="argument ca_certificate", value=ca_certificate, expected_type=type_hints["ca_certificate"])
38727
38824
  check_type(argname="argument enable_performance_insights", value=enable_performance_insights, expected_type=type_hints["enable_performance_insights"])
38728
38825
  check_type(argname="argument instance_identifier", value=instance_identifier, expected_type=type_hints["instance_identifier"])
@@ -38741,6 +38838,8 @@ class ServerlessV2ClusterInstanceProps(ClusterInstanceOptions):
38741
38838
  self._values["apply_immediately"] = apply_immediately
38742
38839
  if auto_minor_version_upgrade is not None:
38743
38840
  self._values["auto_minor_version_upgrade"] = auto_minor_version_upgrade
38841
+ if availability_zone is not None:
38842
+ self._values["availability_zone"] = availability_zone
38744
38843
  if ca_certificate is not None:
38745
38844
  self._values["ca_certificate"] = ca_certificate
38746
38845
  if enable_performance_insights is not None:
@@ -38800,6 +38899,20 @@ class ServerlessV2ClusterInstanceProps(ClusterInstanceOptions):
38800
38899
  result = self._values.get("auto_minor_version_upgrade")
38801
38900
  return typing.cast(typing.Optional[builtins.bool], result)
38802
38901
 
38902
+ @builtins.property
38903
+ def availability_zone(self) -> typing.Optional[builtins.str]:
38904
+ '''The Availability Zone (AZ) where the database will be created.
38905
+
38906
+ For Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones.
38907
+ Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one.
38908
+
38909
+ :default: - A random, system-chosen Availability Zone in the endpointʼs AWS Region.
38910
+
38911
+ :see: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html
38912
+ '''
38913
+ result = self._values.get("availability_zone")
38914
+ return typing.cast(typing.Optional[builtins.str], result)
38915
+
38803
38916
  @builtins.property
38804
38917
  def ca_certificate(self) -> typing.Optional[CaCertificate]:
38805
38918
  '''The identifier of the CA certificate for this DB cluster's instances.
@@ -40815,6 +40928,7 @@ class ClusterInstance(
40815
40928
  allow_major_version_upgrade: typing.Optional[builtins.bool] = None,
40816
40929
  apply_immediately: typing.Optional[builtins.bool] = None,
40817
40930
  auto_minor_version_upgrade: typing.Optional[builtins.bool] = None,
40931
+ availability_zone: typing.Optional[builtins.str] = None,
40818
40932
  ca_certificate: typing.Optional[CaCertificate] = None,
40819
40933
  enable_performance_insights: typing.Optional[builtins.bool] = None,
40820
40934
  instance_identifier: typing.Optional[builtins.str] = None,
@@ -40834,6 +40948,7 @@ class ClusterInstance(
40834
40948
  :param allow_major_version_upgrade: Whether to allow upgrade of major version for the DB instance. Default: - false
40835
40949
  :param apply_immediately: Specifies whether changes to the DB instance and any pending modifications are applied immediately, regardless of the ``preferredMaintenanceWindow`` setting. If set to ``false``, changes are applied during the next maintenance window. Until RDS applies the changes, the DB instance remains in a drift state. As a result, the configuration doesn't fully reflect the requested modifications and temporarily diverges from the intended state. This property also determines whether the DB instance reboots when a static parameter is modified in the associated DB parameter group. Default: - Changes will be applied immediately
40836
40950
  :param auto_minor_version_upgrade: Whether to enable automatic upgrade of minor version for the DB instance. Default: - true
40951
+ :param availability_zone: The Availability Zone (AZ) where the database will be created. For Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one. Default: - A random, system-chosen Availability Zone in the endpointʼs AWS Region.
40837
40952
  :param ca_certificate: The identifier of the CA certificate for this DB cluster's instances. Specifying or updating this property triggers a reboot. For RDS DB engines: Default: - RDS will choose a certificate authority
40838
40953
  :param enable_performance_insights: Whether to enable Performance Insights for the DB instance. Default: - false, unless ``performanceInsightRetention`` or ``performanceInsightEncryptionKey`` is set.
40839
40954
  :param instance_identifier: The identifier for the database instance. Default: - CloudFormation generated identifier
@@ -40860,6 +40975,7 @@ class ClusterInstance(
40860
40975
  allow_major_version_upgrade=allow_major_version_upgrade,
40861
40976
  apply_immediately=apply_immediately,
40862
40977
  auto_minor_version_upgrade=auto_minor_version_upgrade,
40978
+ availability_zone=availability_zone,
40863
40979
  ca_certificate=ca_certificate,
40864
40980
  enable_performance_insights=enable_performance_insights,
40865
40981
  instance_identifier=instance_identifier,
@@ -40884,6 +41000,7 @@ class ClusterInstance(
40884
41000
  allow_major_version_upgrade: typing.Optional[builtins.bool] = None,
40885
41001
  apply_immediately: typing.Optional[builtins.bool] = None,
40886
41002
  auto_minor_version_upgrade: typing.Optional[builtins.bool] = None,
41003
+ availability_zone: typing.Optional[builtins.str] = None,
40887
41004
  ca_certificate: typing.Optional[CaCertificate] = None,
40888
41005
  enable_performance_insights: typing.Optional[builtins.bool] = None,
40889
41006
  instance_identifier: typing.Optional[builtins.str] = None,
@@ -40902,6 +41019,7 @@ class ClusterInstance(
40902
41019
  :param allow_major_version_upgrade: Whether to allow upgrade of major version for the DB instance. Default: - false
40903
41020
  :param apply_immediately: Specifies whether changes to the DB instance and any pending modifications are applied immediately, regardless of the ``preferredMaintenanceWindow`` setting. If set to ``false``, changes are applied during the next maintenance window. Until RDS applies the changes, the DB instance remains in a drift state. As a result, the configuration doesn't fully reflect the requested modifications and temporarily diverges from the intended state. This property also determines whether the DB instance reboots when a static parameter is modified in the associated DB parameter group. Default: - Changes will be applied immediately
40904
41021
  :param auto_minor_version_upgrade: Whether to enable automatic upgrade of minor version for the DB instance. Default: - true
41022
+ :param availability_zone: The Availability Zone (AZ) where the database will be created. For Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one. Default: - A random, system-chosen Availability Zone in the endpointʼs AWS Region.
40905
41023
  :param ca_certificate: The identifier of the CA certificate for this DB cluster's instances. Specifying or updating this property triggers a reboot. For RDS DB engines: Default: - RDS will choose a certificate authority
40906
41024
  :param enable_performance_insights: Whether to enable Performance Insights for the DB instance. Default: - false, unless ``performanceInsightRetention`` or ``performanceInsightEncryptionKey`` is set.
40907
41025
  :param instance_identifier: The identifier for the database instance. Default: - CloudFormation generated identifier
@@ -40927,6 +41045,7 @@ class ClusterInstance(
40927
41045
  allow_major_version_upgrade=allow_major_version_upgrade,
40928
41046
  apply_immediately=apply_immediately,
40929
41047
  auto_minor_version_upgrade=auto_minor_version_upgrade,
41048
+ availability_zone=availability_zone,
40930
41049
  ca_certificate=ca_certificate,
40931
41050
  enable_performance_insights=enable_performance_insights,
40932
41051
  instance_identifier=instance_identifier,
@@ -49813,6 +49932,7 @@ def _typecheckingstub__8cdde1ea7f85160803079277e8fcc0af34768579c1b17b771033b3c63
49813
49932
  allow_major_version_upgrade: typing.Optional[builtins.bool] = None,
49814
49933
  apply_immediately: typing.Optional[builtins.bool] = None,
49815
49934
  auto_minor_version_upgrade: typing.Optional[builtins.bool] = None,
49935
+ availability_zone: typing.Optional[builtins.str] = None,
49816
49936
  ca_certificate: typing.Optional[CaCertificate] = None,
49817
49937
  enable_performance_insights: typing.Optional[builtins.bool] = None,
49818
49938
  instance_identifier: typing.Optional[builtins.str] = None,
@@ -49832,6 +49952,7 @@ def _typecheckingstub__431d59239caf38b9912bfae3130d40eeb8bdb18e013240bac43c98015
49832
49952
  allow_major_version_upgrade: typing.Optional[builtins.bool] = None,
49833
49953
  apply_immediately: typing.Optional[builtins.bool] = None,
49834
49954
  auto_minor_version_upgrade: typing.Optional[builtins.bool] = None,
49955
+ availability_zone: typing.Optional[builtins.str] = None,
49835
49956
  ca_certificate: typing.Optional[CaCertificate] = None,
49836
49957
  enable_performance_insights: typing.Optional[builtins.bool] = None,
49837
49958
  instance_identifier: typing.Optional[builtins.str] = None,
@@ -50732,6 +50853,7 @@ def _typecheckingstub__0d5c78a39da629a585066921d3ee78da795285acdbebe6935198fc929
50732
50853
  allow_major_version_upgrade: typing.Optional[builtins.bool] = None,
50733
50854
  apply_immediately: typing.Optional[builtins.bool] = None,
50734
50855
  auto_minor_version_upgrade: typing.Optional[builtins.bool] = None,
50856
+ availability_zone: typing.Optional[builtins.str] = None,
50735
50857
  ca_certificate: typing.Optional[CaCertificate] = None,
50736
50858
  enable_performance_insights: typing.Optional[builtins.bool] = None,
50737
50859
  instance_identifier: typing.Optional[builtins.str] = None,
@@ -50972,6 +51094,7 @@ def _typecheckingstub__c8fd71a155386e8ce12e74b8c5684dfdd43d26e347ef8bbb979e8a2c3
50972
51094
  allow_major_version_upgrade: typing.Optional[builtins.bool] = None,
50973
51095
  apply_immediately: typing.Optional[builtins.bool] = None,
50974
51096
  auto_minor_version_upgrade: typing.Optional[builtins.bool] = None,
51097
+ availability_zone: typing.Optional[builtins.str] = None,
50975
51098
  ca_certificate: typing.Optional[CaCertificate] = None,
50976
51099
  enable_performance_insights: typing.Optional[builtins.bool] = None,
50977
51100
  instance_identifier: typing.Optional[builtins.str] = None,
@@ -51109,6 +51232,7 @@ def _typecheckingstub__d0d2cd14a2c7ed00bfb6fd9860c31cd0b1af1bff8343258b1b4a8d847
51109
51232
  allow_major_version_upgrade: typing.Optional[builtins.bool] = None,
51110
51233
  apply_immediately: typing.Optional[builtins.bool] = None,
51111
51234
  auto_minor_version_upgrade: typing.Optional[builtins.bool] = None,
51235
+ availability_zone: typing.Optional[builtins.str] = None,
51112
51236
  ca_certificate: typing.Optional[CaCertificate] = None,
51113
51237
  enable_performance_insights: typing.Optional[builtins.bool] = None,
51114
51238
  instance_identifier: typing.Optional[builtins.str] = None,
@@ -51130,6 +51254,7 @@ def _typecheckingstub__95714f22d2724c29931e2710712a92b10932588d2061fa1ceed93097e
51130
51254
  allow_major_version_upgrade: typing.Optional[builtins.bool] = None,
51131
51255
  apply_immediately: typing.Optional[builtins.bool] = None,
51132
51256
  auto_minor_version_upgrade: typing.Optional[builtins.bool] = None,
51257
+ availability_zone: typing.Optional[builtins.str] = None,
51133
51258
  ca_certificate: typing.Optional[CaCertificate] = None,
51134
51259
  enable_performance_insights: typing.Optional[builtins.bool] = None,
51135
51260
  instance_identifier: typing.Optional[builtins.str] = None,
@@ -2912,7 +2912,7 @@ class DeployOptions(DefaultCdkOptions):
2912
2912
  :param notification_arns: ARNs of SNS topics that CloudFormation will notify with stack related events. Default: - no notifications
2913
2913
  :param outputs_file: Path to file where stack outputs will be written after a successful deploy as JSON. Default: - Outputs are not written to any file
2914
2914
  :param parameters: Additional parameters for CloudFormation at deploy time. Default: {}
2915
- :param require_approval: What kind of security changes require approval. Default: RequireApproval.Never
2915
+ :param require_approval: What kind of security changes require approval. Default: RequireApproval.NEVER
2916
2916
  :param reuse_assets: Reuse the assets with the given asset IDs. Default: - do not reuse assets
2917
2917
  :param rollback: Rollback failed deployments. Default: true
2918
2918
  :param toolkit_stack_name: Name of the toolkit stack to use/deploy. Default: CDKToolkit
@@ -3391,7 +3391,7 @@ class DeployOptions(DefaultCdkOptions):
3391
3391
  def require_approval(self) -> typing.Optional["RequireApproval"]:
3392
3392
  '''What kind of security changes require approval.
3393
3393
 
3394
- :default: RequireApproval.Never
3394
+ :default: RequireApproval.NEVER
3395
3395
  '''
3396
3396
  result = self._values.get("require_approval")
3397
3397
  return typing.cast(typing.Optional["RequireApproval"], result)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: aws-cdk-lib
3
- Version: 2.183.0
3
+ Version: 2.184.1
4
4
  Summary: Version 2 of the AWS Cloud Development Kit library
5
5
  Home-page: https://github.com/aws/aws-cdk
6
6
  Author: Amazon Web Services
@@ -10,22 +10,21 @@ Classifier: Intended Audience :: Developers
10
10
  Classifier: Operating System :: OS Independent
11
11
  Classifier: Programming Language :: JavaScript
12
12
  Classifier: Programming Language :: Python :: 3 :: Only
13
- Classifier: Programming Language :: Python :: 3.8
14
13
  Classifier: Programming Language :: Python :: 3.9
15
14
  Classifier: Programming Language :: Python :: 3.10
16
15
  Classifier: Programming Language :: Python :: 3.11
17
16
  Classifier: Typing :: Typed
18
17
  Classifier: Development Status :: 5 - Production/Stable
19
18
  Classifier: License :: OSI Approved
20
- Requires-Python: ~=3.8
19
+ Requires-Python: ~=3.9
21
20
  Description-Content-Type: text/markdown
22
21
  License-File: LICENSE
23
22
  License-File: NOTICE
24
- Requires-Dist: aws-cdk.asset-awscli-v1<3.0.0,>=2.2.208
23
+ Requires-Dist: aws-cdk.asset-awscli-v1<3.0.0,>=2.2.227
25
24
  Requires-Dist: aws-cdk.asset-node-proxy-agent-v6<3.0.0,>=2.1.0
26
- Requires-Dist: aws-cdk.cloud-assembly-schema<41.0.0,>=40.6.0
25
+ Requires-Dist: aws-cdk.cloud-assembly-schema<41.0.0,>=40.7.0
27
26
  Requires-Dist: constructs<11.0.0,>=10.0.0
28
- Requires-Dist: jsii<2.0.0,>=1.106.0
27
+ Requires-Dist: jsii<2.0.0,>=1.109.0
29
28
  Requires-Dist: publication>=0.0.3
30
29
  Requires-Dist: typeguard<4.3.0,>=2.13.3
31
30
 
@@ -1,7 +1,7 @@
1
1
  aws_cdk/__init__.py,sha256=CT2hAjEz6eznk0nBuMMMKiAoNy1NfwGxf5uzqjCTpY4,1988974
2
2
  aws_cdk/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
3
- aws_cdk/_jsii/__init__.py,sha256=pLHC3HsMfNrXlLzgdVyUjBh-RJUZKnhjGlBVXowB0Hc,1543
4
- aws_cdk/_jsii/aws-cdk-lib@2.183.0.jsii.tgz,sha256=pMFojqNmU9tRtC6YbOL3WF8gc-b4G2VpQvd0nk0SlPQ,24608489
3
+ aws_cdk/_jsii/__init__.py,sha256=uqvFRTAPKbpNWWbGKk3_hYN4BN7V289MWvkTKC1AgVA,1543
4
+ aws_cdk/_jsii/aws-cdk-lib@2.184.1.jsii.tgz,sha256=6xKGzcMDElUcNlHBkUf4Ww3C6H0ufGlheZOEktx1Jmc,24613012
5
5
  aws_cdk/alexa_ask/__init__.py,sha256=yF4ftch7XArzAniw_xoUmGi3wLGeBqIUlOjBTHSxDb4,36370
6
6
  aws_cdk/assertions/__init__.py,sha256=Fe7BZZpx5cUrxKtNdGbU4eXue79txDh_GqjfLV5wgCM,94355
7
7
  aws_cdk/aws_accessanalyzer/__init__.py,sha256=XiNBttjwK1s2CYyccwKCXH5Nzb74L1i6rVnZ9Zwh49I,57464
@@ -49,7 +49,7 @@ aws_cdk/aws_cleanroomsml/__init__.py,sha256=OZ769Xji5yR16YuvCcaCuH-5emjHREnhIUqj
49
49
  aws_cdk/aws_cloud9/__init__.py,sha256=WbYJhSBPFY4ix6PJiND_KHPIVWxim7VAsbDJ6ePpGec,43094
50
50
  aws_cdk/aws_cloudformation/__init__.py,sha256=7u_vH1KkZPhdXHeQF7lowC3OOTazNy6VVhfSzuvpkJg,523009
51
51
  aws_cdk/aws_cloudfront/__init__.py,sha256=lrA1dgv7ksOyVXtrEkLhKLBMIwcN6sfo8bn2FASf2d8,1636176
52
- aws_cdk/aws_cloudfront/experimental/__init__.py,sha256=Svd8hUJhY0KNHirVPPGjF1vmiBBiGoxlcCI93XvdeYQ,142419
52
+ aws_cdk/aws_cloudfront/experimental/__init__.py,sha256=6tyuWzp_aAXavJNY0Biw4oQOGViZEolRSLRkyeaVKM0,142434
53
53
  aws_cdk/aws_cloudfront_origins/__init__.py,sha256=D8kwMlk2NKWrXsB7J9EDcGJYL9BSxN1uXWdw7olR1eg,320367
54
54
  aws_cdk/aws_cloudtrail/__init__.py,sha256=OHfsyI4ErR7X1fSOjcezQcN0OVu68TcYnQ1Jw1TS11I,365818
55
55
  aws_cdk/aws_cloudwatch/__init__.py,sha256=VHqNGmsxGh8Pay-7dLewu0z--nXt0xIiaTbmQgMJJDA,815681
@@ -108,8 +108,8 @@ aws_cdk/aws_emr/__init__.py,sha256=qP-9c3PI1TltlasLxEKJGXK5YXa1qIquA_U06GWNLvw,7
108
108
  aws_cdk/aws_emrcontainers/__init__.py,sha256=oxnV_uTszH48R3bV661tMkLbbyAphYKzYCg7C8LSOBk,31965
109
109
  aws_cdk/aws_emrserverless/__init__.py,sha256=UhwWby2jP2uaBGV5JNhRS1Y-SB914vhJknIleLyitKU,156837
110
110
  aws_cdk/aws_entityresolution/__init__.py,sha256=OKdLQprExmxYTJp3Awbtm_rRjTBzh0a8h2xa4m8Sa6Q,268322
111
- aws_cdk/aws_events/__init__.py,sha256=_LVqVv5zQS7CLreF8OIfimwin3qdAtXZObzGOpZzWR4,690890
112
- aws_cdk/aws_events_targets/__init__.py,sha256=-7Iu_YnxudK_goEYA2rhRc6-Lf_quUXtmXZC3kmpd1Q,294495
111
+ aws_cdk/aws_events/__init__.py,sha256=qYkAVTpVRhkr74bB4yAiEwZTPEYIRbiooRFoBWdltqs,690927
112
+ aws_cdk/aws_events_targets/__init__.py,sha256=VjTp0TggOFCaSt09TkbWg4dCEIdh22BodZ7I7LaGe4Q,295737
113
113
  aws_cdk/aws_eventschemas/__init__.py,sha256=UUe4lA7r4R1145JUgfbMmvgyLVscusnYYPmuDgUhqU4,72936
114
114
  aws_cdk/aws_evidently/__init__.py,sha256=adPCcp-p4hfgkrjB3Hxlmum-NU4gMq9TrWcMEDNhziE,248694
115
115
  aws_cdk/aws_finspace/__init__.py,sha256=WoB0n3g8JuoSnzQXAjy3rxKka_vTJBvassXdV8ux5BI,51444
@@ -130,7 +130,7 @@ aws_cdk/aws_groundstation/__init__.py,sha256=hPZkcNCJiPbU8-OI4iCYpC27uzednN1Vt6F
130
130
  aws_cdk/aws_guardduty/__init__.py,sha256=XMUcOWwWMo073sJ_gbrB6FRtN190FVTgLg4oVcdHP1g,261720
131
131
  aws_cdk/aws_healthimaging/__init__.py,sha256=bNpcbOhCYjvvgCJwb6pbmA8BH_8RjtjKJZG8f40I8qQ,17410
132
132
  aws_cdk/aws_healthlake/__init__.py,sha256=dMFbQBLwb5u5qcae0VijYlqI8dU47O7F4Cr7uSPqFsE,55398
133
- aws_cdk/aws_iam/__init__.py,sha256=RudpoLK7DZ17d7sAHzlCUy5MxVRTdgLHnf4S23BwOeY,862810
133
+ aws_cdk/aws_iam/__init__.py,sha256=cJIThtNamlAD5fupUatmdJpHyPtmlE-1I9AKREde3Lw,862630
134
134
  aws_cdk/aws_identitystore/__init__.py,sha256=yXjJuier75gXXW5QBSRTsGvDx5BdNdu9VRcnLhbEMlQ,32106
135
135
  aws_cdk/aws_imagebuilder/__init__.py,sha256=xnhEuR7Ad_174z7mrTVY9k6e31IKVwOVpXfFUmZHUWI,592140
136
136
  aws_cdk/aws_inspector/__init__.py,sha256=cjCiSxDMYusGsxpd2a3d2wOIW7UK32LdpPT5FvLDSHU,52446
@@ -155,13 +155,13 @@ aws_cdk/aws_kendraranking/__init__.py,sha256=3HBuK2dbNWAYDUbjyCmc7D9M9pDo1jg-DNC
155
155
  aws_cdk/aws_kinesis/__init__.py,sha256=Zy8yybVxFdNNNFkamLRdGFivYe7J90nrGAT_HiGONVM,353435
156
156
  aws_cdk/aws_kinesisanalytics/__init__.py,sha256=0ZPCWgzPFOS99sfyhzdTfr7-eQQMoUlR_eDmf5PCmc0,587440
157
157
  aws_cdk/aws_kinesisanalyticsv2/__init__.py,sha256=zAK6orw0mZs0V98cWWb94KcE76_nieuxXVFgKQCD9XM,387963
158
- aws_cdk/aws_kinesisfirehose/__init__.py,sha256=BUVKNQkI8thv66zNmX7ZCCknKP1akRFsHZD7eV5_9Vo,843520
158
+ aws_cdk/aws_kinesisfirehose/__init__.py,sha256=iqmnmluI82kvnqebGGMq71O2crmq7BukMZ8fV0zn1Tc,842936
159
159
  aws_cdk/aws_kinesisvideo/__init__.py,sha256=KR-ZCTMZFNR9EJ32dIjEfeiZQMJZha33v-2lg0-mhYY,38745
160
160
  aws_cdk/aws_kms/__init__.py,sha256=zVprnq7sot7-s8S3-EUMDO4YB7Y1DTDIycenuMo3edw,251590
161
161
  aws_cdk/aws_lakeformation/__init__.py,sha256=MmGVkaQ4_kMg9y78pdpq-YO4OEJXC5vMi3Z5UliTZ3M,335979
162
- aws_cdk/aws_lambda/__init__.py,sha256=j-4rw40EIVbJetn34JK52bqaKtZJsshlDaYeUCukcNM,1744584
162
+ aws_cdk/aws_lambda/__init__.py,sha256=eqma_MLwzFMOHk6IvtWPCkfdf14gqPOHSFYpuUmt7qE,1744704
163
163
  aws_cdk/aws_lambda_destinations/__init__.py,sha256=WHB7Vk6jk0pwFVUEAPGfFuSUUOJirZ7h3K5Z92yoE8E,24123
164
- aws_cdk/aws_lambda_event_sources/__init__.py,sha256=zL9hyaZTZbcJNwWfLQkryhBzWBi-5MgxjFBivhzzH0A,247801
164
+ aws_cdk/aws_lambda_event_sources/__init__.py,sha256=RrZEPOjJO_mJd-bR1wmP5Su8sFbGcjaZ9gf-KyJ_U-4,247948
165
165
  aws_cdk/aws_lambda_nodejs/__init__.py,sha256=5ZHsuf92bN0tyUQDKHmsVuF0s6nW28juTIgcMUyPen4,179430
166
166
  aws_cdk/aws_launchwizard/__init__.py,sha256=rPNzud_j-MSKV7aH_fJcJhTHdXtzItJA8hVk37HJHuY,24028
167
167
  aws_cdk/aws_lex/__init__.py,sha256=wYvO4joPahEBoAF9dg_wj6u1nymh32ukEYkG4UfWAew,707100
@@ -216,7 +216,7 @@ aws_cdk/aws_qldb/__init__.py,sha256=Y77mQlE_bPnvp2-xi6Zx7Nqq88MVjB0bGsFskfkTpj8,
216
216
  aws_cdk/aws_quicksight/__init__.py,sha256=GmBXrz4cuiOiHZBU24eKDQWKek5uI9z_KDoJpP3xxTA,14366277
217
217
  aws_cdk/aws_ram/__init__.py,sha256=hTueUpQ-pdpE9TIHA7Ekd4-IMhT006gRbqhaUtTDDy0,51838
218
218
  aws_cdk/aws_rbin/__init__.py,sha256=CzabGGl5JnclFXLGuB8hu8bf_z0rWog3FqYK7o84QYo,50396
219
- aws_cdk/aws_rds/__init__.py,sha256=mQObK7IFtNzqiu21YUfnZ1IZrtwVferdfOBt3AznHbg,2920587
219
+ aws_cdk/aws_rds/__init__.py,sha256=zpJhdg10EBWDOXz3ffCnH3YKld4q4s26VRRRgkPQCvw,2929219
220
220
  aws_cdk/aws_redshift/__init__.py,sha256=_bz821c0euCtdo8lBVuh1EQ7r3Rg3kvzgCfEJz1bmLw,405044
221
221
  aws_cdk/aws_redshiftserverless/__init__.py,sha256=_sToFikuIuA0NKu0ug8TqhCnDsCu-5SLosIeCvtxkH0,170949
222
222
  aws_cdk/aws_refactorspaces/__init__.py,sha256=HlrRPKH0kwPz6Ka6zooBl3hqU5s6lpjiLrMjJXHDIro,123625
@@ -283,7 +283,7 @@ aws_cdk/aws_workspaces/__init__.py,sha256=agC72J8zP_G2NI4nzzYjmlSNUygoa7f-ebFcfx
283
283
  aws_cdk/aws_workspacesthinclient/__init__.py,sha256=nFSsouXqpjSDrsEBOH0CGV6CCwHxCjNACgcigDEMN8g,49517
284
284
  aws_cdk/aws_workspacesweb/__init__.py,sha256=CppQEMxEhUPkEPh6MmFFfhsm1c2TybopdiZGuuiAEfA,280980
285
285
  aws_cdk/aws_xray/__init__.py,sha256=xEfRY-cPsnAqrrYueXFUxXskU4CxwejB1wrTMGrNKCc,106868
286
- aws_cdk/cloud_assembly_schema/__init__.py,sha256=o5Jxc9ApXbvUYeNbljV-HFrVWOeWsBPxuAoQFUsV8p0,440021
286
+ aws_cdk/cloud_assembly_schema/__init__.py,sha256=7FnuQlp_Lxoe9Qqqysx7AIR7oUEeMLsxjoM_FdCgkBE,440021
287
287
  aws_cdk/cloudformation_include/__init__.py,sha256=nWU59UskSwMHLVfmA2qrsTOqUk65NWElIPTvp-KLA_8,50358
288
288
  aws_cdk/custom_resources/__init__.py,sha256=lZy80GjA5K94Dr5vkA4C_77fc4Ow0vrSBQ-4P6K2M18,173059
289
289
  aws_cdk/cx_api/__init__.py,sha256=soCQVBkw6fPzkwGqmqxizGVpq9Mq6asAuhqICFAm0wo,176191
@@ -292,9 +292,9 @@ aws_cdk/lambda_layer_node_proxy_agent/__init__.py,sha256=Q0PMbPsP4A7YO-YZe9esn-i
292
292
  aws_cdk/pipelines/__init__.py,sha256=HOXBjF1CPi8wQm8zl8Si7G8DpfKl_DyY7-Bgm8WpjEU,397495
293
293
  aws_cdk/region_info/__init__.py,sha256=29jwDjGrb4gSGedV1W1e5SuAYF9ZZKYsz0gsSFjdBO4,39658
294
294
  aws_cdk/triggers/__init__.py,sha256=fPVnj7ot9BFSzO-cTWQz9bMuGPG1hqZFJ7ROMkq0vnk,123578
295
- aws_cdk_lib-2.183.0.dist-info/LICENSE,sha256=y47tc38H0C4DpGljYUZDl8XxidQjNxxGLq-K4jwv6Xc,11391
296
- aws_cdk_lib-2.183.0.dist-info/METADATA,sha256=ZWAoX8incNXZhQtyxm-sXx4qA-HgVVho6BOXySIu5_g,59972
297
- aws_cdk_lib-2.183.0.dist-info/NOTICE,sha256=lrDSwMl9zn-5xv2z3qp2Rw6Nm8pARejpIJ5eXzJtuQk,41177
298
- aws_cdk_lib-2.183.0.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
299
- aws_cdk_lib-2.183.0.dist-info/top_level.txt,sha256=1TALAKbuUGsMSrfKWEf268lySCmcqSEO6cDYe_XlLHM,8
300
- aws_cdk_lib-2.183.0.dist-info/RECORD,,
295
+ aws_cdk_lib-2.184.1.dist-info/LICENSE,sha256=y47tc38H0C4DpGljYUZDl8XxidQjNxxGLq-K4jwv6Xc,11391
296
+ aws_cdk_lib-2.184.1.dist-info/METADATA,sha256=nGc-P3ImT6U18PyDNaD6InjkYd1VupLSmWbcsmGBU2c,59922
297
+ aws_cdk_lib-2.184.1.dist-info/NOTICE,sha256=lrDSwMl9zn-5xv2z3qp2Rw6Nm8pARejpIJ5eXzJtuQk,41177
298
+ aws_cdk_lib-2.184.1.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
299
+ aws_cdk_lib-2.184.1.dist-info/top_level.txt,sha256=1TALAKbuUGsMSrfKWEf268lySCmcqSEO6cDYe_XlLHM,8
300
+ aws_cdk_lib-2.184.1.dist-info/RECORD,,