aws-cdk-lib 2.148.1__py3-none-any.whl → 2.150.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (51)
  1. aws_cdk/__init__.py +4 -8
  2. aws_cdk/_jsii/__init__.py +1 -1
  3. aws_cdk/_jsii/{aws-cdk-lib@2.148.1.jsii.tgz → aws-cdk-lib@2.150.0.jsii.tgz} +0 -0
  4. aws_cdk/aws_applicationautoscaling/__init__.py +16 -12
  5. aws_cdk/aws_bedrock/__init__.py +60 -28
  6. aws_cdk/aws_cloudformation/__init__.py +4 -8
  7. aws_cdk/aws_cloudtrail/__init__.py +30 -558
  8. aws_cdk/aws_cloudwatch/__init__.py +1 -1
  9. aws_cdk/aws_codebuild/__init__.py +57 -5
  10. aws_cdk/aws_codecommit/__init__.py +103 -0
  11. aws_cdk/aws_codedeploy/__init__.py +251 -5
  12. aws_cdk/aws_codepipeline/__init__.py +80 -5
  13. aws_cdk/aws_codestarnotifications/__init__.py +90 -4
  14. aws_cdk/aws_cognito/__init__.py +1 -2
  15. aws_cdk/aws_deadline/__init__.py +9 -15
  16. aws_cdk/aws_dms/__init__.py +10 -10
  17. aws_cdk/aws_ec2/__init__.py +86 -4
  18. aws_cdk/aws_ecs/__init__.py +10 -8
  19. aws_cdk/aws_eks/__init__.py +26 -20
  20. aws_cdk/aws_elasticloadbalancingv2/__init__.py +2 -2
  21. aws_cdk/aws_emr/__init__.py +26 -28
  22. aws_cdk/aws_events/__init__.py +1 -13
  23. aws_cdk/aws_fsx/__init__.py +25 -23
  24. aws_cdk/aws_glue/__init__.py +3 -3
  25. aws_cdk/aws_guardduty/__init__.py +6 -4
  26. aws_cdk/aws_iam/__init__.py +8 -5
  27. aws_cdk/aws_kinesisanalytics/__init__.py +11 -11
  28. aws_cdk/aws_kinesisanalyticsv2/__init__.py +11 -11
  29. aws_cdk/aws_lambda/__init__.py +19 -2
  30. aws_cdk/aws_logs/__init__.py +9 -0
  31. aws_cdk/aws_qbusiness/__init__.py +21 -7
  32. aws_cdk/aws_rds/__init__.py +18 -12
  33. aws_cdk/aws_rolesanywhere/__init__.py +22 -13
  34. aws_cdk/aws_route53profiles/__init__.py +4 -4
  35. aws_cdk/aws_s3/__init__.py +15 -117
  36. aws_cdk/aws_sagemaker/__init__.py +10 -10
  37. aws_cdk/aws_ses/__init__.py +119 -102
  38. aws_cdk/aws_stepfunctions_tasks/__init__.py +215 -24
  39. aws_cdk/aws_synthetics/__init__.py +15 -1
  40. aws_cdk/aws_verifiedpermissions/__init__.py +7 -9
  41. aws_cdk/aws_wafv2/__init__.py +10 -16
  42. aws_cdk/aws_workspaces/__init__.py +86 -56
  43. aws_cdk/custom_resources/__init__.py +91 -23
  44. aws_cdk/pipelines/__init__.py +1 -1
  45. aws_cdk/region_info/__init__.py +32 -12
  46. {aws_cdk_lib-2.148.1.dist-info → aws_cdk_lib-2.150.0.dist-info}/METADATA +1 -1
  47. {aws_cdk_lib-2.148.1.dist-info → aws_cdk_lib-2.150.0.dist-info}/RECORD +51 -51
  48. {aws_cdk_lib-2.148.1.dist-info → aws_cdk_lib-2.150.0.dist-info}/LICENSE +0 -0
  49. {aws_cdk_lib-2.148.1.dist-info → aws_cdk_lib-2.150.0.dist-info}/NOTICE +0 -0
  50. {aws_cdk_lib-2.148.1.dist-info → aws_cdk_lib-2.150.0.dist-info}/WHEEL +0 -0
  51. {aws_cdk_lib-2.148.1.dist-info → aws_cdk_lib-2.150.0.dist-info}/top_level.txt +0 -0
@@ -2161,17 +2161,17 @@ class CfnFileSystem(
  ) -> None:
  '''The configuration for this Amazon FSx for NetApp ONTAP file system.

- :param deployment_type: Specifies the FSx for ONTAP file system deployment type to use in creating the file system. - ``MULTI_AZ_1`` - (Default) A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. - ``SINGLE_AZ_1`` - A file system configured for Single-AZ redundancy. - ``SINGLE_AZ_2`` - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. For information about the use cases for Multi-AZ and Single-AZ deployments, refer to `Choosing a file system deployment type <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-AZ.html>`_ .
+ :param deployment_type: Specifies the FSx for ONTAP file system deployment type to use in creating the file system. - ``MULTI_AZ_1`` - A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. This is a first-generation FSx for ONTAP file system. - ``MULTI_AZ_2`` - A high availability file system configured for Multi-AZ redundancy to tolerate temporary AZ unavailability. This is a second-generation FSx for ONTAP file system. - ``SINGLE_AZ_1`` - A file system configured for Single-AZ redundancy. This is a first-generation FSx for ONTAP file system. - ``SINGLE_AZ_2`` - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. This is a second-generation FSx for ONTAP file system. For information about the use cases for Multi-AZ and Single-AZ deployments, refer to `Choosing a file system deployment type <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-AZ.html>`_ .
  :param automatic_backup_retention_days: The number of days to retain automatic backups. Setting this property to ``0`` disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is ``30`` .
  :param daily_automatic_backup_start_time: A recurring daily time, in the format ``HH:MM`` . ``HH`` is the zero-padded hour of the day (0-23), and ``MM`` is the zero-padded minute of the hour. For example, ``05:00`` specifies 5 AM daily.
  :param disk_iops_configuration: The SSD IOPS configuration for the FSx for ONTAP file system.
  :param endpoint_ip_address_range: (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses from the VPC’s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.
  :param fsx_admin_password: The ONTAP administrative password for the ``fsxadmin`` user with which you administer your file system using the NetApp ONTAP CLI and REST API.
- :param ha_pairs: Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of ``StorageCapacity`` , ``Iops`` , and ``ThroughputCapacity`` . For more information, see `High-availability (HA) pairs <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/administering-file-systems.html#HA-pairs>`_ in the FSx for ONTAP user guide. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: - The value of ``HAPairs`` is less than 1 or greater than 12. - The value of ``HAPairs`` is greater than 1 and the value of ``DeploymentType`` is ``SINGLE_AZ_1`` or ``MULTI_AZ_1`` .
- :param preferred_subnet_id: Required when ``DeploymentType`` is set to ``MULTI_AZ_1`` . This specifies the subnet in which you want the preferred file server to be located.
+ :param ha_pairs: Specifies how many high-availability (HA) pairs of file servers will power your file system. First-generation file systems are powered by 1 HA pair. Second-generation multi-AZ file systems are powered by 1 HA pair. Second generation single-AZ file systems are powered by up to 12 HA pairs. The default value is 1. The value of this property affects the values of ``StorageCapacity`` , ``Iops`` , and ``ThroughputCapacity`` . For more information, see `High-availability (HA) pairs <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/administering-file-systems.html#HA-pairs>`_ in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see `Using block storage protocols <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/supported-fsx-clients.html#using-block-storage>`_ . Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: - The value of ``HAPairs`` is less than 1 or greater than 12. - The value of ``HAPairs`` is greater than 1 and the value of ``DeploymentType`` is ``SINGLE_AZ_1`` , ``MULTI_AZ_1`` , or ``MULTI_AZ_2`` .
+ :param preferred_subnet_id: Required when ``DeploymentType`` is set to ``MULTI_AZ_1`` or ``MULTI_AZ_2`` . This specifies the subnet in which you want the preferred file server to be located.
  :param route_table_ids: (Multi-AZ only) Specifies the route tables in which Amazon FSx creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table. .. epigraph:: Amazon FSx manages these route tables for Multi-AZ file systems using tag-based authentication. These route tables are tagged with ``Key: AmazonFSx; Value: ManagedByAmazonFSx`` . When creating FSx for ONTAP Multi-AZ file systems using AWS CloudFormation we recommend that you add the ``Key: AmazonFSx; Value: ManagedByAmazonFSx`` tag manually.
  :param throughput_capacity: Sets the throughput capacity for the file system that you're creating in megabytes per second (MBps). For more information, see `Managing throughput capacity <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-throughput-capacity.html>`_ in the FSx for ONTAP User Guide. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: - The value of ``ThroughputCapacity`` and ``ThroughputCapacityPerHAPair`` are not the same value. - The value of ``ThroughputCapacity`` when divided by the value of ``HAPairs`` is outside of the valid range for ``ThroughputCapacity`` .
- :param throughput_capacity_per_ha_pair: Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system. You can define either the ``ThroughputCapacityPerHAPair`` or the ``ThroughputCapacity`` when creating a file system, but not both. This field and ``ThroughputCapacity`` are the same for scale-up file systems powered by one HA pair. - For ``SINGLE_AZ_1`` and ``MULTI_AZ_1`` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps. - For ``SINGLE_AZ_2`` file systems, valid values are 3072 or 6144 MBps. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: - The value of ``ThroughputCapacity`` and ``ThroughputCapacityPerHAPair`` are not the same value for file systems with one HA pair. - The value of deployment type is ``SINGLE_AZ_2`` and ``ThroughputCapacity`` / ``ThroughputCapacityPerHAPair`` is a valid HA pair (a value between 2 and 12). - The value of ``ThroughputCapacityPerHAPair`` is not a valid value.
+ :param throughput_capacity_per_ha_pair: Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system. You can define either the ``ThroughputCapacityPerHAPair`` or the ``ThroughputCapacity`` when creating a file system, but not both. This field and ``ThroughputCapacity`` are the same for file systems powered by one HA pair. - For ``SINGLE_AZ_1`` and ``MULTI_AZ_1`` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps. - For ``SINGLE_AZ_2`` , valid values are 1536, 3072, or 6144 MBps. - For ``MULTI_AZ_2`` , valid values are 384, 768, 1536, 3072, or 6144 MBps. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: - The value of ``ThroughputCapacity`` and ``ThroughputCapacityPerHAPair`` are not the same value for file systems with one HA pair. - The value of deployment type is ``SINGLE_AZ_2`` and ``ThroughputCapacity`` / ``ThroughputCapacityPerHAPair`` is not a valid HA pair (a value between 1 and 12). - The value of ``ThroughputCapacityPerHAPair`` is not a valid value.
  :param weekly_maintenance_start_time: A recurring weekly time, in the format ``D:HH:MM`` . ``D`` is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see `the ISO-8601 spec as described on Wikipedia <https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/ISO_week_date>`_ . ``HH`` is the zero-padded hour of the day (0-23), and ``MM`` is the zero-padded minute of the hour. For example, ``1:05:00`` specifies maintenance at 5 AM Monday.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fsx-filesystem-ontapconfiguration.html
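
For orientation, a minimal aws-cdk-lib (Python) sketch of the ONTAP configuration documented in this hunk, using the new second-generation ``MULTI_AZ_2`` deployment type. It assumes the code runs inside a Stack's ``__init__`` (``self`` is the stack scope); the subnet IDs and capacity values are placeholders, not values taken from this release::

    from aws_cdk import aws_fsx as fsx

    # Second-generation Multi-AZ file system; subnet IDs are placeholders.
    fsx.CfnFileSystem(self, "OntapFileSystem",
        file_system_type="ONTAP",
        subnet_ids=["subnet-1111", "subnet-2222"],
        storage_capacity=1024,  # GiB
        ontap_configuration=fsx.CfnFileSystem.OntapConfigurationProperty(
            deployment_type="MULTI_AZ_2",
            preferred_subnet_id="subnet-1111",    # required for MULTI_AZ_1 / MULTI_AZ_2
            throughput_capacity_per_ha_pair=384,  # valid MULTI_AZ_2 value per the docstring above
            ha_pairs=1,
            automatic_backup_retention_days=30,
        ),
    )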
@@ -2247,9 +2247,10 @@ class CfnFileSystem(
  def deployment_type(self) -> builtins.str:
  '''Specifies the FSx for ONTAP file system deployment type to use in creating the file system.

- - ``MULTI_AZ_1`` - (Default) A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability.
- - ``SINGLE_AZ_1`` - A file system configured for Single-AZ redundancy.
- - ``SINGLE_AZ_2`` - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy.
+ - ``MULTI_AZ_1`` - A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. This is a first-generation FSx for ONTAP file system.
+ - ``MULTI_AZ_2`` - A high availability file system configured for Multi-AZ redundancy to tolerate temporary AZ unavailability. This is a second-generation FSx for ONTAP file system.
+ - ``SINGLE_AZ_1`` - A file system configured for Single-AZ redundancy. This is a first-generation FSx for ONTAP file system.
+ - ``SINGLE_AZ_2`` - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. This is a second-generation FSx for ONTAP file system.

  For information about the use cases for Multi-AZ and Single-AZ deployments, refer to `Choosing a file system deployment type <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-AZ.html>`_ .

@@ -2316,12 +2317,12 @@ class CfnFileSystem(
  def ha_pairs(self) -> typing.Optional[jsii.Number]:
  '''Specifies how many high-availability (HA) pairs of file servers will power your file system.

- Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of ``StorageCapacity`` , ``Iops`` , and ``ThroughputCapacity`` . For more information, see `High-availability (HA) pairs <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/administering-file-systems.html#HA-pairs>`_ in the FSx for ONTAP user guide.
+ First-generation file systems are powered by 1 HA pair. Second-generation multi-AZ file systems are powered by 1 HA pair. Second generation single-AZ file systems are powered by up to 12 HA pairs. The default value is 1. The value of this property affects the values of ``StorageCapacity`` , ``Iops`` , and ``ThroughputCapacity`` . For more information, see `High-availability (HA) pairs <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/administering-file-systems.html#HA-pairs>`_ in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see `Using block storage protocols <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/supported-fsx-clients.html#using-block-storage>`_ .

  Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  - The value of ``HAPairs`` is less than 1 or greater than 12.
- - The value of ``HAPairs`` is greater than 1 and the value of ``DeploymentType`` is ``SINGLE_AZ_1`` or ``MULTI_AZ_1`` .
+ - The value of ``HAPairs`` is greater than 1 and the value of ``DeploymentType`` is ``SINGLE_AZ_1`` , ``MULTI_AZ_1`` , or ``MULTI_AZ_2`` .

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fsx-filesystem-ontapconfiguration.html#cfn-fsx-filesystem-ontapconfiguration-hapairs
  '''
@@ -2330,7 +2331,7 @@ class CfnFileSystem(

  @builtins.property
  def preferred_subnet_id(self) -> typing.Optional[builtins.str]:
- '''Required when ``DeploymentType`` is set to ``MULTI_AZ_1`` .
+ '''Required when ``DeploymentType`` is set to ``MULTI_AZ_1`` or ``MULTI_AZ_2`` .

  This specifies the subnet in which you want the preferred file server to be located.

@@ -2375,15 +2376,16 @@ class CfnFileSystem(

  You can define either the ``ThroughputCapacityPerHAPair`` or the ``ThroughputCapacity`` when creating a file system, but not both.

- This field and ``ThroughputCapacity`` are the same for scale-up file systems powered by one HA pair.
+ This field and ``ThroughputCapacity`` are the same for file systems powered by one HA pair.

  - For ``SINGLE_AZ_1`` and ``MULTI_AZ_1`` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.
- - For ``SINGLE_AZ_2`` file systems, valid values are 3072 or 6144 MBps.
+ - For ``SINGLE_AZ_2`` , valid values are 1536, 3072, or 6144 MBps.
+ - For ``MULTI_AZ_2`` , valid values are 384, 768, 1536, 3072, or 6144 MBps.

  Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

  - The value of ``ThroughputCapacity`` and ``ThroughputCapacityPerHAPair`` are not the same value for file systems with one HA pair.
- - The value of deployment type is ``SINGLE_AZ_2`` and ``ThroughputCapacity`` / ``ThroughputCapacityPerHAPair`` is a valid HA pair (a value between 2 and 12).
+ - The value of deployment type is ``SINGLE_AZ_2`` and ``ThroughputCapacity`` / ``ThroughputCapacityPerHAPair`` is not a valid HA pair (a value between 1 and 12).
  - The value of ``ThroughputCapacityPerHAPair`` is not a valid value.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fsx-filesystem-ontapconfiguration.html#cfn-fsx-filesystem-ontapconfiguration-throughputcapacityperhapair
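
To illustrate the per-HA-pair rule above: on a second-generation ``SINGLE_AZ_2`` scale-out file system you set ``throughput_capacity_per_ha_pair`` and ``ha_pairs``, and the aggregate throughput is their product. A hedged sketch with values picked from the valid ranges quoted above::

    from aws_cdk import aws_fsx as fsx

    # 4 HA pairs x 3072 MBps per pair = 12288 MBps aggregate throughput.
    scale_out = fsx.CfnFileSystem.OntapConfigurationProperty(
        deployment_type="SINGLE_AZ_2",
        ha_pairs=4,
        throughput_capacity_per_ha_pair=3072,  # do not also set throughput_capacity
    )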
@@ -2456,7 +2458,7 @@ class CfnFileSystem(
  ) -> None:
  '''The OpenZFS configuration for the file system that's being created.

- :param deployment_type: Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an AWS Region . Valid values are the following: - ``MULTI_AZ_1`` - Creates file systems with high availability that are configured for Multi-AZ redundancy to tolerate temporary unavailability in Availability Zones (AZs). ``Multi_AZ_1`` is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) AWS Regions . - ``SINGLE_AZ_1`` - Creates file systems with throughput capacities of 64 - 4,096 MB/s. ``Single_AZ_1`` is available in all AWS Regions where Amazon FSx for OpenZFS is available. - ``SINGLE_AZ_2`` - Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. ``Single_AZ_2`` is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) AWS Regions . For more information, see `Deployment type availability <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/availability-durability.html#available-aws-regions>`_ and `File system performance <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#zfs-fs-performance>`_ in the *Amazon FSx for OpenZFS User Guide* .
+ :param deployment_type: Specifies the file system deployment type. Valid values are the following:. - ``MULTI_AZ_1`` - Creates file systems with high availability and durability by replicating your data and supporting failover across multiple Availability Zones in the same AWS Region . - ``SINGLE_AZ_HA_2`` - Creates file systems with high availability and throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache by deploying a primary and standby file system within the same Availability Zone. - ``SINGLE_AZ_HA_1`` - Creates file systems with high availability and throughput capacities of 64 - 4,096 MB/s by deploying a primary and standby file system within the same Availability Zone. - ``SINGLE_AZ_2`` - Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache that automatically recover within a single Availability Zone. - ``SINGLE_AZ_1`` - Creates file systems with throughput capacities of 64 - 4,096 MBs that automatically recover within a single Availability Zone. For a list of which AWS Regions each deployment type is available in, see `Deployment type availability <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/availability-durability.html#available-aws-regions>`_ . For more information on the differences in performance between deployment types, see `File system performance <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#zfs-fs-performance>`_ in the *Amazon FSx for OpenZFS User Guide* .
  :param automatic_backup_retention_days: The number of days to retain automatic backups. Setting this property to ``0`` disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is ``30`` .
  :param copy_tags_to_backups: A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to ``false`` . If it's set to ``true`` , all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is ``true`` , and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.
  :param copy_tags_to_volumes: A Boolean value indicating whether tags for the file system should be copied to volumes. This value defaults to ``false`` . If it's set to ``true`` , all tags for the file system are copied to volumes where the user doesn't specify tags. If this value is ``true`` , and you specify one or more tags, only the specified tags are copied to volumes. If you specify one or more tags when creating the volume, no tags are copied from the file system, regardless of this value.
@@ -2561,15 +2563,15 @@ class CfnFileSystem(

  @builtins.property
  def deployment_type(self) -> builtins.str:
- '''Specifies the file system deployment type.
-
- Single AZ deployment types are configured for redundancy within a single Availability Zone in an AWS Region . Valid values are the following:
+ '''Specifies the file system deployment type. Valid values are the following:.

- - ``MULTI_AZ_1`` - Creates file systems with high availability that are configured for Multi-AZ redundancy to tolerate temporary unavailability in Availability Zones (AZs). ``Multi_AZ_1`` is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) AWS Regions .
- - ``SINGLE_AZ_1`` - Creates file systems with throughput capacities of 64 - 4,096 MB/s. ``Single_AZ_1`` is available in all AWS Regions where Amazon FSx for OpenZFS is available.
- - ``SINGLE_AZ_2`` - Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. ``Single_AZ_2`` is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) AWS Regions .
+ - ``MULTI_AZ_1`` - Creates file systems with high availability and durability by replicating your data and supporting failover across multiple Availability Zones in the same AWS Region .
+ - ``SINGLE_AZ_HA_2`` - Creates file systems with high availability and throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache by deploying a primary and standby file system within the same Availability Zone.
+ - ``SINGLE_AZ_HA_1`` - Creates file systems with high availability and throughput capacities of 64 - 4,096 MB/s by deploying a primary and standby file system within the same Availability Zone.
+ - ``SINGLE_AZ_2`` - Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache that automatically recover within a single Availability Zone.
+ - ``SINGLE_AZ_1`` - Creates file systems with throughput capacities of 64 - 4,096 MBs that automatically recover within a single Availability Zone.

- For more information, see `Deployment type availability <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/availability-durability.html#available-aws-regions>`_ and `File system performance <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#zfs-fs-performance>`_ in the *Amazon FSx for OpenZFS User Guide* .
+ For a list of which AWS Regions each deployment type is available in, see `Deployment type availability <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/availability-durability.html#available-aws-regions>`_ . For more information on the differences in performance between deployment types, see `File system performance <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#zfs-fs-performance>`_ in the *Amazon FSx for OpenZFS User Guide* .

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fsx-filesystem-openzfsconfiguration.html#cfn-fsx-filesystem-openzfsconfiguration-deploymenttype
  '''
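
A short sketch of the OpenZFS side showing the new ``SINGLE_AZ_HA_2`` value; the ``open_zfs_configuration`` keyword and ``OpenZFSConfigurationProperty`` name follow the generated L1 bindings, and the subnet and sizing values are placeholders::

    from aws_cdk import aws_fsx as fsx

    # Primary plus standby file server in one AZ with an NVMe L2ARC cache.
    fsx.CfnFileSystem(self, "OpenZfsFileSystem",
        file_system_type="OPENZFS",
        subnet_ids=["subnet-1111"],
        storage_capacity=64,  # GiB
        open_zfs_configuration=fsx.CfnFileSystem.OpenZFSConfigurationProperty(
            deployment_type="SINGLE_AZ_HA_2",
            throughput_capacity=320,  # MB/s, within the 160 - 10,240 range above
        ),
    )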
@@ -5089,7 +5091,7 @@ class CfnVolume(
  ) -> None:
  '''Use to specify configuration options for a volume’s storage aggregate or aggregates.

- :param aggregates: The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: - The strings in the value of ``Aggregates`` are not are not formatted as ``aggrX`` , where X is a number between 1 and 6. - The value of ``Aggregates`` contains aggregates that are not present. - One or more of the aggregates supplied are too close to the volume limit to support adding more volumes.
+ :param aggregates: The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: - The strings in the value of ``Aggregates`` are not are not formatted as ``aggrX`` , where X is a number between 1 and 12. - The value of ``Aggregates`` contains aggregates that are not present. - One or more of the aggregates supplied are too close to the volume limit to support adding more volumes.
  :param constituents_per_aggregate: Used to explicitly set the number of constituents within the FlexGroup per storage aggregate. This field is optional when creating a FlexGroup volume. If unspecified, the default value will be 8. This field cannot be provided when creating a FlexVol volume.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fsx-volume-aggregateconfiguration.html
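
For the aggregate options above, a hedged sketch of an ONTAP FlexGroup volume; the property names follow the CloudFormation ``OntapConfiguration`` schema, the SVM ID and size are placeholders, and the exact set of sibling properties required depends on the volume style::

    from aws_cdk import aws_fsx as fsx

    # FlexGroup volume spread over two aggregates, eight constituents per aggregate.
    fsx.CfnVolume(self, "FlexGroupVolume",
        name="data",
        volume_type="ONTAP",
        ontap_configuration=fsx.CfnVolume.OntapConfigurationProperty(
            storage_virtual_machine_id="svm-0123456789abcdef0",  # placeholder
            junction_path="/data",
            size_in_megabytes="1048576",
            volume_style="FLEXGROUP",
            aggregate_configuration=fsx.CfnVolume.AggregateConfigurationProperty(
                aggregates=["aggr1", "aggr2"],
                constituents_per_aggregate=8,
            ),
        ),
    )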
@@ -5124,7 +5126,7 @@ class CfnVolume(

  Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:

- - The strings in the value of ``Aggregates`` are not are not formatted as ``aggrX`` , where X is a number between 1 and 6.
+ - The strings in the value of ``Aggregates`` are not are not formatted as ``aggrX`` , where X is a number between 1 and 12.
  - The value of ``Aggregates`` contains aggregates that are not present.
  - One or more of the aggregates supplied are too close to the volume limit to support adding more volumes.

@@ -6037,7 +6037,7 @@ class CfnJob(
  :param description: A description of the job.
  :param execution_class: Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. Only jobs with AWS Glue version 3.0 and above and command type ``glueetl`` will be allowed to set ``ExecutionClass`` to ``FLEX`` . The flexible execution class is available for Spark jobs.
  :param execution_property: The maximum number of concurrent runs that are allowed for this job.
- :param glue_version: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available AWS Glue versions and corresponding Spark and Python versions, see `Glue version <https://docs.aws.amazon.com/glue/latest/dg/add-job.html>`_ in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9.
+ :param glue_version: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available AWS Glue versions and corresponding Spark and Python versions, see `Glue version <https://docs.aws.amazon.com/glue/latest/dg/add-job.html>`_ in the developer guide. Jobs that are created without specifying a Glue version default to the latest Glue version available.
  :param log_uri: This field is reserved for future use.
  :param maintenance_window: This field specifies a day of the week and hour for a maintenance window for streaming jobs. AWS Glue periodically performs maintenance activities. During these maintenance windows, AWS Glue will need to restart your streaming jobs. AWS Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.
  :param max_capacity: The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. Do not set ``Max Capacity`` if using ``WorkerType`` and ``NumberOfWorkers`` . The value that can be allocated for ``MaxCapacity`` depends on whether you are running a Python shell job or an Apache Spark ETL job: - When you specify a Python shell job ( ``JobCommand.Name`` ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. - When you specify an Apache Spark ETL job ( ``JobCommand.Name`` ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
@@ -6769,7 +6769,7 @@ class CfnJobProps:
  :param description: A description of the job.
  :param execution_class: Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. Only jobs with AWS Glue version 3.0 and above and command type ``glueetl`` will be allowed to set ``ExecutionClass`` to ``FLEX`` . The flexible execution class is available for Spark jobs.
  :param execution_property: The maximum number of concurrent runs that are allowed for this job.
- :param glue_version: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available AWS Glue versions and corresponding Spark and Python versions, see `Glue version <https://docs.aws.amazon.com/glue/latest/dg/add-job.html>`_ in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9.
+ :param glue_version: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available AWS Glue versions and corresponding Spark and Python versions, see `Glue version <https://docs.aws.amazon.com/glue/latest/dg/add-job.html>`_ in the developer guide. Jobs that are created without specifying a Glue version default to the latest Glue version available.
  :param log_uri: This field is reserved for future use.
  :param maintenance_window: This field specifies a day of the week and hour for a maintenance window for streaming jobs. AWS Glue periodically performs maintenance activities. During these maintenance windows, AWS Glue will need to restart your streaming jobs. AWS Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.
  :param max_capacity: The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. Do not set ``Max Capacity`` if using ``WorkerType`` and ``NumberOfWorkers`` . The value that can be allocated for ``MaxCapacity`` depends on whether you are running a Python shell job or an Apache Spark ETL job: - When you specify a Python shell job ( ``JobCommand.Name`` ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. - When you specify an Apache Spark ETL job ( ``JobCommand.Name`` ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
@@ -6999,7 +6999,7 @@ class CfnJobProps:

  For more information about the available AWS Glue versions and corresponding Spark and Python versions, see `Glue version <https://docs.aws.amazon.com/glue/latest/dg/add-job.html>`_ in the developer guide.

- Jobs that are created without specifying a Glue version default to Glue 0.9.
+ Jobs that are created without specifying a Glue version default to the latest Glue version available.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-glue-job.html#cfn-glue-job-glueversion
  '''
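
Because the service default now floats to the latest Glue version, pinning ``glue_version`` keeps template behaviour reproducible. A hedged sketch; the job name, role ARN, and script location are placeholders::

    from aws_cdk import aws_glue as glue

    # Pin the Glue version instead of relying on the service default.
    glue.CfnJob(self, "EtlJob",
        name="nightly-etl",
        role="arn:aws:iam::111122223333:role/GlueJobRole",  # placeholder
        glue_version="4.0",
        command=glue.CfnJob.JobCommandProperty(
            name="glueetl",
            python_version="3",
            script_location="s3://my-bucket/scripts/etl.py",  # placeholder
        ),
    )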
@@ -2336,7 +2336,7 @@ class CfnMalwareProtectionPlan(
  :param scope: Scope in which this resource is defined.
  :param id: Construct identifier for this resource (unique in its scope).
  :param protected_resource: Information about the protected resource. Presently, ``S3Bucket`` is the only supported protected resource.
- :param role: IAM role that includes the permissions required to scan and (optionally) add tags to the associated protected resource.
+ :param role: Amazon Resource Name (ARN) of the IAM role that includes the permissions required to scan and (optionally) add tags to the associated protected resource. To find the ARN of your IAM role, go to the IAM console, and select the role name for details.
  :param actions: Specifies the action that is to be applied to the Malware Protection plan resource.
  :param tags: The tags to be added to the created Malware Protection plan resource. Each tag consists of a key and an optional value, both of which you need to specify.
  '''
@@ -2460,7 +2460,7 @@ class CfnMalwareProtectionPlan(
  @builtins.property
  @jsii.member(jsii_name="role")
  def role(self) -> builtins.str:
- '''IAM role that includes the permissions required to scan and (optionally) add tags to the associated protected resource.'''
+ '''Amazon Resource Name (ARN) of the IAM role that includes the permissions required to scan and (optionally) add tags to the associated protected resource.'''
  return typing.cast(builtins.str, jsii.get(self, "role"))

  @role.setter
@@ -2917,7 +2917,7 @@ class CfnMalwareProtectionPlanProps:
  '''Properties for defining a ``CfnMalwareProtectionPlan``.

  :param protected_resource: Information about the protected resource. Presently, ``S3Bucket`` is the only supported protected resource.
- :param role: IAM role that includes the permissions required to scan and (optionally) add tags to the associated protected resource.
+ :param role: Amazon Resource Name (ARN) of the IAM role that includes the permissions required to scan and (optionally) add tags to the associated protected resource. To find the ARN of your IAM role, go to the IAM console, and select the role name for details.
  :param actions: Specifies the action that is to be applied to the Malware Protection plan resource.
  :param tags: The tags to be added to the created Malware Protection plan resource. Each tag consists of a key and an optional value, both of which you need to specify.

@@ -2982,7 +2982,9 @@ class CfnMalwareProtectionPlanProps:

  @builtins.property
  def role(self) -> builtins.str:
- '''IAM role that includes the permissions required to scan and (optionally) add tags to the associated protected resource.
+ '''Amazon Resource Name (ARN) of the IAM role that includes the permissions required to scan and (optionally) add tags to the associated protected resource.
+
+ To find the ARN of your IAM role, go to the IAM console, and select the role name for details.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-guardduty-malwareprotectionplan.html#cfn-guardduty-malwareprotectionplan-role
  '''
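
A hedged sketch of the resource this property belongs to, reflecting that ``role`` is documented as a role ARN. The account ID and bucket name are placeholders, and the nested protected-resource struct is passed as a plain dict so no generated property-class names are assumed::

    from aws_cdk import aws_guardduty as guardduty

    # Malware Protection plan for an S3 bucket; role is the ARN of an IAM role
    # that has the scan (and optional tagging) permissions.
    guardduty.CfnMalwareProtectionPlan(self, "MalwarePlan",
        role="arn:aws:iam::111122223333:role/GuardDutyMalwareScanRole",  # placeholder
        protected_resource={
            "s3_bucket": {
                "bucket_name": "my-protected-bucket",  # placeholder
                "object_prefixes": ["uploads/"],
            },
        },
    )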
@@ -13298,11 +13298,14 @@ class ServicePrincipal(
  These days all service principal names are standardized, and they are all
  of the form ``<servicename>.amazonaws.com``.

- If the feature flag ``@aws-cdk/aws-iam:standardizedServicePrincipals`` is set, this
- method will always return its input. If this feature flag is not set, this
- method will perform the legacy behavior, which appends the region-specific
- domain suffix for some select services (for example, it would append ``.cn``
- to some service principal names).
+ To avoid breaking changes, handling is provided for services added with the formats below,
+ however, no additional handling will be added for new regions or partitions.
+
+ - s3
+ - s3.amazonaws.com
+ - s3.amazonaws.com.cn
+ - s3.c2s.ic.gov
+ - s3.sc2s.sgov.gov

  :param service: -
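
In practice this means new services should be referenced with the plain standardized name; the legacy spellings listed above keep working, but no new region or partition handling is added. A minimal usage sketch::

    from aws_cdk import aws_iam as iam

    # Prefer the standardized <servicename>.amazonaws.com form.
    role = iam.Role(self, "FunctionRole",
        assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
    )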
 
@@ -5555,7 +5555,7 @@ class CfnApplicationV2(

  :param application_code_configuration: The code location and type parameters for a Managed Service for Apache Flink application.
  :param application_snapshot_configuration: Describes whether snapshots are enabled for a Managed Service for Apache Flink application.
- :param application_system_rollback_configuration: Describes whether system initiated rollbacks are enabled for a Flink-based Kinesis Data Analytics application.
+ :param application_system_rollback_configuration: Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application.
  :param environment_properties: Describes execution properties for a Managed Service for Apache Flink application.
  :param flink_application_configuration: The creation and update parameters for a Managed Service for Apache Flink application.
  :param sql_application_configuration: The creation and update parameters for a SQL-based Kinesis Data Analytics application.
@@ -5767,7 +5767,7 @@ class CfnApplicationV2(
  def application_system_rollback_configuration(
  self,
  ) -> typing.Optional[typing.Union[_IResolvable_da3f097b, "CfnApplicationV2.ApplicationSystemRollbackConfigurationProperty"]]:
- '''Describes whether system initiated rollbacks are enabled for a Flink-based Kinesis Data Analytics application.
+ '''Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-applicationsystemrollbackconfiguration
  '''
@@ -6045,9 +6045,9 @@ class CfnApplicationV2(
  *,
  rollback_enabled: typing.Union[builtins.bool, _IResolvable_da3f097b],
  ) -> None:
- '''Describes whether system initiated rollbacks are enabled for a Flink-based Kinesis Data Analytics application.
+ '''Describes the system rollback configuration for a Managed Service for Apache Flink application.

- :param rollback_enabled: Describes whether system initiated rollbacks are enabled for a Flink-based Kinesis Data Analytics application.
+ :param rollback_enabled: Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationsystemrollbackconfiguration.html
  :exampleMetadata: fixture=_generated
@@ -6073,7 +6073,7 @@ class CfnApplicationV2(
  def rollback_enabled(
  self,
  ) -> typing.Union[builtins.bool, _IResolvable_da3f097b]:
- '''Describes whether system initiated rollbacks are enabled for a Flink-based Kinesis Data Analytics application.
+ '''Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationsystemrollbackconfiguration.html#cfn-kinesisanalyticsv2-application-applicationsystemrollbackconfiguration-rollbackenabled
  '''
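
A hedged sketch of where this flag is set. The struct below is the one named in this hunk; it is supplied on the application's ``application_configuration`` as ``application_system_rollback_configuration``::

    from aws_cdk import aws_kinesisanalytics as analytics

    # Opt in to system rollbacks for a Managed Service for Apache Flink application.
    rollback = analytics.CfnApplicationV2.ApplicationSystemRollbackConfigurationProperty(
        rollback_enabled=True,
    )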
@@ -6262,12 +6262,12 @@ class CfnApplicationV2(
  ) -> None:
  '''Describes an application's checkpointing configuration.

- Checkpointing is the process of persisting application state for fault tolerance. For more information, see `Checkpoints for Fault Tolerance <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/concepts/programming-model.html#checkpoints-for-fault-tolerance>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/>`_ .
+ Checkpointing is the process of persisting application state for fault tolerance. For more information, see `Checkpoints for Fault Tolerance <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master/docs/dev/datastream/fault-tolerance/checkpointing/>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master>`_ .

  :param configuration_type: Describes whether the application uses Managed Service for Apache Flink' default checkpointing behavior. You must set this property to ``CUSTOM`` in order to set the ``CheckpointingEnabled`` , ``CheckpointInterval`` , or ``MinPauseBetweenCheckpoints`` parameters. .. epigraph:: If this value is set to ``DEFAULT`` , the application will use the following values, even if they are set to other values using APIs or application code: - *CheckpointingEnabled:* true - *CheckpointInterval:* 60000 - *MinPauseBetweenCheckpoints:* 5000
  :param checkpointing_enabled: Describes whether checkpointing is enabled for a Managed Service for Apache Flink application. .. epigraph:: If ``CheckpointConfiguration.ConfigurationType`` is ``DEFAULT`` , the application will use a ``CheckpointingEnabled`` value of ``true`` , even if this value is set to another value using this API or in application code.
  :param checkpoint_interval: Describes the interval in milliseconds between checkpoint operations. .. epigraph:: If ``CheckpointConfiguration.ConfigurationType`` is ``DEFAULT`` , the application will use a ``CheckpointInterval`` value of 60000, even if this value is set to another value using this API or in application code.
- :param min_pause_between_checkpoints: Describes the minimum time in milliseconds after a checkpoint operation completes that a new checkpoint operation can start. If a checkpoint operation takes longer than the ``CheckpointInterval`` , the application otherwise performs continual checkpoint operations. For more information, see `Tuning Checkpointing <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/ops/state/large_state_tuning.html#tuning-checkpointing>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/>`_ . .. epigraph:: If ``CheckpointConfiguration.ConfigurationType`` is ``DEFAULT`` , the application will use a ``MinPauseBetweenCheckpoints`` value of 5000, even if this value is set using this API or in application code.
+ :param min_pause_between_checkpoints: Describes the minimum time in milliseconds after a checkpoint operation completes that a new checkpoint operation can start. If a checkpoint operation takes longer than the ``CheckpointInterval`` , the application otherwise performs continual checkpoint operations. For more information, see `Tuning Checkpointing <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master/docs/ops/state/large_state_tuning/#tuning-checkpointing>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master>`_ . .. epigraph:: If ``CheckpointConfiguration.ConfigurationType`` is ``DEFAULT`` , the application will use a ``MinPauseBetweenCheckpoints`` value of 5000, even if this value is set using this API or in application code.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html
  :exampleMetadata: fixture=_generated
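
A hedged sketch of a custom checkpointing configuration using the parameters listed above; the values simply restate the documented defaults, and ``CUSTOM`` is what makes them take effect::

    from aws_cdk import aws_kinesisanalytics as analytics

    checkpointing = analytics.CfnApplicationV2.CheckpointConfigurationProperty(
        configuration_type="CUSTOM",         # required before the fields below apply
        checkpointing_enabled=True,
        checkpoint_interval=60000,           # milliseconds
        min_pause_between_checkpoints=5000,  # milliseconds
    )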
@@ -6354,7 +6354,7 @@ class CfnApplicationV2(
  def min_pause_between_checkpoints(self) -> typing.Optional[jsii.Number]:
  '''Describes the minimum time in milliseconds after a checkpoint operation completes that a new checkpoint operation can start.

- If a checkpoint operation takes longer than the ``CheckpointInterval`` , the application otherwise performs continual checkpoint operations. For more information, see `Tuning Checkpointing <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/ops/state/large_state_tuning.html#tuning-checkpointing>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/>`_ .
+ If a checkpoint operation takes longer than the ``CheckpointInterval`` , the application otherwise performs continual checkpoint operations. For more information, see `Tuning Checkpointing <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master/docs/ops/state/large_state_tuning/#tuning-checkpointing>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master>`_ .
  .. epigraph::

  If ``CheckpointConfiguration.ConfigurationType`` is ``DEFAULT`` , the application will use a ``MinPauseBetweenCheckpoints`` value of 5000, even if this value is set using this API or in application code.
@@ -6835,7 +6835,7 @@ class CfnApplicationV2(
  ) -> None:
  '''Describes the starting parameters for a Managed Service for Apache Flink application.

- :param allow_non_restored_state: When restoring from a snapshot, specifies whether the runtime is allowed to skip a state that cannot be mapped to the new program. This will happen if the program is updated between snapshots to remove stateful parameters, and state data in the snapshot no longer corresponds to valid application data. For more information, see `Allowing Non-Restored State <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/ops/state/savepoints.html#allowing-non-restored-state>`_ in the `Apache Flink documentation <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/>`_ . .. epigraph:: This value defaults to ``false`` . If you update your application without specifying this parameter, ``AllowNonRestoredState`` will be set to ``false`` , even if it was previously set to ``true`` .
+ :param allow_non_restored_state: When restoring from a snapshot, specifies whether the runtime is allowed to skip a state that cannot be mapped to the new program. This will happen if the program is updated between snapshots to remove stateful parameters, and state data in the snapshot no longer corresponds to valid application data. For more information, see `Allowing Non-Restored State <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master/docs/ops/state/savepoints/#allowing-non-restored-state>`_ in the `Apache Flink documentation <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master>`_ . .. epigraph:: This value defaults to ``false`` . If you update your application without specifying this parameter, ``AllowNonRestoredState`` will be set to ``false`` , even if it was previously set to ``true`` .

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-flinkrunconfiguration.html
  :exampleMetadata: fixture=_generated
@@ -6863,7 +6863,7 @@ class CfnApplicationV2(
  ) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:
  '''When restoring from a snapshot, specifies whether the runtime is allowed to skip a state that cannot be mapped to the new program.

- This will happen if the program is updated between snapshots to remove stateful parameters, and state data in the snapshot no longer corresponds to valid application data. For more information, see `Allowing Non-Restored State <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/ops/state/savepoints.html#allowing-non-restored-state>`_ in the `Apache Flink documentation <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/>`_ .
+ This will happen if the program is updated between snapshots to remove stateful parameters, and state data in the snapshot no longer corresponds to valid application data. For more information, see `Allowing Non-Restored State <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master/docs/ops/state/savepoints/#allowing-non-restored-state>`_ in the `Apache Flink documentation <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master>`_ .
  .. epigraph::

  This value defaults to ``false`` . If you update your application without specifying this parameter, ``AllowNonRestoredState`` will be set to ``false`` , even if it was previously set to ``true`` .
@@ -7866,7 +7866,7 @@ class CfnApplicationV2(
  ) -> None:
  '''Describes parameters for how a Flink-based Kinesis Data Analytics application executes multiple tasks simultaneously.

- For more information about parallelism, see `Parallel Execution <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/dev/parallel.html>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/>`_ .
+ For more information about parallelism, see `Parallel Execution <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master/docs/dev/datastream/execution/parallel/>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master>`_ .

  :param configuration_type: Describes whether the application uses the default parallelism for the Managed Service for Apache Flink service. You must set this property to ``CUSTOM`` in order to change your application's ``AutoScalingEnabled`` , ``Parallelism`` , or ``ParallelismPerKPU`` properties.
  :param auto_scaling_enabled: Describes whether the Managed Service for Apache Flink service can increase the parallelism of the application in response to increased throughput.
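
A hedged sketch of a custom parallelism configuration; ``parallelism`` and ``parallelism_per_kpu`` mirror the ``Parallelism`` / ``ParallelismPerKPU`` properties referenced above, and ``CUSTOM`` is required before any of them can diverge from the service defaults::

    from aws_cdk import aws_kinesisanalytics as analytics

    parallelism = analytics.CfnApplicationV2.ParallelismConfigurationProperty(
        configuration_type="CUSTOM",
        auto_scaling_enabled=True,
        parallelism=4,
        parallelism_per_kpu=1,
    )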
@@ -580,7 +580,7 @@ class CfnApplication(

  :param application_code_configuration: The code location and type parameters for a Managed Service for Apache Flink application.
  :param application_snapshot_configuration: Describes whether snapshots are enabled for a Managed Service for Apache Flink application.
- :param application_system_rollback_configuration: Describes whether system initiated rollbacks are enabled for a Flink-based Kinesis Data Analytics application.
+ :param application_system_rollback_configuration: Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application.
  :param environment_properties: Describes execution properties for a Managed Service for Apache Flink application.
  :param flink_application_configuration: The creation and update parameters for a Managed Service for Apache Flink application.
  :param sql_application_configuration: The creation and update parameters for a SQL-based Kinesis Data Analytics application.
@@ -792,7 +792,7 @@ class CfnApplication(
  def application_system_rollback_configuration(
  self,
  ) -> typing.Optional[typing.Union[_IResolvable_da3f097b, "CfnApplication.ApplicationSystemRollbackConfigurationProperty"]]:
- '''Describes whether system initiated rollbacks are enabled for a Flink-based Kinesis Data Analytics application.
+ '''Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-applicationsystemrollbackconfiguration
  '''
@@ -1070,9 +1070,9 @@ class CfnApplication(
  *,
  rollback_enabled: typing.Union[builtins.bool, _IResolvable_da3f097b],
  ) -> None:
- '''Describes whether system initiated rollbacks are enabled for a Flink-based Kinesis Data Analytics application.
+ '''Describes the system rollback configuration for a Managed Service for Apache Flink application.

- :param rollback_enabled: Describes whether system initiated rollbacks are enabled for a Flink-based Kinesis Data Analytics application.
+ :param rollback_enabled: Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationsystemrollbackconfiguration.html
  :exampleMetadata: fixture=_generated
@@ -1098,7 +1098,7 @@ class CfnApplication(
  def rollback_enabled(
  self,
  ) -> typing.Union[builtins.bool, _IResolvable_da3f097b]:
- '''Describes whether system initiated rollbacks are enabled for a Flink-based Kinesis Data Analytics application.
+ '''Describes whether system rollbacks are enabled for a Managed Service for Apache Flink application.
 
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationsystemrollbackconfiguration.html#cfn-kinesisanalyticsv2-application-applicationsystemrollbackconfiguration-rollbackenabled
  '''
@@ -1287,12 +1287,12 @@ class CfnApplication(
  ) -> None:
  '''Describes an application's checkpointing configuration.
 
- Checkpointing is the process of persisting application state for fault tolerance. For more information, see `Checkpoints for Fault Tolerance <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/concepts/programming-model.html#checkpoints-for-fault-tolerance>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/>`_ .
+ Checkpointing is the process of persisting application state for fault tolerance. For more information, see `Checkpoints for Fault Tolerance <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master/docs/dev/datastream/fault-tolerance/checkpointing/>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master>`_ .
 
  :param configuration_type: Describes whether the application uses Managed Service for Apache Flink' default checkpointing behavior. You must set this property to ``CUSTOM`` in order to set the ``CheckpointingEnabled`` , ``CheckpointInterval`` , or ``MinPauseBetweenCheckpoints`` parameters. .. epigraph:: If this value is set to ``DEFAULT`` , the application will use the following values, even if they are set to other values using APIs or application code: - *CheckpointingEnabled:* true - *CheckpointInterval:* 60000 - *MinPauseBetweenCheckpoints:* 5000
  :param checkpointing_enabled: Describes whether checkpointing is enabled for a Managed Service for Apache Flink application. .. epigraph:: If ``CheckpointConfiguration.ConfigurationType`` is ``DEFAULT`` , the application will use a ``CheckpointingEnabled`` value of ``true`` , even if this value is set to another value using this API or in application code.
  :param checkpoint_interval: Describes the interval in milliseconds between checkpoint operations. .. epigraph:: If ``CheckpointConfiguration.ConfigurationType`` is ``DEFAULT`` , the application will use a ``CheckpointInterval`` value of 60000, even if this value is set to another value using this API or in application code.
- :param min_pause_between_checkpoints: Describes the minimum time in milliseconds after a checkpoint operation completes that a new checkpoint operation can start. If a checkpoint operation takes longer than the ``CheckpointInterval`` , the application otherwise performs continual checkpoint operations. For more information, see `Tuning Checkpointing <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/ops/state/large_state_tuning.html#tuning-checkpointing>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/>`_ . .. epigraph:: If ``CheckpointConfiguration.ConfigurationType`` is ``DEFAULT`` , the application will use a ``MinPauseBetweenCheckpoints`` value of 5000, even if this value is set using this API or in application code.
+ :param min_pause_between_checkpoints: Describes the minimum time in milliseconds after a checkpoint operation completes that a new checkpoint operation can start. If a checkpoint operation takes longer than the ``CheckpointInterval`` , the application otherwise performs continual checkpoint operations. For more information, see `Tuning Checkpointing <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master/docs/ops/state/large_state_tuning/#tuning-checkpointing>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master>`_ . .. epigraph:: If ``CheckpointConfiguration.ConfigurationType`` is ``DEFAULT`` , the application will use a ``MinPauseBetweenCheckpoints`` value of 5000, even if this value is set using this API or in application code.
 
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html
  :exampleMetadata: fixture=_generated
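
A minimal sketch of a `CUSTOM` checkpointing configuration in the Python bindings; the interval and pause values below simply mirror the documented defaults and are illustrative.

```python
from aws_cdk import aws_kinesisanalyticsv2 as kda

# Minimal sketch: override the default checkpointing behavior.
checkpointing = kda.CfnApplication.CheckpointConfigurationProperty(
    configuration_type="CUSTOM",
    checkpointing_enabled=True,
    checkpoint_interval=60000,           # milliseconds between checkpoints
    min_pause_between_checkpoints=5000,  # milliseconds after a checkpoint completes
)
```
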
@@ -1379,7 +1379,7 @@ class CfnApplication(
  def min_pause_between_checkpoints(self) -> typing.Optional[jsii.Number]:
  '''Describes the minimum time in milliseconds after a checkpoint operation completes that a new checkpoint operation can start.
 
- If a checkpoint operation takes longer than the ``CheckpointInterval`` , the application otherwise performs continual checkpoint operations. For more information, see `Tuning Checkpointing <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/ops/state/large_state_tuning.html#tuning-checkpointing>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/>`_ .
+ If a checkpoint operation takes longer than the ``CheckpointInterval`` , the application otherwise performs continual checkpoint operations. For more information, see `Tuning Checkpointing <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master/docs/ops/state/large_state_tuning/#tuning-checkpointing>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master>`_ .
  .. epigraph::
 
  If ``CheckpointConfiguration.ConfigurationType`` is ``DEFAULT`` , the application will use a ``MinPauseBetweenCheckpoints`` value of 5000, even if this value is set using this API or in application code.
@@ -1860,7 +1860,7 @@ class CfnApplication(
  ) -> None:
  '''Describes the starting parameters for a Managed Service for Apache Flink application.
 
- :param allow_non_restored_state: When restoring from a snapshot, specifies whether the runtime is allowed to skip a state that cannot be mapped to the new program. This will happen if the program is updated between snapshots to remove stateful parameters, and state data in the snapshot no longer corresponds to valid application data. For more information, see `Allowing Non-Restored State <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/ops/state/savepoints.html#allowing-non-restored-state>`_ in the `Apache Flink documentation <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/>`_ . .. epigraph:: This value defaults to ``false`` . If you update your application without specifying this parameter, ``AllowNonRestoredState`` will be set to ``false`` , even if it was previously set to ``true`` .
+ :param allow_non_restored_state: When restoring from a snapshot, specifies whether the runtime is allowed to skip a state that cannot be mapped to the new program. This will happen if the program is updated between snapshots to remove stateful parameters, and state data in the snapshot no longer corresponds to valid application data. For more information, see `Allowing Non-Restored State <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master/docs/ops/state/savepoints/#allowing-non-restored-state>`_ in the `Apache Flink documentation <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master>`_ . .. epigraph:: This value defaults to ``false`` . If you update your application without specifying this parameter, ``AllowNonRestoredState`` will be set to ``false`` , even if it was previously set to ``true`` .
 
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-flinkrunconfiguration.html
  :exampleMetadata: fixture=_generated
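
A minimal sketch of this run configuration in the Python bindings; note that, per the docstring above, the flag must be re-specified on every update or it falls back to `false`.

```python
from aws_cdk import aws_kinesisanalyticsv2 as kda

# Minimal sketch: allow the runtime to skip state that cannot be mapped
# to the updated program when restoring from a snapshot.
flink_run = kda.CfnApplication.FlinkRunConfigurationProperty(
    allow_non_restored_state=True,
)
```
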
@@ -1888,7 +1888,7 @@ class CfnApplication(
  ) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:
  '''When restoring from a snapshot, specifies whether the runtime is allowed to skip a state that cannot be mapped to the new program.
 
- This will happen if the program is updated between snapshots to remove stateful parameters, and state data in the snapshot no longer corresponds to valid application data. For more information, see `Allowing Non-Restored State <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/ops/state/savepoints.html#allowing-non-restored-state>`_ in the `Apache Flink documentation <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/>`_ .
+ This will happen if the program is updated between snapshots to remove stateful parameters, and state data in the snapshot no longer corresponds to valid application data. For more information, see `Allowing Non-Restored State <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master/docs/ops/state/savepoints/#allowing-non-restored-state>`_ in the `Apache Flink documentation <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master>`_ .
  .. epigraph::
 
  This value defaults to ``false`` . If you update your application without specifying this parameter, ``AllowNonRestoredState`` will be set to ``false`` , even if it was previously set to ``true`` .
@@ -2891,7 +2891,7 @@ class CfnApplication(
  ) -> None:
  '''Describes parameters for how a Flink-based Kinesis Data Analytics application executes multiple tasks simultaneously.
 
- For more information about parallelism, see `Parallel Execution <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/dev/parallel.html>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://ci.apache.org/projects/flink/flink-docs-release-1.8/>`_ .
+ For more information about parallelism, see `Parallel Execution <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master/docs/dev/datastream/execution/parallel/>`_ in the `Apache Flink Documentation <https://docs.aws.amazon.com/https://nightlies.apache.org/flink/flink-docs-master>`_ .
 
  :param configuration_type: Describes whether the application uses the default parallelism for the Managed Service for Apache Flink service. You must set this property to ``CUSTOM`` in order to change your application's ``AutoScalingEnabled`` , ``Parallelism`` , or ``ParallelismPerKPU`` properties.
  :param auto_scaling_enabled: Describes whether the Managed Service for Apache Flink service can increase the parallelism of the application in response to increased throughput.
@@ -1003,7 +1003,7 @@ managing concurrency.
 
  ## Lambda with SnapStart
 
- SnapStart is currently supported only on Java 11/Java 17 runtime. SnapStart does not support provisioned concurrency, the arm64 architecture, Amazon Elastic File System (Amazon EFS), or ephemeral storage greater than 512 MB. After you enable Lambda SnapStart for a particular Lambda function, publishing a new version of the function will trigger an optimization process.
+ SnapStart is currently supported only on Java 11 and later [Java managed runtimes](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html). SnapStart does not support provisioned concurrency, Amazon Elastic File System (Amazon EFS), or ephemeral storage greater than 512 MB. After you enable Lambda SnapStart for a particular Lambda function, publishing a new version of the function will trigger an optimization process.
 
  See [the AWS documentation](https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html) to learn more about AWS Lambda SnapStart
 
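
A hedged sketch of enabling SnapStart on a Java function with the Python bindings; the asset path and handler name are placeholders, not values from this package.

```python
from aws_cdk import Stack, aws_lambda as lambda_
from constructs import Construct

class SnapStartStack(Stack):
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        # SnapStart optimizes published versions, so pair it with a version.
        fn = lambda_.Function(
            self, "JavaFn",
            runtime=lambda_.Runtime.JAVA_17,
            code=lambda_.Code.from_asset("path/to/jar"),    # placeholder path
            handler="com.example.Handler::handleRequest",   # placeholder handler
            snap_start=lambda_.SnapStartConf.ON_PUBLISHED_VERSIONS,
        )
        # Publishing a version triggers the SnapStart optimization process.
        version = fn.current_version
```
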
@@ -1640,6 +1640,12 @@ class AdotLambdaLayerGenericVersion(
  '''
  return typing.cast("AdotLambdaLayerGenericVersion", jsii.sget(cls, "LATEST"))
 
+ @jsii.python.classproperty
+ @jsii.member(jsii_name="V0_102_1")
+ def V0_102_1(cls) -> "AdotLambdaLayerGenericVersion":
+ '''Version 0.102.1.'''
+ return typing.cast("AdotLambdaLayerGenericVersion", jsii.sget(cls, "V0_102_1"))
+
  @jsii.python.classproperty
  @jsii.member(jsii_name="V0_62_1")
  def V0_62_1(cls) -> "AdotLambdaLayerGenericVersion":
@@ -2037,6 +2043,12 @@ class AdotLambdaLayerPythonSdkVersion(
  '''Version 1.24.0.'''
  return typing.cast("AdotLambdaLayerPythonSdkVersion", jsii.sget(cls, "V1_24_0"))
 
+ @jsii.python.classproperty
+ @jsii.member(jsii_name="V1_25_0")
+ def V1_25_0(cls) -> "AdotLambdaLayerPythonSdkVersion":
+ '''Version 1.25.0.'''
+ return typing.cast("AdotLambdaLayerPythonSdkVersion", jsii.sget(cls, "V1_25_0"))
+
  @builtins.property
  @jsii.member(jsii_name="layerVersion")
  def _layer_version(self) -> builtins.str:
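
A hedged sketch of attaching the newly added ADOT Python SDK layer version to a function via the Python bindings; the asset path and handler are placeholders, and the surrounding stack is assumed.

```python
from aws_cdk import Stack, aws_lambda as lambda_
from constructs import Construct

class AdotStack(Stack):
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        lambda_.Function(
            self, "TracedFn",
            runtime=lambda_.Runtime.PYTHON_3_12,
            code=lambda_.Code.from_asset("path/to/handler"),  # placeholder path
            handler="index.handler",                          # placeholder handler
            # Instrument the function with the ADOT Python SDK layer added in this release.
            adot_instrumentation=lambda_.AdotInstrumentationConfig(
                layer_version=lambda_.AdotLayerVersion.from_python_sdk_layer_version(
                    lambda_.AdotLambdaLayerPythonSdkVersion.V1_25_0
                ),
                exec_wrapper=lambda_.AdotLambdaExecWrapper.REGULAR_HANDLER,
            ),
        )
```
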
@@ -20034,7 +20046,12 @@ class Runtime(metaclass=jsii.JSIIMeta, jsii_type="aws-cdk-lib.aws_lambda.Runtime
  @jsii.python.classproperty
  @jsii.member(jsii_name="NODEJS_16_X")
  def NODEJS_16_X(cls) -> "Runtime":
- '''The NodeJS 16.x runtime (nodejs16.x).'''
+ '''(deprecated) The NodeJS 16.x runtime (nodejs16.x).
+
+ :deprecated: Legacy runtime no longer supported by AWS Lambda. Migrate to the latest NodeJS runtime.
+
+ :stability: deprecated
+ '''
  return typing.cast("Runtime", jsii.sget(cls, "NODEJS_16_X"))
 
  @jsii.python.classproperty
@@ -48,6 +48,15 @@ publish their log group to a specific region, such as AWS Chatbot creating a log
 
  By default, the log group created by LogRetention will be retained after the stack is deleted. If the RemovalPolicy is set to DESTROY, then the log group will be deleted when the stack is deleted.
 
+ ## Log Group Class
+
+ CloudWatch Logs offers two classes of log groups:
+ 1. The CloudWatch Logs Standard log class is a full-featured option for logs that require real-time monitoring or logs that you access frequently.
+ 2. The CloudWatch Logs Infrequent Access log class is a new log class that you can use to cost-effectively consolidate your logs. This log class offers a subset of CloudWatch Logs capabilities including managed ingestion, storage, cross-account log analytics, and encryption with a lower ingestion price per GB. The Infrequent Access log class is ideal for ad-hoc querying and after-the-fact forensic analysis on infrequently accessed logs.
+
+ For more details please check: [log group class documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CloudWatch_Logs_Log_Classes.html)
+
+
  ## Resource Policy
  CloudWatch Resource Policies allow other AWS services or IAM Principals to put log events into the log groups.
53
62
  CloudWatch Resource Policies allow other AWS services or IAM Principals to put log events into the log groups.