aws-cdk-lib 2.204.0__py3-none-any.whl → 2.205.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of aws-cdk-lib might be problematic.

Files changed (48):
  1. aws_cdk/__init__.py +170 -92
  2. aws_cdk/_jsii/__init__.py +1 -1
  3. aws_cdk/_jsii/{aws-cdk-lib@2.204.0.jsii.tgz → aws-cdk-lib@2.205.0.jsii.tgz} +0 -0
  4. aws_cdk/aws_aiops/__init__.py +89 -39
  5. aws_cdk/aws_applicationautoscaling/__init__.py +2 -2
  6. aws_cdk/aws_arczonalshift/__init__.py +4 -1
  7. aws_cdk/aws_b2bi/__init__.py +32 -16
  8. aws_cdk/aws_bedrock/__init__.py +198 -10
  9. aws_cdk/aws_cassandra/__init__.py +156 -0
  10. aws_cdk/aws_cloudformation/__init__.py +74 -72
  11. aws_cdk/aws_cloudfront/__init__.py +1181 -485
  12. aws_cdk/aws_cloudfront_origins/__init__.py +26 -21
  13. aws_cdk/aws_cloudwatch/__init__.py +61 -0
  14. aws_cdk/aws_codebuild/__init__.py +216 -36
  15. aws_cdk/aws_datasync/__init__.py +2 -2
  16. aws_cdk/aws_docdb/__init__.py +78 -0
  17. aws_cdk/aws_dynamodb/__init__.py +207 -35
  18. aws_cdk/aws_ec2/__init__.py +32 -30
  19. aws_cdk/aws_ecs/__init__.py +12 -19
  20. aws_cdk/aws_emrserverless/__init__.py +5 -5
  21. aws_cdk/aws_events/__init__.py +58 -3
  22. aws_cdk/aws_events_targets/__init__.py +7 -2
  23. aws_cdk/aws_evs/__init__.py +7 -7
  24. aws_cdk/aws_fsx/__init__.py +138 -78
  25. aws_cdk/aws_gamelift/__init__.py +19 -0
  26. aws_cdk/aws_glue/__init__.py +3 -3
  27. aws_cdk/aws_iot/__init__.py +1 -1
  28. aws_cdk/aws_kinesis/__init__.py +67 -13
  29. aws_cdk/aws_kinesisfirehose/__init__.py +28 -1
  30. aws_cdk/aws_lex/__init__.py +36 -19
  31. aws_cdk/aws_neptune/__init__.py +12 -12
  32. aws_cdk/aws_odb/__init__.py +4049 -0
  33. aws_cdk/aws_omics/__init__.py +1 -1
  34. aws_cdk/aws_qbusiness/__init__.py +471 -4
  35. aws_cdk/aws_quicksight/__init__.py +185 -16
  36. aws_cdk/aws_rds/__init__.py +169 -17
  37. aws_cdk/aws_redshiftserverless/__init__.py +72 -45
  38. aws_cdk/aws_route53/__init__.py +41 -19
  39. aws_cdk/aws_s3tables/__init__.py +1005 -0
  40. aws_cdk/aws_sagemaker/__init__.py +20 -0
  41. aws_cdk/aws_synthetics/__init__.py +141 -37
  42. aws_cdk/aws_transfer/__init__.py +23 -1
  43. {aws_cdk_lib-2.204.0.dist-info → aws_cdk_lib-2.205.0.dist-info}/METADATA +1 -1
  44. {aws_cdk_lib-2.204.0.dist-info → aws_cdk_lib-2.205.0.dist-info}/RECORD +48 -47
  45. {aws_cdk_lib-2.204.0.dist-info → aws_cdk_lib-2.205.0.dist-info}/LICENSE +0 -0
  46. {aws_cdk_lib-2.204.0.dist-info → aws_cdk_lib-2.205.0.dist-info}/NOTICE +0 -0
  47. {aws_cdk_lib-2.204.0.dist-info → aws_cdk_lib-2.205.0.dist-info}/WHEEL +0 -0
  48. {aws_cdk_lib-2.204.0.dist-info → aws_cdk_lib-2.205.0.dist-info}/top_level.txt +0 -0
aws_cdk/aws_gamelift/__init__.py

@@ -1041,6 +1041,7 @@ class CfnContainerFleet(
  )],
  log_configuration=gamelift.CfnContainerFleet.LogConfigurationProperty(
  log_destination="logDestination",
+ log_group_arn="logGroupArn",
  s3_bucket_name="s3BucketName"
  ),
  metric_groups=["metricGroups"],

@@ -2155,6 +2156,7 @@ class CfnContainerFleet(
  jsii_struct_bases=[],
  name_mapping={
  "log_destination": "logDestination",
+ "log_group_arn": "logGroupArn",
  "s3_bucket_name": "s3BucketName",
  },
  )

@@ -2163,6 +2165,7 @@ class CfnContainerFleet(
  self,
  *,
  log_destination: typing.Optional[builtins.str] = None,
+ log_group_arn: typing.Optional[builtins.str] = None,
  s3_bucket_name: typing.Optional[builtins.str] = None,
  ) -> None:
  '''A method for collecting container logs for the fleet.

@@ -2170,6 +2173,7 @@ class CfnContainerFleet(
  Amazon GameLift Servers saves all standard output for each container in logs, including game session logs. You can select from the following methods:

  :param log_destination: The type of log collection to use for a fleet. - ``CLOUDWATCH`` -- (default value) Send logs to an Amazon CloudWatch log group that you define. Each container emits a log stream, which is organized in the log group. - ``S3`` -- Store logs in an Amazon S3 bucket that you define. This bucket must reside in the fleet's home AWS Region. - ``NONE`` -- Don't collect container logs.
+ :param log_group_arn: If log destination is ``CLOUDWATCH`` , logs are sent to the specified log group in Amazon CloudWatch.
  :param s3_bucket_name: If log destination is ``S3`` , logs are sent to the specified Amazon S3 bucket name.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-containerfleet-logconfiguration.html

@@ -2183,16 +2187,20 @@ class CfnContainerFleet(

  log_configuration_property = gamelift.CfnContainerFleet.LogConfigurationProperty(
  log_destination="logDestination",
+ log_group_arn="logGroupArn",
  s3_bucket_name="s3BucketName"
  )
  '''
  if __debug__:
  type_hints = typing.get_type_hints(_typecheckingstub__287854cb20cec032892a9072e611adf774cd43dcc343c3ffdda539be73bce563)
  check_type(argname="argument log_destination", value=log_destination, expected_type=type_hints["log_destination"])
+ check_type(argname="argument log_group_arn", value=log_group_arn, expected_type=type_hints["log_group_arn"])
  check_type(argname="argument s3_bucket_name", value=s3_bucket_name, expected_type=type_hints["s3_bucket_name"])
  self._values: typing.Dict[builtins.str, typing.Any] = {}
  if log_destination is not None:
  self._values["log_destination"] = log_destination
+ if log_group_arn is not None:
+ self._values["log_group_arn"] = log_group_arn
  if s3_bucket_name is not None:
  self._values["s3_bucket_name"] = s3_bucket_name

@@ -2209,6 +2217,15 @@ class CfnContainerFleet(
  result = self._values.get("log_destination")
  return typing.cast(typing.Optional[builtins.str], result)

+ @builtins.property
+ def log_group_arn(self) -> typing.Optional[builtins.str]:
+ '''If log destination is ``CLOUDWATCH`` , logs are sent to the specified log group in Amazon CloudWatch.
+
+ :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-gamelift-containerfleet-logconfiguration.html#cfn-gamelift-containerfleet-logconfiguration-loggrouparn
+ '''
+ result = self._values.get("log_group_arn")
+ return typing.cast(typing.Optional[builtins.str], result)
+
  @builtins.property
  def s3_bucket_name(self) -> typing.Optional[builtins.str]:
  '''If log destination is ``S3`` , logs are sent to the specified Amazon S3 bucket name.

@@ -2617,6 +2634,7 @@ class CfnContainerFleetProps:
  )],
  log_configuration=gamelift.CfnContainerFleet.LogConfigurationProperty(
  log_destination="logDestination",
+ log_group_arn="logGroupArn",
  s3_bucket_name="s3BucketName"
  ),
  metric_groups=["metricGroups"],

@@ -11015,6 +11033,7 @@ def _typecheckingstub__6f0d55ebb1c8fdd9364a92df8152f6c91294e481bedd64b3458f3dff3
  def _typecheckingstub__287854cb20cec032892a9072e611adf774cd43dcc343c3ffdda539be73bce563(
  *,
  log_destination: typing.Optional[builtins.str] = None,
+ log_group_arn: typing.Optional[builtins.str] = None,
  s3_bucket_name: typing.Optional[builtins.str] = None,
  ) -> None:
  """Type checking stubs"""
aws_cdk/aws_glue/__init__.py

@@ -7154,7 +7154,7 @@ class CfnJob(
  :param security_configuration: The name of the ``SecurityConfiguration`` structure to be used with this job.
  :param tags: The tags to use with this job.
  :param timeout: The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
- :param worker_type: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. - For the ``G.1X`` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. - For the ``G.2X`` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. - For the ``G.4X`` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). - For the ``G.8X`` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the ``G.4X`` worker type. - For the ``G.025X`` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for AWS Glue version 3.0 or later streaming jobs. - For the ``Z.2X`` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.
+ :param worker_type: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. - For the ``G.1X`` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. - For the ``G.2X`` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. - For the ``G.4X`` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (N. California), US West (Oregon), Asia Pacific (Mumbai), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), Europe (London), Europe (Spain), Europe (Stockholm), and South America (São Paulo). - For the ``G.8X`` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the ``G.4X`` worker type. - For the ``G.025X`` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for AWS Glue version 3.0 or later streaming jobs. - For the ``Z.2X`` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.
  '''
  if __debug__:
  type_hints = typing.get_type_hints(_typecheckingstub__2bea698eff4ea1d2bc08b1ab842f318f77ba719c0241a0959453e26989b5b53e)

@@ -7919,7 +7919,7 @@ class CfnJobProps:
  :param security_configuration: The name of the ``SecurityConfiguration`` structure to be used with this job.
  :param tags: The tags to use with this job.
  :param timeout: The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
- :param worker_type: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. - For the ``G.1X`` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. - For the ``G.2X`` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. - For the ``G.4X`` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). - For the ``G.8X`` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the ``G.4X`` worker type. - For the ``G.025X`` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for AWS Glue version 3.0 or later streaming jobs. - For the ``Z.2X`` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.
+ :param worker_type: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. - For the ``G.1X`` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. - For the ``G.2X`` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. - For the ``G.4X`` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (N. California), US West (Oregon), Asia Pacific (Mumbai), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), Europe (London), Europe (Spain), Europe (Stockholm), and South America (São Paulo). - For the ``G.8X`` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the ``G.4X`` worker type. - For the ``G.025X`` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for AWS Glue version 3.0 or later streaming jobs. - For the ``Z.2X`` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-glue-job.html
  :exampleMetadata: fixture=_generated

@@ -8308,7 +8308,7 @@ class CfnJobProps:

  - For the ``G.1X`` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
  - For the ``G.2X`` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
- - For the ``G.4X`` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
+ - For the ``G.4X`` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (N. California), US West (Oregon), Asia Pacific (Mumbai), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), Europe (London), Europe (Spain), Europe (Stockholm), and South America (São Paulo).
  - For the ``G.8X`` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the ``G.4X`` worker type.
  - For the ``G.025X`` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for AWS Glue version 3.0 or later streaming jobs.
  - For the ``Z.2X`` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.
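
These three hunks are documentation-only: they widen the Region list documented for the `G.4X` worker type, while the `worker_type` property itself is unchanged. For orientation, a sketch of where that property sits on the L1 construct; the role ARN and script location are placeholders, not values from this diff:

```python
import aws_cdk.aws_glue as glue
# self: Stack

glue.CfnJob(self, "EtlJob",
    role="arn:aws:iam::123456789012:role/GlueJobRole",  # placeholder
    command=glue.CfnJob.JobCommandProperty(
        name="glueetl",
        script_location="s3://amzn-s3-demo-bucket/scripts/job.py"  # placeholder
    ),
    glue_version="4.0",
    # G.4X: 4 DPU per worker; Glue 3.0+ Spark ETL only, in the Regions listed above
    worker_type="G.4X",
    number_of_workers=4
)
```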
aws_cdk/aws_iot/__init__.py

@@ -7712,7 +7712,7 @@ class CfnLogging(

  .. epigraph::

- If you already set the log function of AWS IoT Core , you can't deploy the AWS Cloud Development Kit (AWS CDK) to change the logging settings. You can change the logging settings by either:
+ If you already set the log function of AWS IoT Core , you can't deploy the Cloud Development Kit to change the logging settings. You can change the logging settings by either:

  - Importing the existing logging resource into your AWS CloudFormation stack, such as with the `infrastructure as code generator (IaC generator) <https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/generate-IaC.html>`_ .
  - Calling ``aws iot set-v2-logging-options --disable-all-logs`` before creating a new CloudFormation stack. This command disables all AWS IoT logging. As a result, no AWS IoT logs will be delivered to Amazon CloudWatch until you re-enable logging.
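
The reworded epigraph describes a deployment constraint rather than an API change: IoT account logging is account-level, so a stack that defines `CfnLogging` fails if logging was already configured outside CloudFormation. A sketch of the construct the note refers to, assuming logging was first disabled or imported as described; the role and log level are assumed values:

```python
import aws_cdk.aws_iot as iot
# self: Stack
# logging_role: iam.Role (hypothetical role assumable by iot.amazonaws.com)

# Deploys only if account-level IoT logging is not already set; otherwise run
# `aws iot set-v2-logging-options --disable-all-logs` or import the resource first.
iot.CfnLogging(self, "AccountLogging",
    account_id=self.account,
    default_log_level="WARN",
    role_arn=logging_role.role_arn
)
```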
aws_cdk/aws_kinesis/__init__.py

@@ -342,6 +342,7 @@ from .. import (
  IResolvable as _IResolvable_da3f097b,
  IResource as _IResource_c80c4260,
  ITaggable as _ITaggable_36806126,
+ ITaggableV2 as _ITaggableV2_4e6798f8,
  RemovalPolicy as _RemovalPolicy_9f93c814,
  Resource as _Resource_45bc6135,
  TagManager as _TagManager_0a598cb3,

@@ -938,7 +939,7 @@ class CfnStream(
  )


- @jsii.implements(_IInspectable_c2943556)
+ @jsii.implements(_IInspectable_c2943556, _ITaggableV2_4e6798f8)
  class CfnStreamConsumer(
  _CfnResource_9df397a6,
  metaclass=jsii.JSIIMeta,

@@ -964,7 +965,13 @@

  cfn_stream_consumer = kinesis.CfnStreamConsumer(self, "MyCfnStreamConsumer",
  consumer_name="consumerName",
- stream_arn="streamArn"
+ stream_arn="streamArn",
+
+ # the properties below are optional
+ tags=[CfnTag(
+ key="key",
+ value="value"
+ )]
  )
  '''

@@ -975,19 +982,21 @@
  *,
  consumer_name: builtins.str,
  stream_arn: builtins.str,
+ tags: typing.Optional[typing.Sequence[typing.Union[_CfnTag_f6864754, typing.Dict[builtins.str, typing.Any]]]] = None,
  ) -> None:
  '''
  :param scope: Scope in which this resource is defined.
  :param id: Construct identifier for this resource (unique in its scope).
  :param consumer_name: The name of the consumer is something you choose when you register the consumer.
  :param stream_arn: The ARN of the stream with which you registered the consumer.
+ :param tags: An array of tags to be added to a specified Kinesis resource. A tag consists of a required key and an optional value. You can specify up to 50 tag key-value pairs.
  '''
  if __debug__:
  type_hints = typing.get_type_hints(_typecheckingstub__40129c585ad941f6708a726728b53f8f98c392e9aac49550e397150ee353888f)
  check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
  check_type(argname="argument id", value=id, expected_type=type_hints["id"])
  props = CfnStreamConsumerProps(
- consumer_name=consumer_name, stream_arn=stream_arn
+ consumer_name=consumer_name, stream_arn=stream_arn, tags=tags
  )

  jsii.create(self.__class__, self, [scope, id, props])
@@ -1062,14 +1071,6 @@
  '''
  return typing.cast(builtins.str, jsii.get(self, "attrConsumerStatus"))

- @builtins.property
- @jsii.member(jsii_name="attrId")
- def attr_id(self) -> builtins.str:
- '''
- :cloudformationAttribute: Id
- '''
- return typing.cast(builtins.str, jsii.get(self, "attrId"))
-
  @builtins.property
  @jsii.member(jsii_name="attrStreamArn")
  def attr_stream_arn(self) -> builtins.str:

@@ -1079,6 +1080,12 @@
  '''
  return typing.cast(builtins.str, jsii.get(self, "attrStreamArn"))

+ @builtins.property
+ @jsii.member(jsii_name="cdkTagManager")
+ def cdk_tag_manager(self) -> _TagManager_0a598cb3:
+ '''Tag Manager which manages the tags for this resource.'''
+ return typing.cast(_TagManager_0a598cb3, jsii.get(self, "cdkTagManager"))
+
  @builtins.property
  @jsii.member(jsii_name="cfnProperties")
  def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:

@@ -1110,11 +1117,28 @@
  check_type(argname="argument value", value=value, expected_type=type_hints["value"])
  jsii.set(self, "streamArn", value) # pyright: ignore[reportArgumentType]

+ @builtins.property
+ @jsii.member(jsii_name="tags")
+ def tags(self) -> typing.Optional[typing.List[_CfnTag_f6864754]]:
+ '''An array of tags to be added to a specified Kinesis resource.'''
+ return typing.cast(typing.Optional[typing.List[_CfnTag_f6864754]], jsii.get(self, "tags"))
+
+ @tags.setter
+ def tags(self, value: typing.Optional[typing.List[_CfnTag_f6864754]]) -> None:
+ if __debug__:
+ type_hints = typing.get_type_hints(_typecheckingstub__741a97d9ae28403dd10b071c7777bb76448096ad2b30f06325c121d8271174db)
+ check_type(argname="argument value", value=value, expected_type=type_hints["value"])
+ jsii.set(self, "tags", value) # pyright: ignore[reportArgumentType]
+

  @jsii.data_type(
  jsii_type="aws-cdk-lib.aws_kinesis.CfnStreamConsumerProps",
  jsii_struct_bases=[],
- name_mapping={"consumer_name": "consumerName", "stream_arn": "streamArn"},
+ name_mapping={
+ "consumer_name": "consumerName",
+ "stream_arn": "streamArn",
+ "tags": "tags",
+ },
  )
  class CfnStreamConsumerProps:
  def __init__(
@@ -1122,11 +1146,13 @@ class CfnStreamConsumerProps:
  *,
  consumer_name: builtins.str,
  stream_arn: builtins.str,
+ tags: typing.Optional[typing.Sequence[typing.Union[_CfnTag_f6864754, typing.Dict[builtins.str, typing.Any]]]] = None,
  ) -> None:
  '''Properties for defining a ``CfnStreamConsumer``.

  :param consumer_name: The name of the consumer is something you choose when you register the consumer.
  :param stream_arn: The ARN of the stream with which you registered the consumer.
+ :param tags: An array of tags to be added to a specified Kinesis resource. A tag consists of a required key and an optional value. You can specify up to 50 tag key-value pairs.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-streamconsumer.html
  :exampleMetadata: fixture=_generated

@@ -1139,17 +1165,26 @@ class CfnStreamConsumerProps:

  cfn_stream_consumer_props = kinesis.CfnStreamConsumerProps(
  consumer_name="consumerName",
- stream_arn="streamArn"
+ stream_arn="streamArn",
+
+ # the properties below are optional
+ tags=[CfnTag(
+ key="key",
+ value="value"
+ )]
  )
  '''
  if __debug__:
  type_hints = typing.get_type_hints(_typecheckingstub__04af0c0cee5710afdb7b50f59ef3686da8bba1bf2ed3a56c1e5209c6859bca6f)
  check_type(argname="argument consumer_name", value=consumer_name, expected_type=type_hints["consumer_name"])
  check_type(argname="argument stream_arn", value=stream_arn, expected_type=type_hints["stream_arn"])
+ check_type(argname="argument tags", value=tags, expected_type=type_hints["tags"])
  self._values: typing.Dict[builtins.str, typing.Any] = {
  "consumer_name": consumer_name,
  "stream_arn": stream_arn,
  }
+ if tags is not None:
+ self._values["tags"] = tags

  @builtins.property
  def consumer_name(self) -> builtins.str:

@@ -1171,6 +1206,17 @@ class CfnStreamConsumerProps:
  assert result is not None, "Required property 'stream_arn' is missing"
  return typing.cast(builtins.str, result)

+ @builtins.property
+ def tags(self) -> typing.Optional[typing.List[_CfnTag_f6864754]]:
+ '''An array of tags to be added to a specified Kinesis resource.
+
+ A tag consists of a required key and an optional value. You can specify up to 50 tag key-value pairs.
+
+ :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-streamconsumer.html#cfn-kinesis-streamconsumer-tags
+ '''
+ result = self._values.get("tags")
+ return typing.cast(typing.Optional[typing.List[_CfnTag_f6864754]], result)
+
  def __eq__(self, rhs: typing.Any) -> builtins.bool:
  return isinstance(rhs, self.__class__) and rhs._values == self._values
@@ -5771,6 +5817,7 @@ def _typecheckingstub__40129c585ad941f6708a726728b53f8f98c392e9aac49550e397150ee
  *,
  consumer_name: builtins.str,
  stream_arn: builtins.str,
+ tags: typing.Optional[typing.Sequence[typing.Union[_CfnTag_f6864754, typing.Dict[builtins.str, typing.Any]]]] = None,
  ) -> None:
  """Type checking stubs"""
  pass

@@ -5799,10 +5846,17 @@ def _typecheckingstub__decaab8d4ef9478c7a29a7262bbddccc719545cf6499f4ac625d9c446
  """Type checking stubs"""
  pass

+ def _typecheckingstub__741a97d9ae28403dd10b071c7777bb76448096ad2b30f06325c121d8271174db(
+ value: typing.Optional[typing.List[_CfnTag_f6864754]],
+ ) -> None:
+ """Type checking stubs"""
+ pass
+
  def _typecheckingstub__04af0c0cee5710afdb7b50f59ef3686da8bba1bf2ed3a56c1e5209c6859bca6f(
  *,
  consumer_name: builtins.str,
  stream_arn: builtins.str,
+ tags: typing.Optional[typing.Sequence[typing.Union[_CfnTag_f6864754, typing.Dict[builtins.str, typing.Any]]]] = None,
  ) -> None:
  """Type checking stubs"""
  pass
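
Net effect of the aws_kinesis hunks: `CfnStreamConsumer` drops the stale `attrId` attribute, gains a `tags` property, and now implements `ITaggableV2` through `cdkTagManager`, so the `Tags` aspect can reach it. A short sketch of the new surface; the stream, names, and tag values are assumptions:

```python
from aws_cdk import CfnTag, Tags
import aws_cdk.aws_kinesis as kinesis
# self: Stack

stream = kinesis.Stream(self, "Stream")

consumer = kinesis.CfnStreamConsumer(self, "Consumer",
    consumer_name="analytics-consumer",
    stream_arn=stream.stream_arn,
    # New in 2.205.0: up to 50 tags at registration time
    tags=[CfnTag(key="team", value="data-platform")]
)

# Also possible now that the class implements ITaggableV2:
Tags.of(consumer).add("env", "prod")
```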
aws_cdk/aws_kinesisfirehose/__init__.py

@@ -105,11 +105,14 @@ will be used for files successfully delivered to S3. `errorOutputPrefix` will be
  failed records before writing them to S3.

  ```python
+ from aws_cdk import TimeZone
  # bucket: s3.Bucket

  s3_destination = firehose.S3Bucket(bucket,
  data_output_prefix="myFirehose/DeliveredYear=!{timestamp:yyyy}/anyMonth/rand=!{firehose:random-string}",
- error_output_prefix="myFirehoseFailures/!{firehose:error-output-type}/!{timestamp:yyyy}/anyMonth/!{timestamp:dd}"
+ error_output_prefix="myFirehoseFailures/!{firehose:error-output-type}/!{timestamp:yyyy}/anyMonth/!{timestamp:dd}",
+ # The time zone of timestamps (default UTC)
+ time_zone=TimeZone.ASIA_TOKYO
  )
  ```

@@ -500,6 +503,7 @@ delivery_stream = firehose.DeliveryStream(stack, "DeliveryStream",
  data_output_prefix="regularPrefix",
  error_output_prefix="errorPrefix",
  file_extension=".log.gz",
+ time_zone=cdk.TimeZone.ASIA_TOKYO,
  buffering_interval=cdk.Duration.seconds(60),
  buffering_size=cdk.Size.mebibytes(1),
  encryption_key=key,
@@ -689,6 +693,7 @@ from .. import (
  Resource as _Resource_45bc6135,
  Size as _Size_7b441c34,
  TagManager as _TagManager_0a598cb3,
+ TimeZone as _TimeZone_cdd72ac9,
  TreeInspector as _TreeInspector_488e0dd5,
  )
  from ..aws_cloudwatch import (

@@ -11751,6 +11756,7 @@ class S3Bucket(
  bucket: _IBucket_42e086fd,
  *,
  file_extension: typing.Optional[builtins.str] = None,
+ time_zone: typing.Optional[_TimeZone_cdd72ac9] = None,
  buffering_interval: typing.Optional[_Duration_4839e8c3] = None,
  buffering_size: typing.Optional[_Size_7b441c34] = None,
  compression: typing.Optional[Compression] = None,

@@ -11765,6 +11771,7 @@
  '''
  :param bucket: -
  :param file_extension: Specify a file extension. It will override the default file extension appended by Data Format Conversion or S3 compression features such as ``.parquet`` or ``.gz``. File extension must start with a period (``.``) and can contain allowed characters: ``0-9a-z!-_.*'()``. Default: - The default file extension appended by Data Format Conversion or S3 compression features
+ :param time_zone: The time zone you prefer. Default: - UTC
  :param buffering_interval: The length of time that Firehose buffers incoming data before delivering it to the S3 bucket. Minimum: Duration.seconds(0) Maximum: Duration.seconds(900) Default: Duration.seconds(300)
  :param buffering_size: The size of the buffer that Amazon Data Firehose uses for incoming data before delivering it to the S3 bucket. Minimum: Size.mebibytes(1) Maximum: Size.mebibytes(128) Default: Size.mebibytes(5)
  :param compression: The type of compression that Amazon Data Firehose uses to compress the data that it delivers to the Amazon S3 bucket. The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift destinations because they are not supported by the Amazon Redshift COPY operation that reads from the S3 bucket. Default: - UNCOMPRESSED

@@ -11781,6 +11788,7 @@
  check_type(argname="argument bucket", value=bucket, expected_type=type_hints["bucket"])
  props = S3BucketProps(
  file_extension=file_extension,
+ time_zone=time_zone,
  buffering_interval=buffering_interval,
  buffering_size=buffering_size,
  compression=compression,
@@ -11826,6 +11834,7 @@
  "role": "role",
  "s3_backup": "s3Backup",
  "file_extension": "fileExtension",
+ "time_zone": "timeZone",
  },
  )
  class S3BucketProps(CommonDestinationS3Props, CommonDestinationProps):

@@ -11843,6 +11852,7 @@ class S3BucketProps(CommonDestinationS3Props, CommonDestinationProps):
  role: typing.Optional[_IRole_235f5d8e] = None,
  s3_backup: typing.Optional[typing.Union[DestinationS3BackupProps, typing.Dict[builtins.str, typing.Any]]] = None,
  file_extension: typing.Optional[builtins.str] = None,
+ time_zone: typing.Optional[_TimeZone_cdd72ac9] = None,
  ) -> None:
  '''Props for defining an S3 destination of an Amazon Data Firehose delivery stream.


@@ -11857,6 +11867,7 @@ class S3BucketProps(CommonDestinationS3Props, CommonDestinationProps):
  :param role: The IAM role associated with this destination. Assumed by Amazon Data Firehose to invoke processors and write to destinations Default: - a role will be created with default permissions.
  :param s3_backup: The configuration for backing up source records to S3. Default: - source records will not be backed up to S3.
  :param file_extension: Specify a file extension. It will override the default file extension appended by Data Format Conversion or S3 compression features such as ``.parquet`` or ``.gz``. File extension must start with a period (``.``) and can contain allowed characters: ``0-9a-z!-_.*'()``. Default: - The default file extension appended by Data Format Conversion or S3 compression features
+ :param time_zone: The time zone you prefer. Default: - UTC

  :exampleMetadata: infused


@@ -11897,6 +11908,7 @@ class S3BucketProps(CommonDestinationS3Props, CommonDestinationProps):
  check_type(argname="argument role", value=role, expected_type=type_hints["role"])
  check_type(argname="argument s3_backup", value=s3_backup, expected_type=type_hints["s3_backup"])
  check_type(argname="argument file_extension", value=file_extension, expected_type=type_hints["file_extension"])
+ check_type(argname="argument time_zone", value=time_zone, expected_type=type_hints["time_zone"])
  self._values: typing.Dict[builtins.str, typing.Any] = {}
  if buffering_interval is not None:
  self._values["buffering_interval"] = buffering_interval

@@ -11920,6 +11932,8 @@ class S3BucketProps(CommonDestinationS3Props, CommonDestinationProps):
  self._values["s3_backup"] = s3_backup
  if file_extension is not None:
  self._values["file_extension"] = file_extension
+ if time_zone is not None:
+ self._values["time_zone"] = time_zone

  @builtins.property
  def buffering_interval(self) -> typing.Optional[_Duration_4839e8c3]:
@@ -12046,6 +12060,17 @@ class S3BucketProps(CommonDestinationS3Props, CommonDestinationProps):
  result = self._values.get("file_extension")
  return typing.cast(typing.Optional[builtins.str], result)

+ @builtins.property
+ def time_zone(self) -> typing.Optional[_TimeZone_cdd72ac9]:
+ '''The time zone you prefer.
+
+ :default: - UTC
+
+ :see: https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html#timestamp-namespace
+ '''
+ result = self._values.get("time_zone")
+ return typing.cast(typing.Optional[_TimeZone_cdd72ac9], result)
+
  def __eq__(self, rhs: typing.Any) -> builtins.bool:
  return isinstance(rhs, self.__class__) and rhs._values == self._values


@@ -13766,6 +13791,7 @@ def _typecheckingstub__a2eaf455255fc260033aa24d456779f4b21172e8b4cf2c51f6355f415
  bucket: _IBucket_42e086fd,
  *,
  file_extension: typing.Optional[builtins.str] = None,
+ time_zone: typing.Optional[_TimeZone_cdd72ac9] = None,
  buffering_interval: typing.Optional[_Duration_4839e8c3] = None,
  buffering_size: typing.Optional[_Size_7b441c34] = None,
  compression: typing.Optional[Compression] = None,

@@ -13799,6 +13825,7 @@ def _typecheckingstub__04b12dc503479d22af2396c4df8d38c37536719187eef6ddd01c18b52
  role: typing.Optional[_IRole_235f5d8e] = None,
  s3_backup: typing.Optional[typing.Union[DestinationS3BackupProps, typing.Dict[builtins.str, typing.Any]]] = None,
  file_extension: typing.Optional[builtins.str] = None,
+ time_zone: typing.Optional[_TimeZone_cdd72ac9] = None,
  ) -> None:
  """Type checking stubs"""
  pass
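
The new `time_zone` prop threads from `S3BucketProps` down to the delivery stream's S3 destination and only changes how the `!{timestamp:...}` expressions in the prefixes are rendered; record content is untouched. A sketch combining it with prefix expressions, in the spirit of the README excerpt above; the bucket and prefix values are assumptions:

```python
import aws_cdk as cdk
import aws_cdk.aws_kinesisfirehose as firehose
# self: Stack
# bucket: s3.Bucket

destination = firehose.S3Bucket(bucket,
    data_output_prefix="data/year=!{timestamp:yyyy}/month=!{timestamp:MM}/",
    error_output_prefix="errors/!{firehose:error-output-type}/",
    # The timestamps above render in JST instead of the UTC default
    time_zone=cdk.TimeZone.ASIA_TOKYO
)

firehose.DeliveryStream(self, "DeliveryStream", destination=destination)
```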
aws_cdk/aws_lex/__init__.py

@@ -1784,7 +1784,8 @@ class CfnBot(
  descriptive_bot_builder_specification: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union["CfnBot.DescriptiveBotBuilderSpecificationProperty", typing.Dict[builtins.str, typing.Any]]]] = None,
  sample_utterance_generation_specification: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union["CfnBot.SampleUtteranceGenerationSpecificationProperty", typing.Dict[builtins.str, typing.Any]]]] = None,
  ) -> None:
- '''
+ '''Contains specifications about the Amazon Lex build time generative AI capabilities from Amazon Bedrock that you can turn on for your bot.
+
  :param descriptive_bot_builder_specification:
  :param sample_utterance_generation_specification:

@@ -3430,9 +3431,10 @@
  enabled: typing.Union[builtins.bool, _IResolvable_da3f097b],
  bedrock_model_specification: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union["CfnBot.BedrockModelSpecificationProperty", typing.Dict[builtins.str, typing.Any]]]] = None,
  ) -> None:
- '''
- :param enabled:
- :param bedrock_model_specification:
+ '''Contains specifications for the descriptive bot building feature.
+
+ :param enabled: Specifies whether the descriptive bot building feature is activated or not.
+ :param bedrock_model_specification: An object containing information about the Amazon Bedrock model used to interpret the prompt used in descriptive bot building.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lex-bot-descriptivebotbuilderspecification.html
  :exampleMetadata: fixture=_generated

@@ -3472,7 +3474,8 @@

  @builtins.property
  def enabled(self) -> typing.Union[builtins.bool, _IResolvable_da3f097b]:
- '''
+ '''Specifies whether the descriptive bot building feature is activated or not.
+
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lex-bot-descriptivebotbuilderspecification.html#cfn-lex-bot-descriptivebotbuilderspecification-enabled
  '''
  result = self._values.get("enabled")

@@ -3483,7 +3486,8 @@
  def bedrock_model_specification(
  self,
  ) -> typing.Optional[typing.Union[_IResolvable_da3f097b, "CfnBot.BedrockModelSpecificationProperty"]]:
- '''
+ '''An object containing information about the Amazon Bedrock model used to interpret the prompt used in descriptive bot building.
+
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lex-bot-descriptivebotbuilderspecification.html#cfn-lex-bot-descriptivebotbuilderspecification-bedrockmodelspecification
  '''
  result = self._values.get("bedrock_model_specification")

@@ -4727,7 +4731,8 @@
  buildtime_settings: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union["CfnBot.BuildtimeSettingsProperty", typing.Dict[builtins.str, typing.Any]]]] = None,
  runtime_settings: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union["CfnBot.RuntimeSettingsProperty", typing.Dict[builtins.str, typing.Any]]]] = None,
  ) -> None:
- '''
+ '''Contains specifications about the generative AI capabilities from Amazon Bedrock that you can turn on for your bot.
+
  :param buildtime_settings:
  :param runtime_settings:
@@ -6825,8 +6830,11 @@
  *,
  enabled: typing.Union[builtins.bool, _IResolvable_da3f097b],
  ) -> None:
- '''
- :param enabled:
+ '''Configures the Assisted Natural Language Understanding (NLU) feature for your bot.
+
+ This specification determines whether enhanced intent recognition and utterance understanding capabilities are active.
+
+ :param enabled: Determines whether the Assisted NLU feature is enabled for the bot. When set to ``true`` , Amazon Lex uses advanced models to improve intent recognition and slot resolution, with the default being ``false`` .

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lex-bot-nluimprovementspecification.html
  :exampleMetadata: fixture=_generated

@@ -6850,7 +6858,10 @@

  @builtins.property
  def enabled(self) -> typing.Union[builtins.bool, _IResolvable_da3f097b]:
- '''
+ '''Determines whether the Assisted NLU feature is enabled for the bot.
+
+ When set to ``true`` , Amazon Lex uses advanced models to improve intent recognition and slot resolution, with the default being ``false`` .
+
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lex-bot-nluimprovementspecification.html#cfn-lex-bot-nluimprovementspecification-enabled
  '''
  result = self._values.get("enabled")

@@ -8448,7 +8459,8 @@
  nlu_improvement_specification: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union["CfnBot.NluImprovementSpecificationProperty", typing.Dict[builtins.str, typing.Any]]]] = None,
  slot_resolution_improvement_specification: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union["CfnBot.SlotResolutionImprovementSpecificationProperty", typing.Dict[builtins.str, typing.Any]]]] = None,
  ) -> None:
- '''
+ '''Contains specifications about the Amazon Lex runtime generative AI capabilities from Amazon Bedrock that you can turn on for your bot.
+
  :param nlu_improvement_specification:
  :param slot_resolution_improvement_specification:

@@ -8775,8 +8787,9 @@
  enabled: typing.Union[builtins.bool, _IResolvable_da3f097b],
  bedrock_model_specification: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union["CfnBot.BedrockModelSpecificationProperty", typing.Dict[builtins.str, typing.Any]]]] = None,
  ) -> None:
- '''
- :param enabled:
+ '''Contains specifications for the sample utterance generation feature.
+
+ :param enabled: Specifies whether to enable sample utterance generation or not.
  :param bedrock_model_specification:

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lex-bot-sampleutterancegenerationspecification.html

@@ -8817,7 +8830,8 @@

  @builtins.property
  def enabled(self) -> typing.Union[builtins.bool, _IResolvable_da3f097b]:
- '''
+ '''Specifies whether to enable sample utterance generation or not.
+
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lex-bot-sampleutterancegenerationspecification.html#cfn-lex-bot-sampleutterancegenerationspecification-enabled
  '''
  result = self._values.get("enabled")
@@ -9611,9 +9625,10 @@
  enabled: typing.Union[builtins.bool, _IResolvable_da3f097b],
  bedrock_model_specification: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union["CfnBot.BedrockModelSpecificationProperty", typing.Dict[builtins.str, typing.Any]]]] = None,
  ) -> None:
- '''
- :param enabled:
- :param bedrock_model_specification:
+ '''Contains specifications for the assisted slot resolution feature.
+
+ :param enabled: Specifies whether assisted slot resolution is turned on or off.
+ :param bedrock_model_specification: An object containing information about the Amazon Bedrock model used to assist slot resolution.

  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lex-bot-slotresolutionimprovementspecification.html
  :exampleMetadata: fixture=_generated

@@ -9653,7 +9668,8 @@

  @builtins.property
  def enabled(self) -> typing.Union[builtins.bool, _IResolvable_da3f097b]:
- '''
+ '''Specifies whether assisted slot resolution is turned on or off.
+
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lex-bot-slotresolutionimprovementspecification.html#cfn-lex-bot-slotresolutionimprovementspecification-enabled
  '''
  result = self._values.get("enabled")

@@ -9664,7 +9680,8 @@
  def bedrock_model_specification(
  self,
  ) -> typing.Optional[typing.Union[_IResolvable_da3f097b, "CfnBot.BedrockModelSpecificationProperty"]]:
- '''
+ '''An object containing information about the Amazon Bedrock model used to assist slot resolution.
+
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lex-bot-slotresolutionimprovementspecification.html#cfn-lex-bot-slotresolutionimprovementspecification-bedrockmodelspecification
  '''
  result = self._values.get("bedrock_model_specification")
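
All of the aws_lex hunks fill in previously empty docstrings; no properties change. For orientation, a sketch of how the documented structs nest, based only on the parameter names shown in these hunks; the model ARN is a placeholder, and where the settings attach on the bot is not shown in this diff:

```python
import aws_cdk.aws_lex as lex

generative_ai_settings = lex.CfnBot.GenerativeAISettingsProperty(
    # Build-time capabilities: descriptive bot building, sample utterances
    buildtime_settings=lex.CfnBot.BuildtimeSettingsProperty(
        descriptive_bot_builder_specification=lex.CfnBot.DescriptiveBotBuilderSpecificationProperty(
            enabled=True,
            bedrock_model_specification=lex.CfnBot.BedrockModelSpecificationProperty(
                model_arn="arn:aws:bedrock:us-east-1::foundation-model/example"  # placeholder
            )
        )
    ),
    # Runtime capabilities: Assisted NLU (defaults to false per the docstring above)
    runtime_settings=lex.CfnBot.RuntimeSettingsProperty(
        nlu_improvement_specification=lex.CfnBot.NluImprovementSpecificationProperty(
            enabled=True
        )
    )
)
```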