aws-cdk-lib 2.149.0__py3-none-any.whl → 2.151.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aws-cdk-lib might be problematic. Click here for more details.

Files changed (48) hide show
  1. aws_cdk/__init__.py +6 -16
  2. aws_cdk/_jsii/__init__.py +1 -1
  3. aws_cdk/_jsii/{aws-cdk-lib@2.149.0.jsii.tgz → aws-cdk-lib@2.151.0.jsii.tgz} +0 -0
  4. aws_cdk/aws_apigatewayv2/__init__.py +94 -21
  5. aws_cdk/aws_appconfig/__init__.py +3 -3
  6. aws_cdk/aws_backup/__init__.py +3 -3
  7. aws_cdk/aws_bedrock/__init__.py +58 -46
  8. aws_cdk/aws_cleanrooms/__init__.py +5 -5
  9. aws_cdk/aws_cloudformation/__init__.py +4 -8
  10. aws_cdk/aws_cloudfront/__init__.py +102 -32
  11. aws_cdk/aws_cloudtrail/__init__.py +34 -558
  12. aws_cdk/aws_cloudwatch/__init__.py +1 -1
  13. aws_cdk/aws_codepipeline/__init__.py +11 -5
  14. aws_cdk/aws_cognito/__init__.py +1 -2
  15. aws_cdk/aws_ec2/__init__.py +263 -7
  16. aws_cdk/aws_ecs/__init__.py +16 -10
  17. aws_cdk/aws_eks/__init__.py +26 -20
  18. aws_cdk/aws_elasticloadbalancingv2/__init__.py +106 -11
  19. aws_cdk/aws_emr/__init__.py +18 -20
  20. aws_cdk/aws_entityresolution/__init__.py +27 -21
  21. aws_cdk/aws_events/__init__.py +83 -16
  22. aws_cdk/aws_fsx/__init__.py +25 -23
  23. aws_cdk/aws_glue/__init__.py +3 -3
  24. aws_cdk/aws_guardduty/__init__.py +6 -4
  25. aws_cdk/aws_iam/__init__.py +19 -29
  26. aws_cdk/aws_iotsitewise/__init__.py +8 -8
  27. aws_cdk/aws_lambda/__init__.py +21 -2
  28. aws_cdk/aws_logs/__init__.py +9 -0
  29. aws_cdk/aws_mwaa/__init__.py +3 -3
  30. aws_cdk/aws_pipes/__init__.py +2 -2
  31. aws_cdk/aws_qbusiness/__init__.py +21 -7
  32. aws_cdk/aws_rds/__init__.py +252 -206
  33. aws_cdk/aws_s3/__init__.py +8 -2
  34. aws_cdk/aws_sagemaker/__init__.py +10 -10
  35. aws_cdk/aws_ses/__init__.py +3 -3
  36. aws_cdk/aws_sns/__init__.py +5 -2
  37. aws_cdk/aws_stepfunctions/__init__.py +5 -2
  38. aws_cdk/aws_stepfunctions_tasks/__init__.py +23 -8
  39. aws_cdk/aws_synthetics/__init__.py +174 -22
  40. aws_cdk/custom_resources/__init__.py +91 -23
  41. aws_cdk/pipelines/__init__.py +1 -1
  42. aws_cdk/region_info/__init__.py +32 -12
  43. {aws_cdk_lib-2.149.0.dist-info → aws_cdk_lib-2.151.0.dist-info}/METADATA +1 -1
  44. {aws_cdk_lib-2.149.0.dist-info → aws_cdk_lib-2.151.0.dist-info}/RECORD +48 -48
  45. {aws_cdk_lib-2.149.0.dist-info → aws_cdk_lib-2.151.0.dist-info}/LICENSE +0 -0
  46. {aws_cdk_lib-2.149.0.dist-info → aws_cdk_lib-2.151.0.dist-info}/NOTICE +0 -0
  47. {aws_cdk_lib-2.149.0.dist-info → aws_cdk_lib-2.151.0.dist-info}/WHEEL +0 -0
  48. {aws_cdk_lib-2.149.0.dist-info → aws_cdk_lib-2.151.0.dist-info}/top_level.txt +0 -0
@@ -140,7 +140,7 @@ class CfnIdMappingWorkflow(
140
140
  '''
141
141
  :param scope: Scope in which this resource is defined.
142
142
  :param id: Construct identifier for this resource (unique in its scope).
143
- :param id_mapping_techniques: An object which defines the ``idMappingType`` and the ``providerProperties`` .
143
+ :param id_mapping_techniques: An object which defines the ID mapping technique and any additional configurations.
144
144
  :param input_source_config: A list of ``InputSource`` objects, which have the fields ``InputSourceARN`` and ``SchemaName`` .
145
145
  :param role_arn: The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.
146
146
  :param workflow_name: The name of the workflow. There can't be multiple ``IdMappingWorkflows`` with the same name.
@@ -237,7 +237,7 @@ class CfnIdMappingWorkflow(
237
237
  def id_mapping_techniques(
238
238
  self,
239
239
  ) -> typing.Union[_IResolvable_da3f097b, "CfnIdMappingWorkflow.IdMappingTechniquesProperty"]:
240
- '''An object which defines the ``idMappingType`` and the ``providerProperties`` .'''
240
+ '''An object which defines the ID mapping technique and any additional configurations.'''
241
241
  return typing.cast(typing.Union[_IResolvable_da3f097b, "CfnIdMappingWorkflow.IdMappingTechniquesProperty"], jsii.get(self, "idMappingTechniques"))
242
242
 
243
243
  @id_mapping_techniques.setter
@@ -353,7 +353,7 @@ class CfnIdMappingWorkflow(
353
353
  id_mapping_type: typing.Optional[builtins.str] = None,
354
354
  provider_properties: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union["CfnIdMappingWorkflow.ProviderPropertiesProperty", typing.Dict[builtins.str, typing.Any]]]] = None,
355
355
  ) -> None:
356
- '''An object which defines the ID mapping techniques and provider configurations.
356
+ '''An object which defines the ID mapping technique and any additional configurations.
357
357
 
358
358
  :param id_mapping_type: The type of ID mapping.
359
359
  :param provider_properties: An object which defines any additional configurations required by the provider service.
@@ -442,9 +442,9 @@ class CfnIdMappingWorkflow(
442
442
  ) -> None:
443
443
  '''An object containing ``InputSourceARN`` , ``SchemaName`` , and ``Type`` .
444
444
 
445
- :param input_source_arn: An AWS Glue table ARN for the input source table.
445
+ :param input_source_arn: An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.
446
446
  :param schema_arn: The ARN (Amazon Resource Name) that AWS Entity Resolution generated for the ``SchemaMapping`` .
447
- :param type: The type of ID namespace. There are two types: ``SOURCE`` and ``TARGET`` . The ``SOURCE`` contains configurations for ``sourceId`` data that will be processed in an ID mapping workflow. The ``TARGET`` contains a configuration of ``targetId`` to which all ``sourceIds`` will resolve to.
447
+ :param type: The type of ID namespace. There are two types: ``SOURCE`` and ``TARGET`` . The ``SOURCE`` contains configurations for ``sourceId`` data that will be processed in an ID mapping workflow. The ``TARGET`` contains a configuration of ``targetId`` which all ``sourceIds`` will resolve to.
448
448
 
449
449
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-entityresolution-idmappingworkflow-idmappingworkflowinputsource.html
450
450
  :exampleMetadata: fixture=_generated
@@ -478,7 +478,7 @@ class CfnIdMappingWorkflow(
478
478
 
479
479
  @builtins.property
480
480
  def input_source_arn(self) -> builtins.str:
481
- '''An AWS Glue table ARN for the input source table.
481
+ '''An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.
482
482
 
483
483
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-entityresolution-idmappingworkflow-idmappingworkflowinputsource.html#cfn-entityresolution-idmappingworkflow-idmappingworkflowinputsource-inputsourcearn
484
484
  '''
@@ -501,7 +501,7 @@ class CfnIdMappingWorkflow(
501
501
 
502
502
  The ``SOURCE`` contains configurations for ``sourceId`` data that will be processed in an ID mapping workflow.
503
503
 
504
- The ``TARGET`` contains a configuration of ``targetId`` to which all ``sourceIds`` will resolve to.
504
+ The ``TARGET`` contains a configuration of ``targetId`` which all ``sourceIds`` will resolve to.
505
505
 
506
506
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-entityresolution-idmappingworkflow-idmappingworkflowinputsource.html#cfn-entityresolution-idmappingworkflow-idmappingworkflowinputsource-type
507
507
  '''
@@ -780,7 +780,7 @@ class CfnIdMappingWorkflowProps:
780
780
  ) -> None:
781
781
  '''Properties for defining a ``CfnIdMappingWorkflow``.
782
782
 
783
- :param id_mapping_techniques: An object which defines the ``idMappingType`` and the ``providerProperties`` .
783
+ :param id_mapping_techniques: An object which defines the ID mapping technique and any additional configurations.
784
784
  :param input_source_config: A list of ``InputSource`` objects, which have the fields ``InputSourceARN`` and ``SchemaName`` .
785
785
  :param role_arn: The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.
786
786
  :param workflow_name: The name of the workflow. There can't be multiple ``IdMappingWorkflows`` with the same name.
@@ -862,7 +862,7 @@ class CfnIdMappingWorkflowProps:
862
862
  def id_mapping_techniques(
863
863
  self,
864
864
  ) -> typing.Union[_IResolvable_da3f097b, CfnIdMappingWorkflow.IdMappingTechniquesProperty]:
865
- '''An object which defines the ``idMappingType`` and the ``providerProperties`` .
865
+ '''An object which defines the ID mapping technique and any additional configurations.
866
866
 
867
867
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-entityresolution-idmappingworkflow.html#cfn-entityresolution-idmappingworkflow-idmappingtechniques
868
868
  '''
@@ -1017,7 +1017,7 @@ class CfnIdNamespace(
1017
1017
  :param scope: Scope in which this resource is defined.
1018
1018
  :param id: Construct identifier for this resource (unique in its scope).
1019
1019
  :param id_namespace_name: The name of the ID namespace.
1020
- :param type: The type of ID namespace. There are two types: ``SOURCE`` and ``TARGET`` . The ``SOURCE`` contains configurations for ``sourceId`` data that will be processed in an ID mapping workflow. The ``TARGET`` contains a configuration of ``targetId`` to which all ``sourceIds`` will resolve to.
1020
+ :param type: The type of ID namespace. There are two types: ``SOURCE`` and ``TARGET`` . The ``SOURCE`` contains configurations for ``sourceId`` data that will be processed in an ID mapping workflow. The ``TARGET`` contains a configuration of ``targetId`` which all ``sourceIds`` will resolve to.
1021
1021
  :param description: The description of the ID namespace.
1022
1022
  :param id_mapping_workflow_properties: Determines the properties of ``IdMappingWorflow`` where this ``IdNamespace`` can be used as a ``Source`` or a ``Target`` .
1023
1023
  :param input_source_config: A list of ``InputSource`` objects, which have the fields ``InputSourceARN`` and ``SchemaName`` .
@@ -1227,7 +1227,7 @@ class CfnIdNamespace(
1227
1227
  id_mapping_type: builtins.str,
1228
1228
  provider_properties: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union["CfnIdNamespace.NamespaceProviderPropertiesProperty", typing.Dict[builtins.str, typing.Any]]]] = None,
1229
1229
  ) -> None:
1230
- '''An object containing ``IdMappingType`` and ``ProviderProperties`` .
1230
+ '''An object containing ``IdMappingType`` , ``ProviderProperties`` , and ``RuleBasedProperties`` .
1231
1231
 
1232
1232
  :param id_mapping_type: The type of ID mapping.
1233
1233
  :param provider_properties: An object which defines any additional configurations required by the provider service.
@@ -1314,7 +1314,7 @@ class CfnIdNamespace(
1314
1314
  ) -> None:
1315
1315
  '''An object containing ``InputSourceARN`` and ``SchemaName`` .
1316
1316
 
1317
- :param input_source_arn: An AWS Glue table ARN for the input source table.
1317
+ :param input_source_arn: An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.
1318
1318
  :param schema_name: The name of the schema.
1319
1319
 
1320
1320
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-entityresolution-idnamespace-idnamespaceinputsource.html
@@ -1345,7 +1345,7 @@ class CfnIdNamespace(
1345
1345
 
1346
1346
  @builtins.property
1347
1347
  def input_source_arn(self) -> builtins.str:
1348
- '''An AWS Glue table ARN for the input source table.
1348
+ '''An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.
1349
1349
 
1350
1350
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-entityresolution-idnamespace-idnamespaceinputsource.html#cfn-entityresolution-idnamespace-idnamespaceinputsource-inputsourcearn
1351
1351
  '''
@@ -1482,7 +1482,7 @@ class CfnIdNamespaceProps:
1482
1482
  '''Properties for defining a ``CfnIdNamespace``.
1483
1483
 
1484
1484
  :param id_namespace_name: The name of the ID namespace.
1485
- :param type: The type of ID namespace. There are two types: ``SOURCE`` and ``TARGET`` . The ``SOURCE`` contains configurations for ``sourceId`` data that will be processed in an ID mapping workflow. The ``TARGET`` contains a configuration of ``targetId`` to which all ``sourceIds`` will resolve to.
1485
+ :param type: The type of ID namespace. There are two types: ``SOURCE`` and ``TARGET`` . The ``SOURCE`` contains configurations for ``sourceId`` data that will be processed in an ID mapping workflow. The ``TARGET`` contains a configuration of ``targetId`` which all ``sourceIds`` will resolve to.
1486
1486
  :param description: The description of the ID namespace.
1487
1487
  :param id_mapping_workflow_properties: Determines the properties of ``IdMappingWorflow`` where this ``IdNamespace`` can be used as a ``Source`` or a ``Target`` .
1488
1488
  :param input_source_config: A list of ``InputSource`` objects, which have the fields ``InputSourceARN`` and ``SchemaName`` .
@@ -1570,7 +1570,7 @@ class CfnIdNamespaceProps:
1570
1570
 
1571
1571
  The ``SOURCE`` contains configurations for ``sourceId`` data that will be processed in an ID mapping workflow.
1572
1572
 
1573
- The ``TARGET`` contains a configuration of ``targetId`` to which all ``sourceIds`` will resolve to.
1573
+ The ``TARGET`` contains a configuration of ``targetId`` which all ``sourceIds`` will resolve to.
1574
1574
 
1575
1575
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-entityresolution-idnamespace.html#cfn-entityresolution-idnamespace-type
1576
1576
  '''
@@ -2513,9 +2513,11 @@ class CfnMatchingWorkflow(
2513
2513
  attribute_matching_model: builtins.str,
2514
2514
  rules: typing.Union[_IResolvable_da3f097b, typing.Sequence[typing.Union[_IResolvable_da3f097b, typing.Union["CfnMatchingWorkflow.RuleProperty", typing.Dict[builtins.str, typing.Any]]]]],
2515
2515
  ) -> None:
2516
- '''An object which defines the list of matching rules to run and has a field ``Rules`` , which is a list of rule objects.
2516
+ '''An object which defines the list of matching rules to run in a matching workflow.
2517
+
2518
+ RuleBasedProperties contain a ``Rules`` field, which is a list of rule objects.
2517
2519
 
2518
- :param attribute_matching_model: The comparison type. You can either choose ``ONE_TO_ONE`` or ``MANY_TO_MANY`` as the AttributeMatchingModel. When choosing ``MANY_TO_MANY`` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the ``Email`` field of Profile A and the value of ``BusinessEmail`` field of Profile B matches, the two profiles are matched on the ``Email`` type. When choosing ``ONE_TO_ONE`` ,the system can only match if the sub-types are exact matches. For example, only when the value of the ``Email`` field of Profile A and the value of the ``Email`` field of Profile B matches, the two profiles are matched on the ``Email`` type.
2520
+ :param attribute_matching_model: The comparison type. You can either choose ``ONE_TO_ONE`` or ``MANY_TO_MANY`` as the ``attributeMatchingModel`` . If you choose ``MANY_TO_MANY`` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the ``Email`` field of Profile A and the value of ``BusinessEmail`` field of Profile B matches, the two profiles are matched on the ``Email`` attribute type. If you choose ``ONE_TO_ONE`` , the system can only match attributes if the sub-types are an exact match. For example, for the ``Email`` attribute type, the system will only consider it a match if the value of the ``Email`` field of Profile A matches the value of the ``Email`` field of Profile B.
2519
2521
  :param rules: A list of ``Rule`` objects, each of which have fields ``RuleName`` and ``MatchingKeys`` .
2520
2522
 
2521
2523
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-entityresolution-matchingworkflow-rulebasedproperties.html
@@ -2546,9 +2548,11 @@ class CfnMatchingWorkflow(
2546
2548
 
2547
2549
  @builtins.property
2548
2550
  def attribute_matching_model(self) -> builtins.str:
2549
- '''The comparison type.
2551
+ '''The comparison type. You can either choose ``ONE_TO_ONE`` or ``MANY_TO_MANY`` as the ``attributeMatchingModel`` .
2550
2552
 
2551
- You can either choose ``ONE_TO_ONE`` or ``MANY_TO_MANY`` as the AttributeMatchingModel. When choosing ``MANY_TO_MANY`` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the ``Email`` field of Profile A and the value of ``BusinessEmail`` field of Profile B matches, the two profiles are matched on the ``Email`` type. When choosing ``ONE_TO_ONE`` ,the system can only match if the sub-types are exact matches. For example, only when the value of the ``Email`` field of Profile A and the value of the ``Email`` field of Profile B matches, the two profiles are matched on the ``Email`` type.
2553
+ If you choose ``MANY_TO_MANY`` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the ``Email`` field of Profile A and the value of ``BusinessEmail`` field of Profile B matches, the two profiles are matched on the ``Email`` attribute type.
2554
+
2555
+ If you choose ``ONE_TO_ONE`` , the system can only match attributes if the sub-types are an exact match. For example, for the ``Email`` attribute type, the system will only consider it a match if the value of the ``Email`` field of Profile A matches the value of the ``Email`` field of Profile B.
2552
2556
 
2553
2557
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-entityresolution-matchingworkflow-rulebasedproperties.html#cfn-entityresolution-matchingworkflow-rulebasedproperties-attributematchingmodel
2554
2558
  '''
@@ -3418,7 +3422,7 @@ class CfnSchemaMapping(
3418
3422
  match_key: typing.Optional[builtins.str] = None,
3419
3423
  sub_type: typing.Optional[builtins.str] = None,
3420
3424
  ) -> None:
3421
- '''An object containing ``FieldName`` , ``Type`` , ``GroupName`` , ``MatchKey`` , and ``SubType`` .
3425
+ '''An object containing ``FieldName`` , ``Type`` , ``GroupName`` , ``MatchKey`` , ``Hashing`` , and ``SubType`` .
3422
3426
 
3423
3427
  :param field_name: A string containing the field name.
3424
3428
  :param type: The type of the attribute, selected from a list of values.
@@ -3498,7 +3502,9 @@ class CfnSchemaMapping(
3498
3502
  def match_key(self) -> typing.Optional[builtins.str]:
3499
3503
  '''A key that allows grouping of multiple input attributes into a unified matching group.
3500
3504
 
3501
- For example, consider a scenario where the source table contains various addresses, such as ``business_address`` and ``shipping_address`` . By assigning a ``matchKey`` called ``address`` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. If no ``matchKey`` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.
3505
+ For example, consider a scenario where the source table contains various addresses, such as ``business_address`` and ``shipping_address`` . By assigning a ``matchKey`` called ``address`` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group.
3506
+
3507
+ If no ``matchKey`` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.
3502
3508
 
3503
3509
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-entityresolution-schemamapping-schemainputattribute.html#cfn-entityresolution-schemamapping-schemainputattribute-matchkey
3504
3510
  '''
@@ -206,7 +206,8 @@ It is possible to archive all or some events sent to an event bus. It is then po
206
206
 
207
207
  ```python
208
208
  bus = events.EventBus(self, "bus",
209
- event_bus_name="MyCustomEventBus"
209
+ event_bus_name="MyCustomEventBus",
210
+ description="MyCustomEventBus"
210
211
  )
211
212
 
212
213
  bus.archive("MyArchive",
@@ -235,6 +236,25 @@ event_bus = events.EventBus.from_event_bus_arn(self, "ImportedEventBus", "arn:aw
235
236
  # now you can just call methods on the eventbus
236
237
  event_bus.grant_put_events_to(lambda_function)
237
238
  ```
239
+
240
+ ## Use a customer managed key
241
+
242
+ To use a customer managed key for events on the event bus, use the `kmsKey` attribute.
243
+
244
+ ```python
245
+ import aws_cdk.aws_kms as kms
246
+
247
+ # kms_key: kms.IKey
248
+
249
+
250
+ events.EventBus(self, "Bus",
251
+ kms_key=kms_key
252
+ )
253
+ ```
254
+
255
+ **Note**: Archives and schema discovery are not supported for event buses encrypted using a customer managed key.
256
+ To enable archives or schema discovery on an event bus, choose to use an AWS owned key.
257
+ For more information, see [KMS key options for event bus encryption](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-encryption-at-rest-key-options.html).
238
258
  '''
239
259
  from pkgutil import extend_path
240
260
  __path__ = extend_path(__path__, __name__)
@@ -275,6 +295,7 @@ from ..aws_iam import (
275
295
  IRole as _IRole_235f5d8e,
276
296
  PolicyStatement as _PolicyStatement_0fe33853,
277
297
  )
298
+ from ..aws_kms import IKey as _IKey_5f11635f
278
299
 
279
300
 
280
301
  @jsii.data_type(
@@ -720,7 +741,8 @@ class BaseArchiveProps:
720
741
  Example::
721
742
 
722
743
  bus = events.EventBus(self, "bus",
723
- event_bus_name="MyCustomEventBus"
744
+ event_bus_name="MyCustomEventBus",
745
+ description="MyCustomEventBus"
724
746
  )
725
747
 
726
748
  bus.archive("MyArchive",
@@ -8164,21 +8186,27 @@ class EventBusPolicyProps:
8164
8186
  jsii_type="aws-cdk-lib.aws_events.EventBusProps",
8165
8187
  jsii_struct_bases=[],
8166
8188
  name_mapping={
8189
+ "description": "description",
8167
8190
  "event_bus_name": "eventBusName",
8168
8191
  "event_source_name": "eventSourceName",
8192
+ "kms_key": "kmsKey",
8169
8193
  },
8170
8194
  )
8171
8195
  class EventBusProps:
8172
8196
  def __init__(
8173
8197
  self,
8174
8198
  *,
8199
+ description: typing.Optional[builtins.str] = None,
8175
8200
  event_bus_name: typing.Optional[builtins.str] = None,
8176
8201
  event_source_name: typing.Optional[builtins.str] = None,
8202
+ kms_key: typing.Optional[_IKey_5f11635f] = None,
8177
8203
  ) -> None:
8178
8204
  '''Properties to define an event bus.
8179
8205
 
8206
+ :param description: The event bus description. The description can be up to 512 characters long. Default: - no description
8180
8207
  :param event_bus_name: The name of the event bus you are creating Note: If 'eventSourceName' is passed in, you cannot set this. Default: - automatically generated name
8181
8208
  :param event_source_name: The partner event source to associate with this event bus resource Note: If 'eventBusName' is passed in, you cannot set this. Default: - no partner event source
8209
+ :param kms_key: The customer managed key that encrypt events on this event bus. Default: - Use an AWS managed key
8182
8210
 
8183
8211
  :exampleMetadata: infused
8184
8212
 
@@ -8187,31 +8215,49 @@ class EventBusProps:
8187
8215
  import aws_cdk.aws_events as events
8188
8216
 
8189
8217
 
8190
- event_bus = events.EventBus(self, "EventBus",
8191
- event_bus_name="DomainEvents"
8192
- )
8193
-
8194
- event_entry = targets.EventBridgePutEventsEntry(
8195
- event_bus=event_bus,
8196
- source="PetService",
8197
- detail=ScheduleTargetInput.from_object({"Name": "Fluffy"}),
8198
- detail_type="🐶"
8218
+ my_event_bus = events.EventBus(self, "EventBus",
8219
+ event_bus_name="MyEventBus1"
8199
8220
  )
8200
8221
 
8201
- Schedule(self, "Schedule",
8202
- schedule=ScheduleExpression.rate(Duration.hours(1)),
8203
- target=targets.EventBridgePutEvents(event_entry)
8222
+ tasks.EventBridgePutEvents(self, "Send an event to EventBridge",
8223
+ entries=[tasks.EventBridgePutEventsEntry(
8224
+ detail=sfn.TaskInput.from_object({
8225
+ "Message": "Hello from Step Functions!"
8226
+ }),
8227
+ event_bus=my_event_bus,
8228
+ detail_type="MessageFromStepFunctions",
8229
+ source="step.functions"
8230
+ )]
8204
8231
  )
8205
8232
  '''
8206
8233
  if __debug__:
8207
8234
  type_hints = typing.get_type_hints(_typecheckingstub__298a8c4285f4e039344007a0deb097d820ddec52c59d396d7a8faa1aa9c8b743)
8235
+ check_type(argname="argument description", value=description, expected_type=type_hints["description"])
8208
8236
  check_type(argname="argument event_bus_name", value=event_bus_name, expected_type=type_hints["event_bus_name"])
8209
8237
  check_type(argname="argument event_source_name", value=event_source_name, expected_type=type_hints["event_source_name"])
8238
+ check_type(argname="argument kms_key", value=kms_key, expected_type=type_hints["kms_key"])
8210
8239
  self._values: typing.Dict[builtins.str, typing.Any] = {}
8240
+ if description is not None:
8241
+ self._values["description"] = description
8211
8242
  if event_bus_name is not None:
8212
8243
  self._values["event_bus_name"] = event_bus_name
8213
8244
  if event_source_name is not None:
8214
8245
  self._values["event_source_name"] = event_source_name
8246
+ if kms_key is not None:
8247
+ self._values["kms_key"] = kms_key
8248
+
8249
+ @builtins.property
8250
+ def description(self) -> typing.Optional[builtins.str]:
8251
+ '''The event bus description.
8252
+
8253
+ The description can be up to 512 characters long.
8254
+
8255
+ :default: - no description
8256
+
8257
+ :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-eventbus.html#cfn-events-eventbus-description
8258
+ '''
8259
+ result = self._values.get("description")
8260
+ return typing.cast(typing.Optional[builtins.str], result)
8215
8261
 
8216
8262
  @builtins.property
8217
8263
  def event_bus_name(self) -> typing.Optional[builtins.str]:
@@ -8235,6 +8281,15 @@ class EventBusProps:
8235
8281
  result = self._values.get("event_source_name")
8236
8282
  return typing.cast(typing.Optional[builtins.str], result)
8237
8283
 
8284
+ @builtins.property
8285
+ def kms_key(self) -> typing.Optional[_IKey_5f11635f]:
8286
+ '''The customer managed key that encrypt events on this event bus.
8287
+
8288
+ :default: - Use an AWS managed key
8289
+ '''
8290
+ result = self._values.get("kms_key")
8291
+ return typing.cast(typing.Optional[_IKey_5f11635f], result)
8292
+
8238
8293
  def __eq__(self, rhs: typing.Any) -> builtins.bool:
8239
8294
  return isinstance(rhs, self.__class__) and rhs._values == self._values
8240
8295
 
@@ -11331,7 +11386,8 @@ class EventBus(
11331
11386
  Example::
11332
11387
 
11333
11388
  bus = events.EventBus(self, "bus",
11334
- event_bus_name="MyCustomEventBus"
11389
+ event_bus_name="MyCustomEventBus",
11390
+ description="MyCustomEventBus"
11335
11391
  )
11336
11392
 
11337
11393
  bus.archive("MyArchive",
@@ -11349,21 +11405,28 @@ class EventBus(
11349
11405
  scope: _constructs_77d1e7e8.Construct,
11350
11406
  id: builtins.str,
11351
11407
  *,
11408
+ description: typing.Optional[builtins.str] = None,
11352
11409
  event_bus_name: typing.Optional[builtins.str] = None,
11353
11410
  event_source_name: typing.Optional[builtins.str] = None,
11411
+ kms_key: typing.Optional[_IKey_5f11635f] = None,
11354
11412
  ) -> None:
11355
11413
  '''
11356
11414
  :param scope: -
11357
11415
  :param id: -
11416
+ :param description: The event bus description. The description can be up to 512 characters long. Default: - no description
11358
11417
  :param event_bus_name: The name of the event bus you are creating Note: If 'eventSourceName' is passed in, you cannot set this. Default: - automatically generated name
11359
11418
  :param event_source_name: The partner event source to associate with this event bus resource Note: If 'eventBusName' is passed in, you cannot set this. Default: - no partner event source
11419
+ :param kms_key: The customer managed key that encrypt events on this event bus. Default: - Use an AWS managed key
11360
11420
  '''
11361
11421
  if __debug__:
11362
11422
  type_hints = typing.get_type_hints(_typecheckingstub__95a51d19a0503daf5e05f08738b44a6276eaa23c373c99735de37b1247783380)
11363
11423
  check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
11364
11424
  check_type(argname="argument id", value=id, expected_type=type_hints["id"])
11365
11425
  props = EventBusProps(
11366
- event_bus_name=event_bus_name, event_source_name=event_source_name
11426
+ description=description,
11427
+ event_bus_name=event_bus_name,
11428
+ event_source_name=event_source_name,
11429
+ kms_key=kms_key,
11367
11430
  )
11368
11431
 
11369
11432
  jsii.create(self.__class__, self, [scope, id, props])
@@ -12566,8 +12629,10 @@ def _typecheckingstub__30683f3da3ccc9235ea663f165631b4ac57c807c1878517c5051bcd78
12566
12629
 
12567
12630
  def _typecheckingstub__298a8c4285f4e039344007a0deb097d820ddec52c59d396d7a8faa1aa9c8b743(
12568
12631
  *,
12632
+ description: typing.Optional[builtins.str] = None,
12569
12633
  event_bus_name: typing.Optional[builtins.str] = None,
12570
12634
  event_source_name: typing.Optional[builtins.str] = None,
12635
+ kms_key: typing.Optional[_IKey_5f11635f] = None,
12571
12636
  ) -> None:
12572
12637
  """Type checking stubs"""
12573
12638
  pass
@@ -12960,8 +13025,10 @@ def _typecheckingstub__95a51d19a0503daf5e05f08738b44a6276eaa23c373c99735de37b124
12960
13025
  scope: _constructs_77d1e7e8.Construct,
12961
13026
  id: builtins.str,
12962
13027
  *,
13028
+ description: typing.Optional[builtins.str] = None,
12963
13029
  event_bus_name: typing.Optional[builtins.str] = None,
12964
13030
  event_source_name: typing.Optional[builtins.str] = None,
13031
+ kms_key: typing.Optional[_IKey_5f11635f] = None,
12965
13032
  ) -> None:
12966
13033
  """Type checking stubs"""
12967
13034
  pass
@@ -2161,17 +2161,17 @@ class CfnFileSystem(
2161
2161
  ) -> None:
2162
2162
  '''The configuration for this Amazon FSx for NetApp ONTAP file system.
2163
2163
 
2164
- :param deployment_type: Specifies the FSx for ONTAP file system deployment type to use in creating the file system. - ``MULTI_AZ_1`` - (Default) A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. - ``SINGLE_AZ_1`` - A file system configured for Single-AZ redundancy. - ``SINGLE_AZ_2`` - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. For information about the use cases for Multi-AZ and Single-AZ deployments, refer to `Choosing a file system deployment type <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-AZ.html>`_ .
2164
+ :param deployment_type: Specifies the FSx for ONTAP file system deployment type to use in creating the file system. - ``MULTI_AZ_1`` - A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. This is a first-generation FSx for ONTAP file system. - ``MULTI_AZ_2`` - A high availability file system configured for Multi-AZ redundancy to tolerate temporary AZ unavailability. This is a second-generation FSx for ONTAP file system. - ``SINGLE_AZ_1`` - A file system configured for Single-AZ redundancy. This is a first-generation FSx for ONTAP file system. - ``SINGLE_AZ_2`` - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. This is a second-generation FSx for ONTAP file system. For information about the use cases for Multi-AZ and Single-AZ deployments, refer to `Choosing a file system deployment type <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-AZ.html>`_ .
2165
2165
  :param automatic_backup_retention_days: The number of days to retain automatic backups. Setting this property to ``0`` disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is ``30`` .
2166
2166
  :param daily_automatic_backup_start_time: A recurring daily time, in the format ``HH:MM`` . ``HH`` is the zero-padded hour of the day (0-23), and ``MM`` is the zero-padded minute of the hour. For example, ``05:00`` specifies 5 AM daily.
2167
2167
  :param disk_iops_configuration: The SSD IOPS configuration for the FSx for ONTAP file system.
2168
2168
  :param endpoint_ip_address_range: (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API, Amazon FSx selects an unused IP address range for you from the 198.19.* range. By default in the Amazon FSx console, Amazon FSx chooses the last 64 IP addresses from the VPC’s primary CIDR range to use as the endpoint IP address range for the file system. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables, as long as they don't overlap with any subnet.
2169
2169
  :param fsx_admin_password: The ONTAP administrative password for the ``fsxadmin`` user with which you administer your file system using the NetApp ONTAP CLI and REST API.
2170
- :param ha_pairs: Specifies how many high-availability (HA) pairs of file servers will power your file system. Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of ``StorageCapacity`` , ``Iops`` , and ``ThroughputCapacity`` . For more information, see `High-availability (HA) pairs <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/administering-file-systems.html#HA-pairs>`_ in the FSx for ONTAP user guide. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: - The value of ``HAPairs`` is less than 1 or greater than 12. - The value of ``HAPairs`` is greater than 1 and the value of ``DeploymentType`` is ``SINGLE_AZ_1`` or ``MULTI_AZ_1`` .
2171
- :param preferred_subnet_id: Required when ``DeploymentType`` is set to ``MULTI_AZ_1`` . This specifies the subnet in which you want the preferred file server to be located.
2170
+ :param ha_pairs: Specifies how many high-availability (HA) pairs of file servers will power your file system. First-generation file systems are powered by 1 HA pair. Second-generation multi-AZ file systems are powered by 1 HA pair. Second generation single-AZ file systems are powered by up to 12 HA pairs. The default value is 1. The value of this property affects the values of ``StorageCapacity`` , ``Iops`` , and ``ThroughputCapacity`` . For more information, see `High-availability (HA) pairs <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/administering-file-systems.html#HA-pairs>`_ in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see `Using block storage protocols <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/supported-fsx-clients.html#using-block-storage>`_ . Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: - The value of ``HAPairs`` is less than 1 or greater than 12. - The value of ``HAPairs`` is greater than 1 and the value of ``DeploymentType`` is ``SINGLE_AZ_1`` , ``MULTI_AZ_1`` , or ``MULTI_AZ_2`` .
2171
+ :param preferred_subnet_id: Required when ``DeploymentType`` is set to ``MULTI_AZ_1`` or ``MULTI_AZ_2`` . This specifies the subnet in which you want the preferred file server to be located.
2172
2172
  :param route_table_ids: (Multi-AZ only) Specifies the route tables in which Amazon FSx creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table. .. epigraph:: Amazon FSx manages these route tables for Multi-AZ file systems using tag-based authentication. These route tables are tagged with ``Key: AmazonFSx; Value: ManagedByAmazonFSx`` . When creating FSx for ONTAP Multi-AZ file systems using AWS CloudFormation we recommend that you add the ``Key: AmazonFSx; Value: ManagedByAmazonFSx`` tag manually.
2173
2173
  :param throughput_capacity: Sets the throughput capacity for the file system that you're creating in megabytes per second (MBps). For more information, see `Managing throughput capacity <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-throughput-capacity.html>`_ in the FSx for ONTAP User Guide. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: - The value of ``ThroughputCapacity`` and ``ThroughputCapacityPerHAPair`` are not the same value. - The value of ``ThroughputCapacity`` when divided by the value of ``HAPairs`` is outside of the valid range for ``ThroughputCapacity`` .
2174
- :param throughput_capacity_per_ha_pair: Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system. You can define either the ``ThroughputCapacityPerHAPair`` or the ``ThroughputCapacity`` when creating a file system, but not both. This field and ``ThroughputCapacity`` are the same for scale-up file systems powered by one HA pair. - For ``SINGLE_AZ_1`` and ``MULTI_AZ_1`` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps. - For ``SINGLE_AZ_2`` file systems, valid values are 3072 or 6144 MBps. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: - The value of ``ThroughputCapacity`` and ``ThroughputCapacityPerHAPair`` are not the same value for file systems with one HA pair. - The value of deployment type is ``SINGLE_AZ_2`` and ``ThroughputCapacity`` / ``ThroughputCapacityPerHAPair`` is a valid HA pair (a value between 2 and 12). - The value of ``ThroughputCapacityPerHAPair`` is not a valid value.
2174
+ :param throughput_capacity_per_ha_pair: Use to choose the throughput capacity per HA pair, rather than the total throughput for the file system. You can define either the ``ThroughputCapacityPerHAPair`` or the ``ThroughputCapacity`` when creating a file system, but not both. This field and ``ThroughputCapacity`` are the same for file systems powered by one HA pair. - For ``SINGLE_AZ_1`` and ``MULTI_AZ_1`` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps. - For ``SINGLE_AZ_2`` , valid values are 1536, 3072, or 6144 MBps. - For ``MULTI_AZ_2`` , valid values are 384, 768, 1536, 3072, or 6144 MBps. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: - The value of ``ThroughputCapacity`` and ``ThroughputCapacityPerHAPair`` are not the same value for file systems with one HA pair. - The value of deployment type is ``SINGLE_AZ_2`` and ``ThroughputCapacity`` / ``ThroughputCapacityPerHAPair`` is not a valid HA pair (a value between 1 and 12). - The value of ``ThroughputCapacityPerHAPair`` is not a valid value.
2175
2175
  :param weekly_maintenance_start_time: A recurring weekly time, in the format ``D:HH:MM`` . ``D`` is the day of the week, for which 1 represents Monday and 7 represents Sunday. For further details, see `the ISO-8601 spec as described on Wikipedia <https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/ISO_week_date>`_ . ``HH`` is the zero-padded hour of the day (0-23), and ``MM`` is the zero-padded minute of the hour. For example, ``1:05:00`` specifies maintenance at 5 AM Monday.
2176
2176
 
2177
2177
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fsx-filesystem-ontapconfiguration.html
@@ -2247,9 +2247,10 @@ class CfnFileSystem(
2247
2247
  def deployment_type(self) -> builtins.str:
2248
2248
  '''Specifies the FSx for ONTAP file system deployment type to use in creating the file system.
2249
2249
 
2250
- - ``MULTI_AZ_1`` - (Default) A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability.
2251
- - ``SINGLE_AZ_1`` - A file system configured for Single-AZ redundancy.
2252
- - ``SINGLE_AZ_2`` - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy.
2250
+ - ``MULTI_AZ_1`` - A high availability file system configured for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. This is a first-generation FSx for ONTAP file system.
2251
+ - ``MULTI_AZ_2`` - A high availability file system configured for Multi-AZ redundancy to tolerate temporary AZ unavailability. This is a second-generation FSx for ONTAP file system.
2252
+ - ``SINGLE_AZ_1`` - A file system configured for Single-AZ redundancy. This is a first-generation FSx for ONTAP file system.
2253
+ - ``SINGLE_AZ_2`` - A file system configured with multiple high-availability (HA) pairs for Single-AZ redundancy. This is a second-generation FSx for ONTAP file system.
2253
2254
 
2254
2255
  For information about the use cases for Multi-AZ and Single-AZ deployments, refer to `Choosing a file system deployment type <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/high-availability-AZ.html>`_ .
2255
2256
 
@@ -2316,12 +2317,12 @@ class CfnFileSystem(
2316
2317
  def ha_pairs(self) -> typing.Optional[jsii.Number]:
2317
2318
  '''Specifies how many high-availability (HA) pairs of file servers will power your file system.
2318
2319
 
2319
- Scale-up file systems are powered by 1 HA pair. The default value is 1. FSx for ONTAP scale-out file systems are powered by up to 12 HA pairs. The value of this property affects the values of ``StorageCapacity`` , ``Iops`` , and ``ThroughputCapacity`` . For more information, see `High-availability (HA) pairs <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/administering-file-systems.html#HA-pairs>`_ in the FSx for ONTAP user guide.
2320
+ First-generation file systems are powered by 1 HA pair. Second-generation multi-AZ file systems are powered by 1 HA pair. Second generation single-AZ file systems are powered by up to 12 HA pairs. The default value is 1. The value of this property affects the values of ``StorageCapacity`` , ``Iops`` , and ``ThroughputCapacity`` . For more information, see `High-availability (HA) pairs <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/administering-file-systems.html#HA-pairs>`_ in the FSx for ONTAP user guide. Block storage protocol support (iSCSI and NVMe over TCP) is disabled on file systems with more than 6 HA pairs. For more information, see `Using block storage protocols <https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/supported-fsx-clients.html#using-block-storage>`_ .
2320
2321
 
2321
2322
  Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:
2322
2323
 
2323
2324
  - The value of ``HAPairs`` is less than 1 or greater than 12.
2324
- - The value of ``HAPairs`` is greater than 1 and the value of ``DeploymentType`` is ``SINGLE_AZ_1`` or ``MULTI_AZ_1`` .
2325
+ - The value of ``HAPairs`` is greater than 1 and the value of ``DeploymentType`` is ``SINGLE_AZ_1`` , ``MULTI_AZ_1`` , or ``MULTI_AZ_2`` .
2325
2326
 
2326
2327
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fsx-filesystem-ontapconfiguration.html#cfn-fsx-filesystem-ontapconfiguration-hapairs
2327
2328
  '''
@@ -2330,7 +2331,7 @@ class CfnFileSystem(
2330
2331
 
2331
2332
  @builtins.property
2332
2333
  def preferred_subnet_id(self) -> typing.Optional[builtins.str]:
2333
- '''Required when ``DeploymentType`` is set to ``MULTI_AZ_1`` .
2334
+ '''Required when ``DeploymentType`` is set to ``MULTI_AZ_1`` or ``MULTI_AZ_2`` .
2334
2335
 
2335
2336
  This specifies the subnet in which you want the preferred file server to be located.
2336
2337
 
@@ -2375,15 +2376,16 @@ class CfnFileSystem(
2375
2376
 
2376
2377
  You can define either the ``ThroughputCapacityPerHAPair`` or the ``ThroughputCapacity`` when creating a file system, but not both.
2377
2378
 
2378
- This field and ``ThroughputCapacity`` are the same for scale-up file systems powered by one HA pair.
2379
+ This field and ``ThroughputCapacity`` are the same for file systems powered by one HA pair.
2379
2380
 
2380
2381
  - For ``SINGLE_AZ_1`` and ``MULTI_AZ_1`` file systems, valid values are 128, 256, 512, 1024, 2048, or 4096 MBps.
2381
- - For ``SINGLE_AZ_2`` file systems, valid values are 3072 or 6144 MBps.
2382
+ - For ``SINGLE_AZ_2`` , valid values are 1536, 3072, or 6144 MBps.
2383
+ - For ``MULTI_AZ_2`` , valid values are 384, 768, 1536, 3072, or 6144 MBps.
2382
2384
 
2383
2385
  Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:
2384
2386
 
2385
2387
  - The value of ``ThroughputCapacity`` and ``ThroughputCapacityPerHAPair`` are not the same value for file systems with one HA pair.
2386
- - The value of deployment type is ``SINGLE_AZ_2`` and ``ThroughputCapacity`` / ``ThroughputCapacityPerHAPair`` is a valid HA pair (a value between 2 and 12).
2388
+ - The value of deployment type is ``SINGLE_AZ_2`` and ``ThroughputCapacity`` / ``ThroughputCapacityPerHAPair`` is not a valid HA pair (a value between 1 and 12).
2387
2389
  - The value of ``ThroughputCapacityPerHAPair`` is not a valid value.
2388
2390
 
2389
2391
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fsx-filesystem-ontapconfiguration.html#cfn-fsx-filesystem-ontapconfiguration-throughputcapacityperhapair
@@ -2456,7 +2458,7 @@ class CfnFileSystem(
2456
2458
  ) -> None:
2457
2459
  '''The OpenZFS configuration for the file system that's being created.
2458
2460
 
2459
- :param deployment_type: Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an AWS Region . Valid values are the following: - ``MULTI_AZ_1`` - Creates file systems with high availability that are configured for Multi-AZ redundancy to tolerate temporary unavailability in Availability Zones (AZs). ``Multi_AZ_1`` is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) AWS Regions . - ``SINGLE_AZ_1`` - Creates file systems with throughput capacities of 64 - 4,096 MB/s. ``Single_AZ_1`` is available in all AWS Regions where Amazon FSx for OpenZFS is available. - ``SINGLE_AZ_2`` - Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. ``Single_AZ_2`` is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) AWS Regions . For more information, see `Deployment type availability <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/availability-durability.html#available-aws-regions>`_ and `File system performance <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#zfs-fs-performance>`_ in the *Amazon FSx for OpenZFS User Guide* .
2461
+ :param deployment_type: Specifies the file system deployment type. Valid values are the following: - ``MULTI_AZ_1`` - Creates file systems with high availability and durability by replicating your data and supporting failover across multiple Availability Zones in the same AWS Region . - ``SINGLE_AZ_HA_2`` - Creates file systems with high availability and throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache by deploying a primary and standby file system within the same Availability Zone. - ``SINGLE_AZ_HA_1`` - Creates file systems with high availability and throughput capacities of 64 - 4,096 MB/s by deploying a primary and standby file system within the same Availability Zone. - ``SINGLE_AZ_2`` - Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache that automatically recover within a single Availability Zone. - ``SINGLE_AZ_1`` - Creates file systems with throughput capacities of 64 - 4,096 MB/s that automatically recover within a single Availability Zone. For a list of which AWS Regions each deployment type is available in, see `Deployment type availability <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/availability-durability.html#available-aws-regions>`_ . For more information on the differences in performance between deployment types, see `File system performance <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#zfs-fs-performance>`_ in the *Amazon FSx for OpenZFS User Guide* .
2460
2462
  :param automatic_backup_retention_days: The number of days to retain automatic backups. Setting this property to ``0`` disables automatic backups. You can retain automatic backups for a maximum of 90 days. The default is ``30`` .
2461
2463
  :param copy_tags_to_backups: A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to ``false`` . If it's set to ``true`` , all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is ``true`` , and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value.
2462
2464
  :param copy_tags_to_volumes: A Boolean value indicating whether tags for the file system should be copied to volumes. This value defaults to ``false`` . If it's set to ``true`` , all tags for the file system are copied to volumes where the user doesn't specify tags. If this value is ``true`` , and you specify one or more tags, only the specified tags are copied to volumes. If you specify one or more tags when creating the volume, no tags are copied from the file system, regardless of this value.
@@ -2561,15 +2563,15 @@ class CfnFileSystem(
2561
2563
 
2562
2564
  @builtins.property
2563
2565
  def deployment_type(self) -> builtins.str:
2564
- '''Specifies the file system deployment type.
2565
-
2566
- Single AZ deployment types are configured for redundancy within a single Availability Zone in an AWS Region . Valid values are the following:
2566
+ '''Specifies the file system deployment type. Valid values are the following:
2567
2567
 
2568
- - ``MULTI_AZ_1`` - Creates file systems with high availability that are configured for Multi-AZ redundancy to tolerate temporary unavailability in Availability Zones (AZs). ``Multi_AZ_1`` is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) AWS Regions .
2569
- - ``SINGLE_AZ_1`` - Creates file systems with throughput capacities of 64 - 4,096 MB/s. ``Single_AZ_1`` is available in all AWS Regions where Amazon FSx for OpenZFS is available.
2570
- - ``SINGLE_AZ_2`` - Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. ``Single_AZ_2`` is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Tokyo), and Europe (Ireland) AWS Regions .
2568
+ - ``MULTI_AZ_1`` - Creates file systems with high availability and durability by replicating your data and supporting failover across multiple Availability Zones in the same AWS Region .
2569
+ - ``SINGLE_AZ_HA_2`` - Creates file systems with high availability and throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache by deploying a primary and standby file system within the same Availability Zone.
2570
+ - ``SINGLE_AZ_HA_1`` - Creates file systems with high availability and throughput capacities of 64 - 4,096 MB/s by deploying a primary and standby file system within the same Availability Zone.
2571
+ - ``SINGLE_AZ_2`` - Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache that automatically recover within a single Availability Zone.
2572
+ - ``SINGLE_AZ_1`` - Creates file systems with throughput capacities of 64 - 4,096 MB/s that automatically recover within a single Availability Zone.
2571
2573
 
2572
- For more information, see `Deployment type availability <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/availability-durability.html#available-aws-regions>`_ and `File system performance <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#zfs-fs-performance>`_ in the *Amazon FSx for OpenZFS User Guide* .
2574
+ For a list of which AWS Regions each deployment type is available in, see `Deployment type availability <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/availability-durability.html#available-aws-regions>`_ . For more information on the differences in performance between deployment types, see `File system performance <https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/performance.html#zfs-fs-performance>`_ in the *Amazon FSx for OpenZFS User Guide* .
2573
2575
 
2574
2576
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fsx-filesystem-openzfsconfiguration.html#cfn-fsx-filesystem-openzfsconfiguration-deploymenttype
2575
2577
  '''
@@ -5089,7 +5091,7 @@ class CfnVolume(
5089
5091
  ) -> None:
5090
5092
  '''Use to specify configuration options for a volume’s storage aggregate or aggregates.
5091
5093
 
5092
- :param aggregates: The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: - The strings in the value of ``Aggregates`` are not are not formatted as ``aggrX`` , where X is a number between 1 and 6. - The value of ``Aggregates`` contains aggregates that are not present. - One or more of the aggregates supplied are too close to the volume limit to support adding more volumes.
5094
+ :param aggregates: The list of aggregates that this volume resides on. Aggregates are storage pools which make up your primary storage tier. Each high-availability (HA) pair has one aggregate. The names of the aggregates map to the names of the aggregates in the ONTAP CLI and REST API. For FlexVols, there will always be a single entry. Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions: - The strings in the value of ``Aggregates`` are not formatted as ``aggrX`` , where X is a number between 1 and 12. - The value of ``Aggregates`` contains aggregates that are not present. - One or more of the aggregates supplied are too close to the volume limit to support adding more volumes.
5093
5095
  :param constituents_per_aggregate: Used to explicitly set the number of constituents within the FlexGroup per storage aggregate. This field is optional when creating a FlexGroup volume. If unspecified, the default value will be 8. This field cannot be provided when creating a FlexVol volume.
5094
5096
 
5095
5097
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fsx-volume-aggregateconfiguration.html
@@ -5124,7 +5126,7 @@ class CfnVolume(
5124
5126
 
5125
5127
  Amazon FSx responds with an HTTP status code 400 (Bad Request) for the following conditions:
5126
5128
 
5127
- - The strings in the value of ``Aggregates`` are not are not formatted as ``aggrX`` , where X is a number between 1 and 6.
5129
+ - The strings in the value of ``Aggregates`` are not formatted as ``aggrX`` , where X is a number between 1 and 12.
5128
5130
  - The value of ``Aggregates`` contains aggregates that are not present.
5129
5131
  - One or more of the aggregates supplied are too close to the volume limit to support adding more volumes.
5130
5132
 
@@ -6037,7 +6037,7 @@ class CfnJob(
6037
6037
  :param description: A description of the job.
6038
6038
  :param execution_class: Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. Only jobs with AWS Glue version 3.0 and above and command type ``glueetl`` will be allowed to set ``ExecutionClass`` to ``FLEX`` . The flexible execution class is available for Spark jobs.
6039
6039
  :param execution_property: The maximum number of concurrent runs that are allowed for this job.
6040
- :param glue_version: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available AWS Glue versions and corresponding Spark and Python versions, see `Glue version <https://docs.aws.amazon.com/glue/latest/dg/add-job.html>`_ in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9.
6040
+ :param glue_version: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available AWS Glue versions and corresponding Spark and Python versions, see `Glue version <https://docs.aws.amazon.com/glue/latest/dg/add-job.html>`_ in the developer guide. Jobs that are created without specifying a Glue version default to the latest Glue version available.
6041
6041
  :param log_uri: This field is reserved for future use.
6042
6042
  :param maintenance_window: This field specifies a day of the week and hour for a maintenance window for streaming jobs. AWS Glue periodically performs maintenance activities. During these maintenance windows, AWS Glue will need to restart your streaming jobs. AWS Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.
6043
6043
  :param max_capacity: The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. Do not set ``Max Capacity`` if using ``WorkerType`` and ``NumberOfWorkers`` . The value that can be allocated for ``MaxCapacity`` depends on whether you are running a Python shell job or an Apache Spark ETL job: - When you specify a Python shell job ( ``JobCommand.Name`` ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. - When you specify an Apache Spark ETL job ( ``JobCommand.Name`` ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
@@ -6769,7 +6769,7 @@ class CfnJobProps:
6769
6769
  :param description: A description of the job.
6770
6770
  :param execution_class: Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. Only jobs with AWS Glue version 3.0 and above and command type ``glueetl`` will be allowed to set ``ExecutionClass`` to ``FLEX`` . The flexible execution class is available for Spark jobs.
6771
6771
  :param execution_property: The maximum number of concurrent runs that are allowed for this job.
6772
- :param glue_version: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available AWS Glue versions and corresponding Spark and Python versions, see `Glue version <https://docs.aws.amazon.com/glue/latest/dg/add-job.html>`_ in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9.
6772
+ :param glue_version: Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available AWS Glue versions and corresponding Spark and Python versions, see `Glue version <https://docs.aws.amazon.com/glue/latest/dg/add-job.html>`_ in the developer guide. Jobs that are created without specifying a Glue version default to the latest Glue version available.
6773
6773
  :param log_uri: This field is reserved for future use.
6774
6774
  :param maintenance_window: This field specifies a day of the week and hour for a maintenance window for streaming jobs. AWS Glue periodically performs maintenance activities. During these maintenance windows, AWS Glue will need to restart your streaming jobs. AWS Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.
6775
6775
  :param max_capacity: The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. Do not set ``Max Capacity`` if using ``WorkerType`` and ``NumberOfWorkers`` . The value that can be allocated for ``MaxCapacity`` depends on whether you are running a Python shell job or an Apache Spark ETL job: - When you specify a Python shell job ( ``JobCommand.Name`` ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. - When you specify an Apache Spark ETL job ( ``JobCommand.Name`` ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
@@ -6999,7 +6999,7 @@ class CfnJobProps:
6999
6999
 
7000
7000
  For more information about the available AWS Glue versions and corresponding Spark and Python versions, see `Glue version <https://docs.aws.amazon.com/glue/latest/dg/add-job.html>`_ in the developer guide.
7001
7001
 
7002
- Jobs that are created without specifying a Glue version default to Glue 0.9.
7002
+ Jobs that are created without specifying a Glue version default to the latest Glue version available.
7003
7003
 
7004
7004
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-glue-job.html#cfn-glue-job-glueversion
7005
7005
  '''