aws-cdk-lib 2.173.4__py3-none-any.whl → 2.174.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aws-cdk-lib might be problematic. Click here for more details.

Files changed (91)
  1. aws_cdk/__init__.py +210 -181
  2. aws_cdk/_jsii/__init__.py +1 -1
  3. aws_cdk/_jsii/{aws-cdk-lib@2.173.4.jsii.tgz → aws-cdk-lib@2.174.1.jsii.tgz} +0 -0
  4. aws_cdk/aws_amazonmq/__init__.py +18 -0
  5. aws_cdk/aws_apigateway/__init__.py +127 -168
  6. aws_cdk/aws_appconfig/__init__.py +498 -1
  7. aws_cdk/aws_applicationautoscaling/__init__.py +4 -1
  8. aws_cdk/aws_appsync/__init__.py +4 -0
  9. aws_cdk/aws_autoscaling/__init__.py +96 -0
  10. aws_cdk/aws_batch/__init__.py +662 -237
  11. aws_cdk/aws_bedrock/__init__.py +2122 -181
  12. aws_cdk/aws_cassandra/__init__.py +476 -4
  13. aws_cdk/aws_cleanrooms/__init__.py +1227 -20
  14. aws_cdk/aws_cloudformation/__init__.py +172 -169
  15. aws_cdk/aws_cloudfront/__init__.py +773 -26
  16. aws_cdk/aws_cloudtrail/__init__.py +8 -2
  17. aws_cdk/aws_codebuild/__init__.py +83 -0
  18. aws_cdk/aws_codepipeline/__init__.py +2 -1
  19. aws_cdk/aws_cognito/__init__.py +232 -200
  20. aws_cdk/aws_connect/__init__.py +187 -36
  21. aws_cdk/aws_connectcampaignsv2/__init__.py +106 -12
  22. aws_cdk/aws_databrew/__init__.py +69 -1
  23. aws_cdk/aws_datasync/__init__.py +33 -28
  24. aws_cdk/aws_datazone/__init__.py +141 -41
  25. aws_cdk/aws_dlm/__init__.py +20 -10
  26. aws_cdk/aws_dms/__init__.py +736 -22
  27. aws_cdk/aws_docdb/__init__.py +134 -0
  28. aws_cdk/aws_dynamodb/__init__.py +76 -4
  29. aws_cdk/aws_ec2/__init__.py +1233 -65
  30. aws_cdk/aws_ecr/__init__.py +31 -0
  31. aws_cdk/aws_ecr_assets/__init__.py +5 -0
  32. aws_cdk/aws_ecs/__init__.py +88 -11
  33. aws_cdk/aws_efs/__init__.py +15 -8
  34. aws_cdk/aws_eks/__init__.py +114 -45
  35. aws_cdk/aws_elasticloadbalancingv2/__init__.py +2 -2
  36. aws_cdk/aws_emrserverless/__init__.py +143 -0
  37. aws_cdk/aws_events/__init__.py +50 -30
  38. aws_cdk/aws_fis/__init__.py +33 -33
  39. aws_cdk/aws_fsx/__init__.py +20 -10
  40. aws_cdk/aws_glue/__init__.py +34 -11
  41. aws_cdk/aws_greengrass/__init__.py +8 -8
  42. aws_cdk/aws_guardduty/__init__.py +1 -1
  43. aws_cdk/aws_imagebuilder/__init__.py +45 -47
  44. aws_cdk/aws_internetmonitor/__init__.py +3 -3
  45. aws_cdk/aws_invoicing/__init__.py +55 -30
  46. aws_cdk/aws_iot/__init__.py +1117 -4
  47. aws_cdk/aws_iot1click/__init__.py +17 -7
  48. aws_cdk/aws_kendra/__init__.py +4 -6
  49. aws_cdk/aws_lambda/__init__.py +15 -1
  50. aws_cdk/aws_logs/__init__.py +4011 -86
  51. aws_cdk/aws_m2/__init__.py +41 -0
  52. aws_cdk/aws_mediaconnect/__init__.py +202 -7
  53. aws_cdk/aws_mediaconvert/__init__.py +39 -0
  54. aws_cdk/aws_memorydb/__init__.py +22 -22
  55. aws_cdk/aws_networkmanager/__init__.py +758 -0
  56. aws_cdk/aws_opensearchservice/__init__.py +222 -0
  57. aws_cdk/aws_organizations/__init__.py +5 -3
  58. aws_cdk/aws_pcs/__init__.py +3038 -0
  59. aws_cdk/aws_pipes/__init__.py +10 -10
  60. aws_cdk/aws_qbusiness/__init__.py +415 -140
  61. aws_cdk/aws_quicksight/__init__.py +59795 -49748
  62. aws_cdk/aws_ram/__init__.py +5 -11
  63. aws_cdk/aws_rbin/__init__.py +28 -16
  64. aws_cdk/aws_rds/__init__.py +186 -31
  65. aws_cdk/aws_redshiftserverless/__init__.py +156 -0
  66. aws_cdk/aws_resiliencehub/__init__.py +2 -1
  67. aws_cdk/aws_route53_targets/__init__.py +5 -5
  68. aws_cdk/aws_s3/__init__.py +25 -15
  69. aws_cdk/aws_s3express/__init__.py +7 -7
  70. aws_cdk/aws_s3tables/__init__.py +683 -0
  71. aws_cdk/aws_sagemaker/__init__.py +580 -195
  72. aws_cdk/aws_secretsmanager/__init__.py +2 -0
  73. aws_cdk/aws_securityhub/__init__.py +41 -165
  74. aws_cdk/aws_servicediscovery/__init__.py +10 -3
  75. aws_cdk/aws_ses/__init__.py +190 -5
  76. aws_cdk/aws_sns/__init__.py +7 -3
  77. aws_cdk/aws_synthetics/__init__.py +29 -4
  78. aws_cdk/aws_transfer/__init__.py +8 -0
  79. aws_cdk/aws_vpclattice/__init__.py +147 -77
  80. aws_cdk/aws_wafv2/__init__.py +20 -8
  81. aws_cdk/aws_wisdom/__init__.py +162 -111
  82. aws_cdk/aws_workspaces/__init__.py +10 -4
  83. aws_cdk/cloud_assembly_schema/__init__.py +22 -0
  84. aws_cdk/custom_resources/__init__.py +31 -0
  85. aws_cdk/cx_api/__init__.py +19 -0
  86. {aws_cdk_lib-2.173.4.dist-info → aws_cdk_lib-2.174.1.dist-info}/LICENSE +1 -1
  87. {aws_cdk_lib-2.173.4.dist-info → aws_cdk_lib-2.174.1.dist-info}/METADATA +2 -2
  88. {aws_cdk_lib-2.173.4.dist-info → aws_cdk_lib-2.174.1.dist-info}/NOTICE +1 -1
  89. {aws_cdk_lib-2.173.4.dist-info → aws_cdk_lib-2.174.1.dist-info}/RECORD +91 -89
  90. {aws_cdk_lib-2.173.4.dist-info → aws_cdk_lib-2.174.1.dist-info}/WHEEL +0 -0
  91. {aws_cdk_lib-2.173.4.dist-info → aws_cdk_lib-2.174.1.dist-info}/top_level.txt +0 -0
@@ -147,11 +147,6 @@ class CfnExperimentTemplate(
147
147
  empty_target_resolution_mode="emptyTargetResolutionMode"
148
148
  ),
149
149
  experiment_report_configuration=fis.CfnExperimentTemplate.ExperimentTemplateExperimentReportConfigurationProperty(
150
- data_sources=fis.CfnExperimentTemplate.DataSourcesProperty(
151
- cloud_watch_dashboards=[fis.CfnExperimentTemplate.CloudWatchDashboardProperty(
152
- dashboard_identifier="dashboardIdentifier"
153
- )]
154
- ),
155
150
  outputs=fis.CfnExperimentTemplate.OutputsProperty(
156
151
  experiment_report_s3_configuration=fis.CfnExperimentTemplate.ExperimentReportS3ConfigurationProperty(
157
152
  bucket_name="bucketName",
@@ -162,6 +157,11 @@ class CfnExperimentTemplate(
162
157
  ),
163
158
 
164
159
  # the properties below are optional
160
+ data_sources=fis.CfnExperimentTemplate.DataSourcesProperty(
161
+ cloud_watch_dashboards=[fis.CfnExperimentTemplate.CloudWatchDashboardProperty(
162
+ dashboard_identifier="dashboardIdentifier"
163
+ )]
164
+ ),
165
165
  post_experiment_duration="postExperimentDuration",
166
166
  pre_experiment_duration="preExperimentDuration"
167
167
  ),
@@ -871,8 +871,8 @@ class CfnExperimentTemplate(
871
871
  jsii_type="aws-cdk-lib.aws_fis.CfnExperimentTemplate.ExperimentTemplateExperimentReportConfigurationProperty",
872
872
  jsii_struct_bases=[],
873
873
  name_mapping={
874
- "data_sources": "dataSources",
875
874
  "outputs": "outputs",
875
+ "data_sources": "dataSources",
876
876
  "post_experiment_duration": "postExperimentDuration",
877
877
  "pre_experiment_duration": "preExperimentDuration",
878
878
  },
@@ -881,15 +881,15 @@ class CfnExperimentTemplate(
881
881
  def __init__(
882
882
  self,
883
883
  *,
884
- data_sources: typing.Union[_IResolvable_da3f097b, typing.Union["CfnExperimentTemplate.DataSourcesProperty", typing.Dict[builtins.str, typing.Any]]],
885
884
  outputs: typing.Union[_IResolvable_da3f097b, typing.Union["CfnExperimentTemplate.OutputsProperty", typing.Dict[builtins.str, typing.Any]]],
885
+ data_sources: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union["CfnExperimentTemplate.DataSourcesProperty", typing.Dict[builtins.str, typing.Any]]]] = None,
886
886
  post_experiment_duration: typing.Optional[builtins.str] = None,
887
887
  pre_experiment_duration: typing.Optional[builtins.str] = None,
888
888
  ) -> None:
889
889
  '''Describes the report configuration for the experiment template.
890
890
 
891
- :param data_sources: The data sources for the experiment report.
892
891
  :param outputs: The output destinations of the experiment report.
892
+ :param data_sources: The data sources for the experiment report.
893
893
  :param post_experiment_duration: The duration after the experiment end time for the data sources to include in the report.
894
894
  :param pre_experiment_duration: The duration before the experiment start time for the data sources to include in the report.
895
895
 
@@ -903,11 +903,6 @@ class CfnExperimentTemplate(
903
903
  from aws_cdk import aws_fis as fis
904
904
 
905
905
  experiment_template_experiment_report_configuration_property = fis.CfnExperimentTemplate.ExperimentTemplateExperimentReportConfigurationProperty(
906
- data_sources=fis.CfnExperimentTemplate.DataSourcesProperty(
907
- cloud_watch_dashboards=[fis.CfnExperimentTemplate.CloudWatchDashboardProperty(
908
- dashboard_identifier="dashboardIdentifier"
909
- )]
910
- ),
911
906
  outputs=fis.CfnExperimentTemplate.OutputsProperty(
912
907
  experiment_report_s3_configuration=fis.CfnExperimentTemplate.ExperimentReportS3ConfigurationProperty(
913
908
  bucket_name="bucketName",
@@ -918,37 +913,31 @@ class CfnExperimentTemplate(
918
913
  ),
919
914
 
920
915
  # the properties below are optional
916
+ data_sources=fis.CfnExperimentTemplate.DataSourcesProperty(
917
+ cloud_watch_dashboards=[fis.CfnExperimentTemplate.CloudWatchDashboardProperty(
918
+ dashboard_identifier="dashboardIdentifier"
919
+ )]
920
+ ),
921
921
  post_experiment_duration="postExperimentDuration",
922
922
  pre_experiment_duration="preExperimentDuration"
923
923
  )
924
924
  '''
925
925
  if __debug__:
926
926
  type_hints = typing.get_type_hints(_typecheckingstub__51250f1a178d2946c5b1c7861f630df4cda9761e1d2636f4a9e9703799cba3a1)
927
- check_type(argname="argument data_sources", value=data_sources, expected_type=type_hints["data_sources"])
928
927
  check_type(argname="argument outputs", value=outputs, expected_type=type_hints["outputs"])
928
+ check_type(argname="argument data_sources", value=data_sources, expected_type=type_hints["data_sources"])
929
929
  check_type(argname="argument post_experiment_duration", value=post_experiment_duration, expected_type=type_hints["post_experiment_duration"])
930
930
  check_type(argname="argument pre_experiment_duration", value=pre_experiment_duration, expected_type=type_hints["pre_experiment_duration"])
931
931
  self._values: typing.Dict[builtins.str, typing.Any] = {
932
- "data_sources": data_sources,
933
932
  "outputs": outputs,
934
933
  }
934
+ if data_sources is not None:
935
+ self._values["data_sources"] = data_sources
935
936
  if post_experiment_duration is not None:
936
937
  self._values["post_experiment_duration"] = post_experiment_duration
937
938
  if pre_experiment_duration is not None:
938
939
  self._values["pre_experiment_duration"] = pre_experiment_duration
939
940
 
940
- @builtins.property
941
- def data_sources(
942
- self,
943
- ) -> typing.Union[_IResolvable_da3f097b, "CfnExperimentTemplate.DataSourcesProperty"]:
944
- '''The data sources for the experiment report.
945
-
946
- :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fis-experimenttemplate-experimenttemplateexperimentreportconfiguration.html#cfn-fis-experimenttemplate-experimenttemplateexperimentreportconfiguration-datasources
947
- '''
948
- result = self._values.get("data_sources")
949
- assert result is not None, "Required property 'data_sources' is missing"
950
- return typing.cast(typing.Union[_IResolvable_da3f097b, "CfnExperimentTemplate.DataSourcesProperty"], result)
951
-
952
941
  @builtins.property
953
942
  def outputs(
954
943
  self,
@@ -961,6 +950,17 @@ class CfnExperimentTemplate(
961
950
  assert result is not None, "Required property 'outputs' is missing"
962
951
  return typing.cast(typing.Union[_IResolvable_da3f097b, "CfnExperimentTemplate.OutputsProperty"], result)
963
952
 
953
+ @builtins.property
954
+ def data_sources(
955
+ self,
956
+ ) -> typing.Optional[typing.Union[_IResolvable_da3f097b, "CfnExperimentTemplate.DataSourcesProperty"]]:
957
+ '''The data sources for the experiment report.
958
+
959
+ :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fis-experimenttemplate-experimenttemplateexperimentreportconfiguration.html#cfn-fis-experimenttemplate-experimenttemplateexperimentreportconfiguration-datasources
960
+ '''
961
+ result = self._values.get("data_sources")
962
+ return typing.cast(typing.Optional[typing.Union[_IResolvable_da3f097b, "CfnExperimentTemplate.DataSourcesProperty"]], result)
963
+
964
964
  @builtins.property
965
965
  def post_experiment_duration(self) -> typing.Optional[builtins.str]:
966
966
  '''The duration after the experiment end time for the data sources to include in the report.
@@ -1634,11 +1634,6 @@ class CfnExperimentTemplateProps:
1634
1634
  empty_target_resolution_mode="emptyTargetResolutionMode"
1635
1635
  ),
1636
1636
  experiment_report_configuration=fis.CfnExperimentTemplate.ExperimentTemplateExperimentReportConfigurationProperty(
1637
- data_sources=fis.CfnExperimentTemplate.DataSourcesProperty(
1638
- cloud_watch_dashboards=[fis.CfnExperimentTemplate.CloudWatchDashboardProperty(
1639
- dashboard_identifier="dashboardIdentifier"
1640
- )]
1641
- ),
1642
1637
  outputs=fis.CfnExperimentTemplate.OutputsProperty(
1643
1638
  experiment_report_s3_configuration=fis.CfnExperimentTemplate.ExperimentReportS3ConfigurationProperty(
1644
1639
  bucket_name="bucketName",
@@ -1649,6 +1644,11 @@ class CfnExperimentTemplateProps:
1649
1644
  ),
1650
1645
 
1651
1646
  # the properties below are optional
1647
+ data_sources=fis.CfnExperimentTemplate.DataSourcesProperty(
1648
+ cloud_watch_dashboards=[fis.CfnExperimentTemplate.CloudWatchDashboardProperty(
1649
+ dashboard_identifier="dashboardIdentifier"
1650
+ )]
1651
+ ),
1652
1652
  post_experiment_duration="postExperimentDuration",
1653
1653
  pre_experiment_duration="preExperimentDuration"
1654
1654
  ),
@@ -2201,8 +2201,8 @@ def _typecheckingstub__869a915a73baa324d4389a3b1a7ec2993ddb272931a4d1ca1bca643f0
2201
2201
 
2202
2202
  def _typecheckingstub__51250f1a178d2946c5b1c7861f630df4cda9761e1d2636f4a9e9703799cba3a1(
2203
2203
  *,
2204
- data_sources: typing.Union[_IResolvable_da3f097b, typing.Union[CfnExperimentTemplate.DataSourcesProperty, typing.Dict[builtins.str, typing.Any]]],
2205
2204
  outputs: typing.Union[_IResolvable_da3f097b, typing.Union[CfnExperimentTemplate.OutputsProperty, typing.Dict[builtins.str, typing.Any]]],
2205
+ data_sources: typing.Optional[typing.Union[_IResolvable_da3f097b, typing.Union[CfnExperimentTemplate.DataSourcesProperty, typing.Dict[builtins.str, typing.Any]]]] = None,
2206
2206
  post_experiment_duration: typing.Optional[builtins.str] = None,
2207
2207
  pre_experiment_duration: typing.Optional[builtins.str] = None,
2208
2208
  ) -> None:
@@ -393,7 +393,7 @@ class CfnDataRepositoryAssociation(
393
393
  '''
394
394
  :param scope: Scope in which this resource is defined.
395
395
  :param id: Construct identifier for this resource (unique in its scope).
396
- :param data_repository_path: The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format ``s3://bucket-name/prefix/`` . This path specifies where in the S3 data repository files will be imported from or exported to.
396
+ :param data_repository_path: The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format ``s3://myBucket/myPrefix/`` . This path specifies where in the S3 data repository files will be imported from or exported to.
397
397
  :param file_system_id: The ID of the file system on which the data repository association is configured.
398
398
  :param file_system_path: A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as ``/ns1/`` ) or subdirectory (such as ``/ns1/subdir/`` ) that will be mapped 1-1 with ``DataRepositoryPath`` . The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path ``/ns1/`` , then you cannot link another data repository with file system path ``/ns1/ns2`` . This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. .. epigraph:: If you specify only a forward slash ( ``/`` ) as the file system path, you can link only one data repository to the file system. You can only specify "/" as the file system path for the first data repository associated with a file system.
399
399
  :param batch_import_meta_data_on_create: A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to ``true`` .
@@ -824,7 +824,7 @@ class CfnDataRepositoryAssociationProps:
824
824
  ) -> None:
825
825
  '''Properties for defining a ``CfnDataRepositoryAssociation``.
826
826
 
827
- :param data_repository_path: The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format ``s3://bucket-name/prefix/`` . This path specifies where in the S3 data repository files will be imported from or exported to.
827
+ :param data_repository_path: The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format ``s3://myBucket/myPrefix/`` . This path specifies where in the S3 data repository files will be imported from or exported to.
828
828
  :param file_system_id: The ID of the file system on which the data repository association is configured.
829
829
  :param file_system_path: A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as ``/ns1/`` ) or subdirectory (such as ``/ns1/subdir/`` ) that will be mapped 1-1 with ``DataRepositoryPath`` . The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path ``/ns1/`` , then you cannot link another data repository with file system path ``/ns1/ns2`` . This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory. .. epigraph:: If you specify only a forward slash ( ``/`` ) as the file system path, you can link only one data repository to the file system. You can only specify "/" as the file system path for the first data repository associated with a file system.
830
830
  :param batch_import_meta_data_on_create: A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to ``true`` .
@@ -890,7 +890,7 @@ class CfnDataRepositoryAssociationProps:
890
890
  def data_repository_path(self) -> builtins.str:
891
891
  '''The path to the Amazon S3 data repository that will be linked to the file system.
892
892
 
893
- The path can be an S3 bucket or prefix in the format ``s3://bucket-name/prefix/`` . This path specifies where in the S3 data repository files will be imported from or exported to.
893
+ The path can be an S3 bucket or prefix in the format ``s3://myBucket/myPrefix/`` . This path specifies where in the S3 data repository files will be imported from or exported to.
894
894
 
895
895
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-fsx-datarepositoryassociation.html#cfn-fsx-datarepositoryassociation-datarepositorypath
896
896
  '''
@@ -2565,7 +2565,7 @@ class CfnFileSystem(
2565
2565
  :param endpoint_ip_address_range: (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system will be created. By default in the Amazon FSx API and Amazon FSx console, Amazon FSx selects an available /28 IP address range for you from one of the VPC's CIDR ranges. You can have overlapping endpoint IP addresses for file systems deployed in the same VPC/route tables.
2566
2566
  :param options: To delete a file system if there are child volumes present below the root volume, use the string ``DELETE_CHILD_VOLUMES_AND_SNAPSHOTS`` . If your file system has child volumes and you don't use this option, the delete request will fail.
2567
2567
  :param preferred_subnet_id: Required when ``DeploymentType`` is set to ``MULTI_AZ_1`` . This specifies the subnet in which you want the preferred file server to be located.
2568
- :param read_cache_configuration:
2568
+ :param read_cache_configuration: Specifies the optional provisioned SSD read cache on file systems that use the Intelligent-Tiering storage class.
2569
2569
  :param root_volume_configuration: The configuration Amazon FSx uses when creating the root value of the Amazon FSx for OpenZFS file system. All volumes are children of the root volume.
2570
2570
  :param route_table_ids: (Multi-AZ only) Specifies the route tables in which Amazon FSx creates the rules for routing traffic to the correct file server. You should specify all virtual private cloud (VPC) route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table.
2571
2571
  :param throughput_capacity: Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MBps). Valid values depend on the DeploymentType you choose, as follows: - For ``MULTI_AZ_1`` and ``SINGLE_AZ_2`` , valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MBps. - For ``SINGLE_AZ_1`` , valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MBps. You pay for additional throughput capacity that you provision.
@@ -2783,7 +2783,8 @@ class CfnFileSystem(
2783
2783
  def read_cache_configuration(
2784
2784
  self,
2785
2785
  ) -> typing.Optional[typing.Union[_IResolvable_da3f097b, "CfnFileSystem.ReadCacheConfigurationProperty"]]:
2786
- '''
2786
+ '''Specifies the optional provisioned SSD read cache on file systems that use the Intelligent-Tiering storage class.
2787
+
2787
2788
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fsx-filesystem-openzfsconfiguration.html#cfn-fsx-filesystem-openzfsconfiguration-readcacheconfiguration
2788
2789
  '''
2789
2790
  result = self._values.get("read_cache_configuration")
@@ -2867,9 +2868,10 @@ class CfnFileSystem(
2867
2868
  size_gib: typing.Optional[jsii.Number] = None,
2868
2869
  sizing_mode: typing.Optional[builtins.str] = None,
2869
2870
  ) -> None:
2870
- '''
2871
- :param size_gib:
2872
- :param sizing_mode:
2871
+ '''The configuration for the optional provisioned SSD read cache on file systems that use the Intelligent-Tiering storage class.
2872
+
2873
+ :param size_gib: Required if ``SizingMode`` is set to ``USER_PROVISIONED`` . Specifies the size of the file system's SSD read cache, in gibibytes (GiB).
2874
+ :param sizing_mode: Specifies how the provisioned SSD read cache is sized, as follows:. - Set to ``NO_CACHE`` if you do not want to use an SSD read cache with your Intelligent-Tiering file system. - Set to ``USER_PROVISIONED`` to specify the exact size of your SSD read cache. - Set to ``PROPORTIONAL_TO_THROUGHPUT_CAPACITY`` to have your SSD read cache automatically sized based on your throughput capacity.
2873
2875
 
2874
2876
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fsx-filesystem-readcacheconfiguration.html
2875
2877
  :exampleMetadata: fixture=_generated
@@ -2897,7 +2899,10 @@ class CfnFileSystem(
2897
2899
 
2898
2900
  @builtins.property
2899
2901
  def size_gib(self) -> typing.Optional[jsii.Number]:
2900
- '''
2902
+ '''Required if ``SizingMode`` is set to ``USER_PROVISIONED`` .
2903
+
2904
+ Specifies the size of the file system's SSD read cache, in gibibytes (GiB).
2905
+
2901
2906
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fsx-filesystem-readcacheconfiguration.html#cfn-fsx-filesystem-readcacheconfiguration-sizegib
2902
2907
  '''
2903
2908
  result = self._values.get("size_gib")
@@ -2905,7 +2910,12 @@ class CfnFileSystem(
2905
2910
 
2906
2911
  @builtins.property
2907
2912
  def sizing_mode(self) -> typing.Optional[builtins.str]:
2908
- '''
2913
+ '''Specifies how the provisioned SSD read cache is sized, as follows:.
2914
+
2915
+ - Set to ``NO_CACHE`` if you do not want to use an SSD read cache with your Intelligent-Tiering file system.
2916
+ - Set to ``USER_PROVISIONED`` to specify the exact size of your SSD read cache.
2917
+ - Set to ``PROPORTIONAL_TO_THROUGHPUT_CAPACITY`` to have your SSD read cache automatically sized based on your throughput capacity.
2918
+
2909
2919
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-fsx-filesystem-readcacheconfiguration.html#cfn-fsx-filesystem-readcacheconfiguration-sizingmode
2910
2920
  '''
2911
2921
  result = self._values.get("sizing_mode")
@@ -1068,7 +1068,7 @@ class CfnConnection(
1068
1068
  ) -> None:
1069
1069
  '''A structure that is used to specify a connection to create or update.
1070
1070
 
1071
- :param connection_type: The type of the connection. Currently, these types are supported:. - ``JDBC`` - Designates a connection to a database through Java Database Connectivity (JDBC). ``JDBC`` Connections use the following ConnectionParameters. - Required: All of ( ``HOST`` , ``PORT`` , ``JDBC_ENGINE`` ) or ``JDBC_CONNECTION_URL`` . - Required: All of ( ``USERNAME`` , ``PASSWORD`` ) or ``SECRET_ID`` . - Optional: ``JDBC_ENFORCE_SSL`` , ``CUSTOM_JDBC_CERT`` , ``CUSTOM_JDBC_CERT_STRING`` , ``SKIP_CUSTOM_JDBC_CERT_VALIDATION`` . These parameters are used to configure SSL with JDBC. - ``KAFKA`` - Designates a connection to an Apache Kafka streaming platform. ``KAFKA`` Connections use the following ConnectionParameters. - Required: ``KAFKA_BOOTSTRAP_SERVERS`` . - Optional: ``KAFKA_SSL_ENABLED`` , ``KAFKA_CUSTOM_CERT`` , ``KAFKA_SKIP_CUSTOM_CERT_VALIDATION`` . These parameters are used to configure SSL with ``KAFKA`` . - Optional: ``KAFKA_CLIENT_KEYSTORE`` , ``KAFKA_CLIENT_KEYSTORE_PASSWORD`` , ``KAFKA_CLIENT_KEY_PASSWORD`` , ``ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD`` , ``ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD`` . These parameters are used to configure TLS client configuration with SSL in ``KAFKA`` . - Optional: ``KAFKA_SASL_MECHANISM`` . Can be specified as ``SCRAM-SHA-512`` , ``GSSAPI`` , or ``AWS_MSK_IAM`` . - Optional: ``KAFKA_SASL_SCRAM_USERNAME`` , ``KAFKA_SASL_SCRAM_PASSWORD`` , ``ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD`` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with ``KAFKA`` . - Optional: ``KAFKA_SASL_GSSAPI_KEYTAB`` , ``KAFKA_SASL_GSSAPI_KRB5_CONF`` , ``KAFKA_SASL_GSSAPI_SERVICE`` , ``KAFKA_SASL_GSSAPI_PRINCIPAL`` . These parameters are used to configure SASL/GSSAPI authentication with ``KAFKA`` . - ``MONGODB`` - Designates a connection to a MongoDB document database. ``MONGODB`` Connections use the following ConnectionParameters. - Required: ``CONNECTION_URL`` . 
- Required: All of ( ``USERNAME`` , ``PASSWORD`` ) or ``SECRET_ID`` . - ``SALESFORCE`` - Designates a connection to Salesforce using OAuth authencation. - Requires the ``AuthenticationConfiguration`` member to be configured. - ``VIEW_VALIDATION_REDSHIFT`` - Designates a connection used for view validation by Amazon Redshift. - ``VIEW_VALIDATION_ATHENA`` - Designates a connection used for view validation by Amazon Athena. - ``NETWORK`` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC). ``NETWORK`` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements. - ``MARKETPLACE`` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue . ``MARKETPLACE`` Connections use the following ConnectionParameters. - Required: ``CONNECTOR_TYPE`` , ``CONNECTOR_URL`` , ``CONNECTOR_CLASS_NAME`` , ``CONNECTION_URL`` . - Required for ``JDBC`` ``CONNECTOR_TYPE`` connections: All of ( ``USERNAME`` , ``PASSWORD`` ) or ``SECRET_ID`` . - ``CUSTOM`` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue . ``SFTP`` is not supported. For more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult `AWS Glue connection properties <https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html>`_ . For more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult `Using connectors and connections <https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html>`_ .
1071
+ :param connection_type: The type of the connection. Currently, these types are supported:. - ``JDBC`` - Designates a connection to a database through Java Database Connectivity (JDBC). ``JDBC`` Connections use the following ConnectionParameters. - Required: All of ( ``HOST`` , ``PORT`` , ``JDBC_ENGINE`` ) or ``JDBC_CONNECTION_URL`` . - Required: All of ( ``USERNAME`` , ``PASSWORD`` ) or ``SECRET_ID`` . - Optional: ``JDBC_ENFORCE_SSL`` , ``CUSTOM_JDBC_CERT`` , ``CUSTOM_JDBC_CERT_STRING`` , ``SKIP_CUSTOM_JDBC_CERT_VALIDATION`` . These parameters are used to configure SSL with JDBC. - ``KAFKA`` - Designates a connection to an Apache Kafka streaming platform. ``KAFKA`` Connections use the following ConnectionParameters. - Required: ``KAFKA_BOOTSTRAP_SERVERS`` . - Optional: ``KAFKA_SSL_ENABLED`` , ``KAFKA_CUSTOM_CERT`` , ``KAFKA_SKIP_CUSTOM_CERT_VALIDATION`` . These parameters are used to configure SSL with ``KAFKA`` . - Optional: ``KAFKA_CLIENT_KEYSTORE`` , ``KAFKA_CLIENT_KEYSTORE_PASSWORD`` , ``KAFKA_CLIENT_KEY_PASSWORD`` , ``ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD`` , ``ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD`` . These parameters are used to configure TLS client configuration with SSL in ``KAFKA`` . - Optional: ``KAFKA_SASL_MECHANISM`` . Can be specified as ``SCRAM-SHA-512`` , ``GSSAPI`` , or ``AWS_MSK_IAM`` . - Optional: ``KAFKA_SASL_SCRAM_USERNAME`` , ``KAFKA_SASL_SCRAM_PASSWORD`` , ``ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD`` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with ``KAFKA`` . - Optional: ``KAFKA_SASL_GSSAPI_KEYTAB`` , ``KAFKA_SASL_GSSAPI_KRB5_CONF`` , ``KAFKA_SASL_GSSAPI_SERVICE`` , ``KAFKA_SASL_GSSAPI_PRINCIPAL`` . These parameters are used to configure SASL/GSSAPI authentication with ``KAFKA`` . - ``MONGODB`` - Designates a connection to a MongoDB document database. ``MONGODB`` Connections use the following ConnectionParameters. - Required: ``CONNECTION_URL`` . 
- Required: All of ( ``USERNAME`` , ``PASSWORD`` ) or ``SECRET_ID`` . - ``VIEW_VALIDATION_REDSHIFT`` - Designates a connection used for view validation by Amazon Redshift. - ``VIEW_VALIDATION_ATHENA`` - Designates a connection used for view validation by Amazon Athena. - ``NETWORK`` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC). ``NETWORK`` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements. - ``MARKETPLACE`` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue . ``MARKETPLACE`` Connections use the following ConnectionParameters. - Required: ``CONNECTOR_TYPE`` , ``CONNECTOR_URL`` , ``CONNECTOR_CLASS_NAME`` , ``CONNECTION_URL`` . - Required for ``JDBC`` ``CONNECTOR_TYPE`` connections: All of ( ``USERNAME`` , ``PASSWORD`` ) or ``SECRET_ID`` . - ``CUSTOM`` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue . Additionally, a ``ConnectionType`` for the following SaaS connectors is supported: - ``FACEBOOKADS`` - Designates a connection to Facebook Ads. - ``GOOGLEADS`` - Designates a connection to Google Ads. - ``GOOGLESHEETS`` - Designates a connection to Google Sheets. - ``GOOGLEANALYTICS4`` - Designates a connection to Google Analytics 4. - ``HUBSPOT`` - Designates a connection to HubSpot. - ``INSTAGRAMADS`` - Designates a connection to Instagram Ads. - ``INTERCOM`` - Designates a connection to Intercom. - ``JIRACLOUD`` - Designates a connection to Jira Cloud. - ``MARKETO`` - Designates a connection to Adobe Marketo Engage. - ``NETSUITEERP`` - Designates a connection to Oracle NetSuite. - ``SALESFORCE`` - Designates a connection to Salesforce using OAuth authentication. 
- ``SALESFORCEMARKETINGCLOUD`` - Designates a connection to Salesforce Marketing Cloud. - ``SALESFORCEPARDOT`` - Designates a connection to Salesforce Marketing Cloud Account Engagement (MCAE). - ``SAPODATA`` - Designates a connection to SAP OData. - ``SERVICENOW`` - Designates a connection to ServiceNow. - ``SLACK`` - Designates a connection to Slack. - ``SNAPCHATADS`` - Designates a connection to Snapchat Ads. - ``STRIPE`` - Designates a connection to Stripe. - ``ZENDESK`` - Designates a connection to Zendesk. - ``ZOHOCRM`` - Designates a connection to Zoho CRM. For more information on the connection parameters needed for a particular connector, see the documentation for the connector in `Adding an AWS Glue connection <https://docs.aws.amazon.com/glue/latest/dg/console-connections.html>`_ in the AWS Glue User Guide. ``SFTP`` is not supported. For more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult `AWS Glue connection properties <https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html>`_ . For more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult `Using connectors and connections <https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html>`_ .
1072
1072
  :param connection_properties: These key-value pairs define parameters for the connection.
1073
1073
  :param description: The description of the connection.
1074
1074
  :param match_criteria: A list of criteria that can be used in selecting this connection.
@@ -1150,8 +1150,6 @@ class CfnConnection(
1150
1150
 
1151
1151
  - Required: ``CONNECTION_URL`` .
1152
1152
  - Required: All of ( ``USERNAME`` , ``PASSWORD`` ) or ``SECRET_ID`` .
1153
- - ``SALESFORCE`` - Designates a connection to Salesforce using OAuth authencation.
1154
- - Requires the ``AuthenticationConfiguration`` member to be configured.
1155
1153
  - ``VIEW_VALIDATION_REDSHIFT`` - Designates a connection used for view validation by Amazon Redshift.
1156
1154
  - ``VIEW_VALIDATION_ATHENA`` - Designates a connection used for view validation by Amazon Athena.
1157
1155
  - ``NETWORK`` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).
@@ -1166,6 +1164,31 @@ class CfnConnection(
1166
1164
  - Required for ``JDBC`` ``CONNECTOR_TYPE`` connections: All of ( ``USERNAME`` , ``PASSWORD`` ) or ``SECRET_ID`` .
1167
1165
  - ``CUSTOM`` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .
1168
1166
 
1167
+ Additionally, a ``ConnectionType`` for the following SaaS connectors is supported:
1168
+
1169
+ - ``FACEBOOKADS`` - Designates a connection to Facebook Ads.
1170
+ - ``GOOGLEADS`` - Designates a connection to Google Ads.
1171
+ - ``GOOGLESHEETS`` - Designates a connection to Google Sheets.
1172
+ - ``GOOGLEANALYTICS4`` - Designates a connection to Google Analytics 4.
1173
+ - ``HUBSPOT`` - Designates a connection to HubSpot.
1174
+ - ``INSTAGRAMADS`` - Designates a connection to Instagram Ads.
1175
+ - ``INTERCOM`` - Designates a connection to Intercom.
1176
+ - ``JIRACLOUD`` - Designates a connection to Jira Cloud.
1177
+ - ``MARKETO`` - Designates a connection to Adobe Marketo Engage.
1178
+ - ``NETSUITEERP`` - Designates a connection to Oracle NetSuite.
1179
+ - ``SALESFORCE`` - Designates a connection to Salesforce using OAuth authentication.
1180
+ - ``SALESFORCEMARKETINGCLOUD`` - Designates a connection to Salesforce Marketing Cloud.
1181
+ - ``SALESFORCEPARDOT`` - Designates a connection to Salesforce Marketing Cloud Account Engagement (MCAE).
1182
+ - ``SAPODATA`` - Designates a connection to SAP OData.
1183
+ - ``SERVICENOW`` - Designates a connection to ServiceNow.
1184
+ - ``SLACK`` - Designates a connection to Slack.
1185
+ - ``SNAPCHATADS`` - Designates a connection to Snapchat Ads.
1186
+ - ``STRIPE`` - Designates a connection to Stripe.
1187
+ - ``ZENDESK`` - Designates a connection to Zendesk.
1188
+ - ``ZOHOCRM`` - Designates a connection to Zoho CRM.
1189
+
1190
+ For more information on the connection parameters needed for a particular connector, see the documentation for the connector in `Adding an AWS Glue connection <https://docs.aws.amazon.com/glue/latest/dg/console-connections.html>`_ in the AWS Glue User Guide.
1191
+
1169
1192
  ``SFTP`` is not supported.
1170
1193
 
1171
1194
  For more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult `AWS Glue connection properties <https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html>`_ .
@@ -6099,7 +6122,7 @@ class CfnJob(
6099
6122
  :param security_configuration: The name of the ``SecurityConfiguration`` structure to be used with this job.
6100
6123
  :param tags: The tags to use with this job.
6101
6124
  :param timeout: The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
6102
- :param worker_type: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. - For the ``G.1X`` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. - For the ``G.2X`` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. - For the ``G.4X`` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). - For the ``G.8X`` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the ``G.4X`` worker type. 
- For the ``G.025X`` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for AWS Glue version 3.0 streaming jobs. - For the ``Z.2X`` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
6125
+ :param worker_type: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. - For the ``G.1X`` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs. - For the ``G.2X`` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs. - For the ``G.4X`` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). - For the ``G.8X`` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the ``G.4X`` worker type. - For the ``G.025X`` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. 
This worker type is only available for AWS Glue version 3.0 or later streaming jobs. - For the ``Z.2X`` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.
6103
6126
  '''
6104
6127
  if __debug__:
6105
6128
  type_hints = typing.get_type_hints(_typecheckingstub__2bea698eff4ea1d2bc08b1ab842f318f77ba719c0241a0959453e26989b5b53e)
@@ -6864,7 +6887,7 @@ class CfnJobProps:
6864
6887
  :param security_configuration: The name of the ``SecurityConfiguration`` structure to be used with this job.
6865
6888
  :param tags: The tags to use with this job.
6866
6889
  :param timeout: The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours).
6867
- :param worker_type: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. - For the ``G.1X`` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. - For the ``G.2X`` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. - For the ``G.4X`` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). - For the ``G.8X`` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the ``G.4X`` worker type. 
- For the ``G.025X`` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for AWS Glue version 3.0 streaming jobs. - For the ``Z.2X`` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
6890
+ :param worker_type: The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. - For the ``G.1X`` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs. - For the ``G.2X`` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs. - For the ``G.4X`` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). - For the ``G.8X`` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the ``G.4X`` worker type. - For the ``G.025X`` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. 
This worker type is only available for AWS Glue version 3.0 or later streaming jobs. - For the ``Z.2X`` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.
6868
6891
 
6869
6892
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-glue-job.html
6870
6893
  :exampleMetadata: fixture=_generated
@@ -7251,12 +7274,12 @@ class CfnJobProps:
7251
7274
 
7252
7275
  Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
7253
7276
 
7254
- - For the ``G.1X`` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
7255
- - For the ``G.2X`` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.
7256
- - For the ``G.4X`` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
7257
- - For the ``G.8X`` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the ``G.4X`` worker type.
7258
- - For the ``G.025X`` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for AWS Glue version 3.0 streaming jobs.
7259
- - For the ``Z.2X`` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
7277
+ - For the ``G.1X`` worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
7278
+ - For the ``G.2X`` worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.
7279
+ - For the ``G.4X`` worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs in the following AWS Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).
7280
+ - For the ``G.8X`` worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for AWS Glue version 3.0 or later Spark ETL jobs, in the same AWS Regions as supported for the ``G.4X`` worker type.
7281
+ - For the ``G.025X`` worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for AWS Glue version 3.0 or later streaming jobs.
7282
+ - For the ``Z.2X`` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.
7260
7283
 
7261
7284
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-glue-job.html#cfn-glue-job-workertype
7262
7285
  '''
@@ -7339,7 +7339,7 @@ class CfnResourceDefinition(
7339
7339
  :param local_device_resource_data: Settings for a local device resource.
7340
7340
  :param local_volume_resource_data: Settings for a local volume resource.
7341
7341
  :param s3_machine_learning_model_resource_data: Settings for a machine learning resource stored in Amazon S3 .
7342
- :param sage_maker_machine_learning_model_resource_data: Settings for a machine learning resource saved as an SageMaker training job.
7342
+ :param sage_maker_machine_learning_model_resource_data: Settings for a machine learning resource saved as a SageMaker AI training job.
7343
7343
  :param secrets_manager_secret_resource_data: Settings for a secret resource.
7344
7344
 
7345
7345
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-greengrass-resourcedefinition-resourcedatacontainer.html
@@ -7459,7 +7459,7 @@ class CfnResourceDefinition(
7459
7459
  def sage_maker_machine_learning_model_resource_data(
7460
7460
  self,
7461
7461
  ) -> typing.Optional[typing.Union[_IResolvable_da3f097b, "CfnResourceDefinition.SageMakerMachineLearningModelResourceDataProperty"]]:
7462
- '''Settings for a machine learning resource saved as an SageMaker training job.
7462
+ '''Settings for a machine learning resource saved as a SageMaker AI training job.
7463
7463
 
7464
7464
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-greengrass-resourcedefinition-resourcedatacontainer.html#cfn-greengrass-resourcedefinition-resourcedatacontainer-sagemakermachinelearningmodelresourcedata
7465
7465
  '''
@@ -7972,7 +7972,7 @@ class CfnResourceDefinition(
7972
7972
  In an AWS CloudFormation template, ``SageMakerMachineLearningModelResourceData`` can be used in the ```ResourceDataContainer`` <https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-greengrass-resourcedefinition-resourcedatacontainer.html>`_ property type.
7973
7973
 
7974
7974
  :param destination_path: The absolute local path of the resource inside the Lambda environment.
7975
- :param sage_maker_job_arn: The Amazon Resource Name (ARN) of the Amazon SageMaker training job that represents the source model.
7975
+ :param sage_maker_job_arn: The Amazon Resource Name (ARN) of the Amazon SageMaker AI training job that represents the source model.
7976
7976
  :param owner_setting: The owner setting for the downloaded machine learning resource. For more information, see `Access Machine Learning Resources from Lambda Functions <https://docs.aws.amazon.com/greengrass/v1/developerguide/access-ml-resources.html>`_ in the *Developer Guide* .
7977
7977
 
7978
7978
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-greengrass-resourcedefinition-sagemakermachinelearningmodelresourcedata.html
@@ -8019,7 +8019,7 @@ class CfnResourceDefinition(
8019
8019
 
8020
8020
  @builtins.property
8021
8021
  def sage_maker_job_arn(self) -> builtins.str:
8022
- '''The Amazon Resource Name (ARN) of the Amazon SageMaker training job that represents the source model.
8022
+ '''The Amazon Resource Name (ARN) of the Amazon SageMaker AI training job that represents the source model.
8023
8023
 
8024
8024
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-greengrass-resourcedefinition-sagemakermachinelearningmodelresourcedata.html#cfn-greengrass-resourcedefinition-sagemakermachinelearningmodelresourcedata-sagemakerjobarn
8025
8025
  '''
@@ -8793,7 +8793,7 @@ class CfnResourceDefinitionVersion(
8793
8793
  :param local_device_resource_data: Settings for a local device resource.
8794
8794
  :param local_volume_resource_data: Settings for a local volume resource.
8795
8795
  :param s3_machine_learning_model_resource_data: Settings for a machine learning resource stored in Amazon S3 .
8796
- :param sage_maker_machine_learning_model_resource_data: Settings for a machine learning resource saved as an SageMaker training job.
8796
+ :param sage_maker_machine_learning_model_resource_data: Settings for a machine learning resource saved as a SageMaker AI training job.
8797
8797
  :param secrets_manager_secret_resource_data: Settings for a secret resource.
8798
8798
 
8799
8799
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-greengrass-resourcedefinitionversion-resourcedatacontainer.html
@@ -8913,7 +8913,7 @@ class CfnResourceDefinitionVersion(
8913
8913
  def sage_maker_machine_learning_model_resource_data(
8914
8914
  self,
8915
8915
  ) -> typing.Optional[typing.Union[_IResolvable_da3f097b, "CfnResourceDefinitionVersion.SageMakerMachineLearningModelResourceDataProperty"]]:
8916
- '''Settings for a machine learning resource saved as an SageMaker training job.
8916
+ '''Settings for a machine learning resource saved as a SageMaker AI training job.
8917
8917
 
8918
8918
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-greengrass-resourcedefinitionversion-resourcedatacontainer.html#cfn-greengrass-resourcedefinitionversion-resourcedatacontainer-sagemakermachinelearningmodelresourcedata
8919
8919
  '''
@@ -9308,7 +9308,7 @@ class CfnResourceDefinitionVersion(
9308
9308
  In an AWS CloudFormation template, ``SageMakerMachineLearningModelResourceData`` can be used in the ```ResourceDataContainer`` <https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-greengrass-resourcedefinitionversion-resourcedatacontainer.html>`_ property type.
9309
9309
 
9310
9310
  :param destination_path: The absolute local path of the resource inside the Lambda environment.
9311
- :param sage_maker_job_arn: The Amazon Resource Name (ARN) of the Amazon SageMaker training job that represents the source model.
9311
+ :param sage_maker_job_arn: The Amazon Resource Name (ARN) of the Amazon SageMaker AI training job that represents the source model.
9312
9312
  :param owner_setting: The owner setting for the downloaded machine learning resource. For more information, see `Access Machine Learning Resources from Lambda Functions <https://docs.aws.amazon.com/greengrass/v1/developerguide/access-ml-resources.html>`_ in the *Developer Guide* .
9313
9313
 
9314
9314
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-greengrass-resourcedefinitionversion-sagemakermachinelearningmodelresourcedata.html
@@ -9355,7 +9355,7 @@ class CfnResourceDefinitionVersion(
9355
9355
 
9356
9356
  @builtins.property
9357
9357
  def sage_maker_job_arn(self) -> builtins.str:
9358
- '''The Amazon Resource Name (ARN) of the Amazon SageMaker training job that represents the source model.
9358
+ '''The Amazon Resource Name (ARN) of the Amazon SageMaker AI training job that represents the source model.
9359
9359
 
9360
9360
  :see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-greengrass-resourcedefinitionversion-sagemakermachinelearningmodelresourcedata.html#cfn-greengrass-resourcedefinitionversion-sagemakermachinelearningmodelresourcedata-sagemakerjobarn
9361
9361
  '''
@@ -2858,7 +2858,7 @@ class CfnMalwareProtectionPlan(
2858
2858
  )
2859
2859
  class TagItemProperty:
2860
2860
  def __init__(self, *, key: builtins.str, value: builtins.str) -> None:
2861
- '''Contains information about a tag.
2861
+ '''Describes a tag.
2862
2862
 
2863
2863
  :param key: The tag key.
2864
2864
  :param value: The tag value.