@aws-sdk/client-glue 3.857.0 → 3.858.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/dist-cjs/index.js +54 -18
  2. package/dist-es/models/models_0.js +8 -5
  3. package/dist-es/models/models_1.js +5 -0
  4. package/dist-es/models/models_2.js +0 -9
  5. package/dist-es/models/models_3.js +9 -0
  6. package/dist-es/protocols/Aws_json1_1.js +26 -0
  7. package/dist-types/commands/BatchGetJobsCommand.d.ts +181 -67
  8. package/dist-types/commands/CreateJobCommand.d.ts +181 -67
  9. package/dist-types/commands/GetClassifierCommand.d.ts +2 -1
  10. package/dist-types/commands/GetClassifiersCommand.d.ts +1 -1
  11. package/dist-types/commands/GetColumnStatisticsForPartitionCommand.d.ts +1 -2
  12. package/dist-types/commands/GetJobCommand.d.ts +181 -67
  13. package/dist-types/commands/GetJobsCommand.d.ts +181 -67
  14. package/dist-types/commands/ModifyIntegrationCommand.d.ts +2 -1
  15. package/dist-types/commands/PutDataCatalogEncryptionSettingsCommand.d.ts +1 -1
  16. package/dist-types/commands/PutDataQualityProfileAnnotationCommand.d.ts +1 -1
  17. package/dist-types/commands/PutResourcePolicyCommand.d.ts +1 -1
  18. package/dist-types/commands/UpdateJobCommand.d.ts +181 -67
  19. package/dist-types/models/models_0.d.ts +445 -448
  20. package/dist-types/models/models_1.d.ts +430 -231
  21. package/dist-types/models/models_2.d.ts +232 -232
  22. package/dist-types/models/models_3.d.ts +298 -27
  23. package/dist-types/ts3.4/commands/GetClassifierCommand.d.ts +2 -4
  24. package/dist-types/ts3.4/commands/GetClassifiersCommand.d.ts +1 -1
  25. package/dist-types/ts3.4/commands/GetColumnStatisticsForPartitionCommand.d.ts +4 -2
  26. package/dist-types/ts3.4/commands/ModifyIntegrationCommand.d.ts +2 -4
  27. package/dist-types/ts3.4/commands/PutDataCatalogEncryptionSettingsCommand.d.ts +1 -1
  28. package/dist-types/ts3.4/commands/PutDataQualityProfileAnnotationCommand.d.ts +1 -1
  29. package/dist-types/ts3.4/commands/PutResourcePolicyCommand.d.ts +1 -1
  30. package/dist-types/ts3.4/models/models_0.d.ts +94 -75
  31. package/dist-types/ts3.4/models/models_1.d.ts +82 -54
  32. package/dist-types/ts3.4/models/models_2.d.ts +56 -59
  33. package/dist-types/ts3.4/models/models_3.d.ts +69 -4
  34. package/package.json +5 -5
@@ -1,8 +1,238 @@
1
1
  import { ExceptionOptionType as __ExceptionOptionType } from "@smithy/smithy-client";
2
2
  import { GlueServiceException as __BaseException } from "./GlueServiceException";
3
- import { Action, Aggregate, AmazonRedshiftSource, AmazonRedshiftTarget, AthenaConnectorSource, AuthenticationConfigurationInput, BasicCatalogTarget, CatalogDeltaSource, CatalogHudiSource, CatalogKafkaSource, CatalogKinesisSource, CatalogSource, Column, ConnectionsList, ConnectorDataSource, ConnectorDataTarget, CrawlerTargets, CustomCode, DataSource, DirectJDBCSource, DirectKafkaSource, DirectKinesisSource, DropDuplicates, DropFields, DropNullFields, DynamicTransform, DynamoDBCatalogSource, ErrorDetail, EvaluateDataQuality, EvaluateDataQualityMultiFrame, EventBatchingCondition, ExecutionClass, ExecutionProperty, FillMissingValues, Filter, GovernedCatalogSource, GovernedCatalogTarget, JDBCConnectorSource, JDBCConnectorTarget, JobCommand, JobMode, Join, LakeFormationConfiguration, LineageConfiguration, Merge, MicrosoftSQLServerCatalogSource, MicrosoftSQLServerCatalogTarget, MySQLCatalogSource, MySQLCatalogTarget, NotificationProperty, OracleSQLCatalogSource, OracleSQLCatalogTarget, PartitionInput, PIIDetection, PostgreSQLCatalogSource, PostgreSQLCatalogTarget, Predicate, Recipe, RecrawlPolicy, RedshiftSource, RedshiftTarget, RelationalCatalogSource, RenameField, S3CatalogDeltaSource, S3CatalogHudiSource, S3CatalogSource, S3CatalogTarget, S3CsvSource, S3DeltaCatalogTarget, S3DeltaDirectTarget, S3DeltaSource, S3DirectTarget, S3ExcelSource, S3GlueParquetTarget, S3HudiCatalogTarget, S3HudiDirectTarget, S3HudiSource, S3HyperDirectTarget, S3IcebergDirectTarget, S3JsonSource, S3ParquetSource, SchemaChangePolicy, SchemaId, SelectFields, SelectFromCollection, SnowflakeSource, SnowflakeTarget, SourceControlAuthStrategy, SourceControlDetails, SourceControlProvider, SparkConnectorSource, SparkConnectorTarget, SparkSQL, Spigot, SplitFields, StorageDescriptor, TableOptimizerConfiguration, TableOptimizerType, Trigger, Union, WorkerType } from "./models_0";
4
- import { CatalogInput, Compatibility, ConnectionInput, ConnectionPropertyKey, ConnectionType, CsvHeaderOption, CsvSerdeOption, DatabaseInput, IcebergPartitionSpec, IcebergSchema, IcebergSortOrder, Permission, ProfileConfiguration, RegistryId, SchemaVersionStatus, SourceProcessingProperties, SourceTableConfig, TableIdentifier, TableInput, TargetProcessingProperties, TargetTableConfig, TransformParameters, UserDefinedFunctionInput } from "./models_1";
5
- import { ColumnRowFilter, ColumnStatistics, DataQualityEvaluationRunAdditionalRunOptions, FederatedTable, JobBookmarkEntry, MetadataKeyValuePair, ResourceAction, ResourceShareType, ResourceState, SchemaVersionNumber, ViewDefinition, ViewValidation } from "./models_2";
3
+ import { Action, Aggregate, AmazonRedshiftSource, AmazonRedshiftTarget, AthenaConnectorSource, AuthenticationConfigurationInput, BasicCatalogTarget, CatalogDeltaSource, CatalogHudiSource, CatalogIcebergSource, CatalogKafkaSource, CatalogKinesisSource, CatalogSource, Column, ConnectionsList, ConnectorDataSource, ConnectorDataTarget, CrawlerTargets, CustomCode, DataSource, DirectJDBCSource, DirectKafkaSource, DirectKinesisSource, DropDuplicates, DropFields, DropNullFields, DynamicTransform, DynamoDBCatalogSource, DynamoDBELTConnectorSource, ErrorDetail, EvaluateDataQuality, EvaluateDataQualityMultiFrame, EventBatchingCondition, ExecutionClass, ExecutionProperty, FillMissingValues, Filter, GovernedCatalogSource, GovernedCatalogTarget, InclusionAnnotationValue, JDBCConnectorSource, JDBCConnectorTarget, JobCommand, JobMode, Join, LakeFormationConfiguration, LineageConfiguration, Merge, MicrosoftSQLServerCatalogSource, MicrosoftSQLServerCatalogTarget, MySQLCatalogSource, MySQLCatalogTarget, NotificationProperty, OracleSQLCatalogSource, OracleSQLCatalogTarget, PartitionInput, PIIDetection, PostgreSQLCatalogSource, PostgreSQLCatalogTarget, Predicate, Recipe, RecrawlPolicy, RedshiftSource, RedshiftTarget, RelationalCatalogSource, RenameField, Route, S3CatalogDeltaSource, S3CatalogHudiSource, S3CatalogIcebergSource, S3CatalogSource, S3CatalogTarget, S3CsvSource, S3DeltaCatalogTarget, S3DeltaDirectTarget, S3DeltaSource, S3DirectTarget, S3ExcelSource, S3GlueParquetTarget, S3HudiCatalogTarget, S3HudiDirectTarget, S3HudiSource, S3HyperDirectTarget, S3IcebergCatalogTarget, S3IcebergDirectTarget, S3JsonSource, S3ParquetSource, SchemaChangePolicy, SchemaId, SelectFields, SelectFromCollection, SnowflakeSource, SnowflakeTarget, SourceControlAuthStrategy, SourceControlDetails, SourceControlProvider, SparkConnectorSource, SparkConnectorTarget, SparkSQL, Spigot, SplitFields, StorageDescriptor, TableOptimizerConfiguration, TableOptimizerType, Trigger, Union, WorkerType } from "./models_0";
4
+ import { CatalogInput, Compatibility, ConnectionInput, ConnectionPropertyKey, ConnectionType, CsvHeaderOption, CsvSerdeOption, DatabaseInput, IcebergPartitionSpec, IcebergSchema, IcebergSortOrder, IntegrationError, IntegrationStatus, Permission, ProfileConfiguration, RegistryId, SchemaVersionStatus, SourceProcessingProperties, SourceTableConfig, TableIdentifier, TableInput, Tag, TargetProcessingProperties, TargetTableConfig, TransformParameters, UserDefinedFunctionInput } from "./models_1";
5
+ import { ColumnRowFilter, ColumnStatistics, DataCatalogEncryptionSettings, DataQualityEvaluationRunAdditionalRunOptions, FederatedTable, JobBookmarkEntry, ResourceAction, ResourceShareType, ResourceState, SchemaVersionNumber, ViewDefinition, ViewValidation } from "./models_2";
6
+ /**
7
+ * @public
8
+ */
9
+ export interface ModifyIntegrationResponse {
10
+ /**
11
+ * <p>The ARN of the source for the integration.</p>
12
+ * @public
13
+ */
14
+ SourceArn: string | undefined;
15
+ /**
16
+ * <p>The ARN of the target for the integration.</p>
17
+ * @public
18
+ */
19
+ TargetArn: string | undefined;
20
+ /**
21
+ * <p>A unique name for an integration in Glue.</p>
22
+ * @public
23
+ */
24
+ IntegrationName: string | undefined;
25
+ /**
26
+ * <p>A description of the integration.</p>
27
+ * @public
28
+ */
29
+ Description?: string | undefined;
30
+ /**
31
+ * <p>The Amazon Resource Name (ARN) for the integration.</p>
32
+ * @public
33
+ */
34
+ IntegrationArn: string | undefined;
35
+ /**
36
+ * <p>The ARN of a KMS key used for encrypting the channel.</p>
37
+ * @public
38
+ */
39
+ KmsKeyId?: string | undefined;
40
+ /**
41
+ * <p>An optional set of non-secret key-value pairs that contains additional contextual information for encryption.</p>
42
+ * @public
43
+ */
44
+ AdditionalEncryptionContext?: Record<string, string> | undefined;
45
+ /**
46
+ * <p>Metadata assigned to the resource consisting of a list of key-value pairs.</p>
47
+ * @public
48
+ */
49
+ Tags?: Tag[] | undefined;
50
+ /**
51
+ * <p>The status of the integration being modified.</p>
52
+ * <p>The possible statuses are:</p>
53
+ * <ul>
54
+ * <li>
55
+ * <p>CREATING: The integration is being created.</p>
56
+ * </li>
57
+ * <li>
58
+ * <p>ACTIVE: The integration creation succeeds.</p>
59
+ * </li>
60
+ * <li>
61
+ * <p>MODIFYING: The integration is being modified.</p>
62
+ * </li>
63
+ * <li>
64
+ * <p>FAILED: The integration creation fails. </p>
65
+ * </li>
66
+ * <li>
67
+ * <p>DELETING: The integration is deleted.</p>
68
+ * </li>
69
+ * <li>
70
+ * <p>SYNCING: The integration is synchronizing.</p>
71
+ * </li>
72
+ * <li>
73
+ * <p>NEEDS_ATTENTION: The integration needs attention, such as synchronization.</p>
74
+ * </li>
75
+ * </ul>
76
+ * @public
77
+ */
78
+ Status: IntegrationStatus | undefined;
79
+ /**
80
+ * <p>The time when the integration was created, in UTC.</p>
81
+ * @public
82
+ */
83
+ CreateTime: Date | undefined;
84
+ /**
85
+ * <p>A list of errors associated with the integration modification.</p>
86
+ * @public
87
+ */
88
+ Errors?: IntegrationError[] | undefined;
89
+ /**
90
+ * <p>Selects source tables for the integration using Maxwell filter syntax.</p>
91
+ * @public
92
+ */
93
+ DataFilter?: string | undefined;
94
+ }
95
+ /**
96
+ * @public
97
+ */
98
+ export interface PutDataCatalogEncryptionSettingsRequest {
99
+ /**
100
+ * <p>The ID of the Data Catalog to set the security configuration for. If none is provided, the
101
+ * Amazon Web Services account ID is used by default.</p>
102
+ * @public
103
+ */
104
+ CatalogId?: string | undefined;
105
+ /**
106
+ * <p>The security configuration to set.</p>
107
+ * @public
108
+ */
109
+ DataCatalogEncryptionSettings: DataCatalogEncryptionSettings | undefined;
110
+ }
111
+ /**
112
+ * @public
113
+ */
114
+ export interface PutDataCatalogEncryptionSettingsResponse {
115
+ }
116
+ /**
117
+ * @public
118
+ */
119
+ export interface PutDataQualityProfileAnnotationRequest {
120
+ /**
121
+ * <p>The ID of the data quality monitoring profile to annotate.</p>
122
+ * @public
123
+ */
124
+ ProfileId: string | undefined;
125
+ /**
126
+ * <p>The inclusion annotation value to apply to the profile.</p>
127
+ * @public
128
+ */
129
+ InclusionAnnotation: InclusionAnnotationValue | undefined;
130
+ }
131
+ /**
132
+ * <p>Left blank.</p>
133
+ * @public
134
+ */
135
+ export interface PutDataQualityProfileAnnotationResponse {
136
+ }
137
+ /**
138
+ * @public
139
+ * @enum
140
+ */
141
+ export declare const EnableHybridValues: {
142
+ readonly FALSE: "FALSE";
143
+ readonly TRUE: "TRUE";
144
+ };
145
+ /**
146
+ * @public
147
+ */
148
+ export type EnableHybridValues = (typeof EnableHybridValues)[keyof typeof EnableHybridValues];
149
+ /**
150
+ * @public
151
+ * @enum
152
+ */
153
+ export declare const ExistCondition: {
154
+ readonly MUST_EXIST: "MUST_EXIST";
155
+ readonly NONE: "NONE";
156
+ readonly NOT_EXIST: "NOT_EXIST";
157
+ };
158
+ /**
159
+ * @public
160
+ */
161
+ export type ExistCondition = (typeof ExistCondition)[keyof typeof ExistCondition];
162
+ /**
163
+ * @public
164
+ */
165
+ export interface PutResourcePolicyRequest {
166
+ /**
167
+ * <p>Contains the policy document to set, in JSON format.</p>
168
+ * @public
169
+ */
170
+ PolicyInJson: string | undefined;
171
+ /**
172
+ * <p>Do not use. For internal use only.</p>
173
+ * @public
174
+ */
175
+ ResourceArn?: string | undefined;
176
+ /**
177
+ * <p>The hash value returned when the previous policy was set using
178
+ * <code>PutResourcePolicy</code>. Its purpose is to prevent concurrent modifications of a
179
+ * policy. Do not use this parameter if no previous policy has been set.</p>
180
+ * @public
181
+ */
182
+ PolicyHashCondition?: string | undefined;
183
+ /**
184
+ * <p>A value of <code>MUST_EXIST</code> is used to update a policy. A value of
185
+ * <code>NOT_EXIST</code> is used to create a new policy. If a value of <code>NONE</code> or a
186
+ * null value is used, the call does not depend on the existence of a policy.</p>
187
+ * @public
188
+ */
189
+ PolicyExistsCondition?: ExistCondition | undefined;
190
+ /**
191
+ * <p>If <code>'TRUE'</code>, indicates that you are using both methods to grant cross-account
192
+ * access to Data Catalog resources:</p>
193
+ * <ul>
194
+ * <li>
195
+ * <p>By directly updating the resource policy with <code>PutResourcePolicy</code>
196
+ * </p>
197
+ * </li>
198
+ * <li>
199
+ * <p>By using the <b>Grant permissions</b> command on the Amazon Web Services Management Console.</p>
200
+ * </li>
201
+ * </ul>
202
+ * <p>Must be set to <code>'TRUE'</code> if you have already used the Management Console to
203
+ * grant cross-account access, otherwise the call fails. Default is 'FALSE'.</p>
204
+ * @public
205
+ */
206
+ EnableHybrid?: EnableHybridValues | undefined;
207
+ }
208
+ /**
209
+ * @public
210
+ */
211
+ export interface PutResourcePolicyResponse {
212
+ /**
213
+ * <p>A hash of the policy that has just been set. This must
214
+ * be included in a subsequent call that overwrites or updates
215
+ * this policy.</p>
216
+ * @public
217
+ */
218
+ PolicyHash?: string | undefined;
219
+ }
220
+ /**
221
+ * <p>A structure containing a key value pair for metadata.</p>
222
+ * @public
223
+ */
224
+ export interface MetadataKeyValuePair {
225
+ /**
226
+ * <p>A metadata key.</p>
227
+ * @public
228
+ */
229
+ MetadataKey?: string | undefined;
230
+ /**
231
+ * <p>A metadata key’s corresponding value.</p>
232
+ * @public
233
+ */
234
+ MetadataValue?: string | undefined;
235
+ }
6
236
  /**
7
237
  * @public
8
238
  */
@@ -3252,11 +3482,6 @@ export interface CodeGenConfigurationNode {
3252
3482
  * @public
3253
3483
  */
3254
3484
  S3CsvSource?: S3CsvSource | undefined;
3255
- /**
3256
- * <p>Defines configuration parameters for reading Excel files from Amazon S3.</p>
3257
- * @public
3258
- */
3259
- S3ExcelSource?: S3ExcelSource | undefined;
3260
3485
  /**
3261
3486
  * <p>Specifies a JSON data store stored in Amazon S3.</p>
3262
3487
  * @public
@@ -3307,21 +3532,11 @@ export interface CodeGenConfigurationNode {
3307
3532
  * @public
3308
3533
  */
3309
3534
  S3GlueParquetTarget?: S3GlueParquetTarget | undefined;
3310
- /**
3311
- * <p>Defines configuration parameters for writing data to Amazon S3 using HyperDirect optimization.</p>
3312
- * @public
3313
- */
3314
- S3HyperDirectTarget?: S3HyperDirectTarget | undefined;
3315
3535
  /**
3316
3536
  * <p>Specifies a data target that writes to Amazon S3.</p>
3317
3537
  * @public
3318
3538
  */
3319
3539
  S3DirectTarget?: S3DirectTarget | undefined;
3320
- /**
3321
- * <p>Defines configuration parameters for writing data to Amazon S3 as an Apache Iceberg table.</p>
3322
- * @public
3323
- */
3324
- S3IcebergDirectTarget?: S3IcebergDirectTarget | undefined;
3325
3540
  /**
3326
3541
  * <p>Specifies a transform that maps data property keys in the data source to data property keys in the data target. You can rename keys, modify the data types for keys, and choose which keys to drop from the dataset.</p>
3327
3542
  * @public
@@ -3483,6 +3698,11 @@ export interface CodeGenConfigurationNode {
3483
3698
  * @public
3484
3699
  */
3485
3700
  PostgreSQLCatalogTarget?: PostgreSQLCatalogTarget | undefined;
3701
+ /**
3702
+ * <p>Specifies a route node that directs data to different output paths based on defined filtering conditions.</p>
3703
+ * @public
3704
+ */
3705
+ Route?: Route | undefined;
3486
3706
  /**
3487
3707
  * <p>Specifies a custom visual transform created by a user.</p>
3488
3708
  * @public
@@ -3588,6 +3808,41 @@ export interface CodeGenConfigurationNode {
3588
3808
  * @public
3589
3809
  */
3590
3810
  ConnectorDataTarget?: ConnectorDataTarget | undefined;
3811
+ /**
3812
+ * <p>Specifies an Apache Iceberg data source that is registered in the Glue Data Catalog. The Iceberg data source must be stored in Amazon S3.</p>
3813
+ * @public
3814
+ */
3815
+ S3CatalogIcebergSource?: S3CatalogIcebergSource | undefined;
3816
+ /**
3817
+ * <p>Specifies an Apache Iceberg data source that is registered in the Glue Data Catalog.</p>
3818
+ * @public
3819
+ */
3820
+ CatalogIcebergSource?: CatalogIcebergSource | undefined;
3821
+ /**
3822
+ * <p>Specifies an Apache Iceberg catalog target that writes data to Amazon S3 and registers the table in the Glue Data Catalog.</p>
3823
+ * @public
3824
+ */
3825
+ S3IcebergCatalogTarget?: S3IcebergCatalogTarget | undefined;
3826
+ /**
3827
+ * <p>Defines configuration parameters for writing data to Amazon S3 as an Apache Iceberg table.</p>
3828
+ * @public
3829
+ */
3830
+ S3IcebergDirectTarget?: S3IcebergDirectTarget | undefined;
3831
+ /**
3832
+ * <p>Defines configuration parameters for reading Excel files from Amazon S3.</p>
3833
+ * @public
3834
+ */
3835
+ S3ExcelSource?: S3ExcelSource | undefined;
3836
+ /**
3837
+ * <p>Defines configuration parameters for writing data to Amazon S3 using HyperDirect optimization.</p>
3838
+ * @public
3839
+ */
3840
+ S3HyperDirectTarget?: S3HyperDirectTarget | undefined;
3841
+ /**
3842
+ * <p>Specifies a DynamoDB ELT connector source for extracting data from DynamoDB tables.</p>
3843
+ * @public
3844
+ */
3845
+ DynamoDBELTConnectorSource?: DynamoDBELTConnectorSource | undefined;
3591
3846
  }
3592
3847
  /**
3593
3848
  * @public
@@ -3900,7 +4155,7 @@ export interface CreateJobRequest {
3900
4155
  * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offer a scalable and cost effective way to run most jobs.</p>
3901
4156
  * </li>
3902
4157
  * <li>
3903
- * <p>For the <code>G.4X</code> worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).</p>
4158
+ * <p>For the <code>G.4X</code> worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (N. California), US West (Oregon), Asia Pacific (Mumbai), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), Europe (London), Europe (Spain), Europe (Stockholm), and South America (São Paulo).</p>
3904
4159
  * </li>
3905
4160
  * <li>
3906
4161
  * <p>For the <code>G.8X</code> worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the <code>G.4X</code> worker type.</p>
@@ -4112,26 +4367,42 @@ export interface Job {
4112
4367
  */
4113
4368
  MaxCapacity?: number | undefined;
4114
4369
  /**
4115
- * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of
4116
- * G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
4370
+ * <p>The type of predefined worker that is allocated when a job runs.</p>
4371
+ * <p>Glue provides multiple worker types to accommodate different workload requirements:</p>
4372
+ * <p>G Worker Types (General-purpose compute workers):</p>
4117
4373
  * <ul>
4118
4374
  * <li>
4119
- * <p>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 94GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.</p>
4375
+ * <p>G.1X: 1 DPU (4 vCPUs, 16 GB memory, 94GB disk)</p>
4120
4376
  * </li>
4121
4377
  * <li>
4122
- * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 138GB disk, and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.</p>
4378
+ * <p>G.2X: 2 DPU (8 vCPUs, 32 GB memory, 138GB disk)</p>
4123
4379
  * </li>
4124
4380
  * <li>
4125
- * <p>For the <code>G.4X</code> worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).</p>
4381
+ * <p>G.4X: 4 DPU (16 vCPUs, 64 GB memory, 256GB disk)</p>
4126
4382
  * </li>
4127
4383
  * <li>
4128
- * <p>For the <code>G.8X</code> worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk, and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the <code>G.4X</code> worker type.</p>
4384
+ * <p>G.8X: 8 DPU (32 vCPUs, 128 GB memory, 512GB disk)</p>
4129
4385
  * </li>
4130
4386
  * <li>
4131
- * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 or later streaming jobs.</p>
4387
+ * <p>G.12X: 12 DPU (48 vCPUs, 192 GB memory, 768GB disk)</p>
4132
4388
  * </li>
4133
4389
  * <li>
4134
- * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk, and provides up to 8 Ray workers based on the autoscaler.</p>
4390
+ * <p>G.16X: 16 DPU (64 vCPUs, 256 GB memory, 1024GB disk)</p>
4391
+ * </li>
4392
+ * </ul>
4393
+ * <p>R Worker Types (Memory-optimized workers):</p>
4394
+ * <ul>
4395
+ * <li>
4396
+ * <p>R.1X: 1 M-DPU (4 vCPUs, 32 GB memory)</p>
4397
+ * </li>
4398
+ * <li>
4399
+ * <p>R.2X: 2 M-DPU (8 vCPUs, 64 GB memory)</p>
4400
+ * </li>
4401
+ * <li>
4402
+ * <p>R.4X: 4 M-DPU (16 vCPUs, 128 GB memory)</p>
4403
+ * </li>
4404
+ * <li>
4405
+ * <p>R.8X: 8 M-DPU (32 vCPUs, 256 GB memory)</p>
4135
4406
  * </li>
4136
4407
  * </ul>
4137
4408
  * @public
@@ -5,10 +5,8 @@ import {
5
5
  ServiceInputTypes,
6
6
  ServiceOutputTypes,
7
7
  } from "../GlueClient";
8
- import {
9
- GetClassifierRequest,
10
- GetClassifierResponse,
11
- } from "../models/models_1";
8
+ import { GetClassifierRequest } from "../models/models_1";
9
+ import { GetClassifierResponse } from "../models/models_2";
12
10
  export { __MetadataBearer };
13
11
  export { $Command };
14
12
  export interface GetClassifierCommandInput extends GetClassifierRequest {}
@@ -8,7 +8,7 @@ import {
8
8
  import {
9
9
  GetClassifiersRequest,
10
10
  GetClassifiersResponse,
11
- } from "../models/models_1";
11
+ } from "../models/models_2";
12
12
  export { __MetadataBearer };
13
13
  export { $Command };
14
14
  export interface GetClassifiersCommandInput extends GetClassifiersRequest {}
@@ -5,8 +5,10 @@ import {
5
5
  ServiceInputTypes,
6
6
  ServiceOutputTypes,
7
7
  } from "../GlueClient";
8
- import { GetColumnStatisticsForPartitionRequest } from "../models/models_1";
9
- import { GetColumnStatisticsForPartitionResponse } from "../models/models_2";
8
+ import {
9
+ GetColumnStatisticsForPartitionRequest,
10
+ GetColumnStatisticsForPartitionResponse,
11
+ } from "../models/models_2";
10
12
  export { __MetadataBearer };
11
13
  export { $Command };
12
14
  export interface GetColumnStatisticsForPartitionCommandInput
@@ -5,10 +5,8 @@ import {
5
5
  ServiceInputTypes,
6
6
  ServiceOutputTypes,
7
7
  } from "../GlueClient";
8
- import {
9
- ModifyIntegrationRequest,
10
- ModifyIntegrationResponse,
11
- } from "../models/models_2";
8
+ import { ModifyIntegrationRequest } from "../models/models_2";
9
+ import { ModifyIntegrationResponse } from "../models/models_3";
12
10
  export { __MetadataBearer };
13
11
  export { $Command };
14
12
  export interface ModifyIntegrationCommandInput
@@ -8,7 +8,7 @@ import {
8
8
  import {
9
9
  PutDataCatalogEncryptionSettingsRequest,
10
10
  PutDataCatalogEncryptionSettingsResponse,
11
- } from "../models/models_2";
11
+ } from "../models/models_3";
12
12
  export { __MetadataBearer };
13
13
  export { $Command };
14
14
  export interface PutDataCatalogEncryptionSettingsCommandInput
@@ -8,7 +8,7 @@ import {
8
8
  import {
9
9
  PutDataQualityProfileAnnotationRequest,
10
10
  PutDataQualityProfileAnnotationResponse,
11
- } from "../models/models_2";
11
+ } from "../models/models_3";
12
12
  export { __MetadataBearer };
13
13
  export { $Command };
14
14
  export interface PutDataQualityProfileAnnotationCommandInput
@@ -8,7 +8,7 @@ import {
8
8
  import {
9
9
  PutResourcePolicyRequest,
10
10
  PutResourcePolicyResponse,
11
- } from "../models/models_2";
11
+ } from "../models/models_3";
12
12
  export { __MetadataBearer };
13
13
  export { $Command };
14
14
  export interface PutResourcePolicyCommandInput