@aws-sdk/client-glue 3.371.0 → 3.376.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. package/dist-cjs/models/models_0.js +1 -5
  2. package/dist-cjs/models/models_1.js +5 -1
  3. package/dist-cjs/protocols/Aws_json1_1.js +4 -0
  4. package/dist-es/models/models_0.js +0 -4
  5. package/dist-es/models/models_1.js +4 -0
  6. package/dist-es/protocols/Aws_json1_1.js +4 -0
  7. package/dist-types/commands/BatchGetCrawlersCommand.d.ts +8 -0
  8. package/dist-types/commands/BatchGetJobsCommand.d.ts +8 -0
  9. package/dist-types/commands/CreateCrawlerCommand.d.ts +8 -0
  10. package/dist-types/commands/CreateJobCommand.d.ts +8 -0
  11. package/dist-types/commands/GetCrawlerCommand.d.ts +8 -0
  12. package/dist-types/commands/GetCrawlersCommand.d.ts +8 -0
  13. package/dist-types/commands/GetJobCommand.d.ts +8 -0
  14. package/dist-types/commands/GetJobsCommand.d.ts +8 -0
  15. package/dist-types/commands/GetUnfilteredPartitionsMetadataCommand.d.ts +2 -1
  16. package/dist-types/commands/GetUnfilteredTableMetadataCommand.d.ts +1 -2
  17. package/dist-types/commands/UpdateCrawlerCommand.d.ts +8 -0
  18. package/dist-types/commands/UpdateJobCommand.d.ts +8 -0
  19. package/dist-types/models/models_0.d.ts +69 -58
  20. package/dist-types/models/models_1.d.ts +60 -63
  21. package/dist-types/models/models_2.d.ts +97 -37
  22. package/dist-types/ts3.4/commands/GetUnfilteredPartitionsMetadataCommand.d.ts +2 -4
  23. package/dist-types/ts3.4/commands/GetUnfilteredTableMetadataCommand.d.ts +4 -2
  24. package/dist-types/ts3.4/models/models_0.d.ts +16 -14
  25. package/dist-types/ts3.4/models/models_1.d.ts +15 -18
  26. package/dist-types/ts3.4/models/models_2.d.ts +22 -1
  27. package/package.json +1 -1
package/dist-cjs/models/models_0.js
@@ -1,7 +1,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.InvalidStateException = exports.FederationSourceRetryableException = exports.FederationSourceException = exports.FederationSourceErrorCode = exports.SourceControlProvider = exports.SourceControlAuthStrategy = exports.ExecutionClass = exports.UnionType = exports.HudiTargetCompressionType = exports.ParquetCompressionType = exports.TargetFormat = exports.DeltaTargetCompressionType = exports.Separator = exports.QuoteChar = exports.CompressionType = exports.PiiType = exports.JoinType = exports.GlueRecordType = exports.JDBCDataType = exports.UpdateCatalogBehavior = exports.FilterLogicalOperator = exports.FilterValueType = exports.FilterOperation = exports.DQStopJobOnFailureTiming = exports.DQTransformOutput = exports.ParamType = exports.JDBCConnectionType = exports.StartingPosition = exports.WorkerType = exports.DataQualityRuleResultStatus = exports.JdbcMetadataEntry = exports.CrawlerState = exports.UpdateBehavior = exports.DeleteBehavior = exports.ScheduleState = exports.RecrawlBehavior = exports.CrawlerLineageSettings = exports.LastCrawlStatus = exports.BlueprintStatus = exports.ResourceNotReadyException = exports.ResourceNumberLimitExceededException = exports.OperationTimeoutException = exports.InvalidInputException = exports.InternalServiceException = exports.GlueEncryptionException = exports.EntityNotFoundException = exports.AlreadyExistsException = exports.AggFunction = exports.AdditionalOptionKeys = exports.AccessDeniedException = void 0;
- exports.MLUserDataEncryptionModeString = exports.TransformType = exports.ValidationException = exports.FederatedResourceAlreadyExistsException = exports.Permission = exports.ConcurrentModificationException = exports.IdempotentParameterMismatchException = exports.ConnectionType = exports.ConnectionPropertyKey = exports.CsvHeaderOption = exports.DataFormat = exports.IllegalSessionStateException = exports.TaskStatusType = exports.WorkflowRunStatus = exports.NodeType = exports.TriggerType = exports.TriggerState = exports.Logical = exports.JobRunState = exports.LogicalOperator = exports.CrawlState = void 0;
+ exports.TransformType = exports.ValidationException = exports.FederatedResourceAlreadyExistsException = exports.Permission = exports.ConcurrentModificationException = exports.IdempotentParameterMismatchException = exports.ConnectionType = exports.ConnectionPropertyKey = exports.CsvHeaderOption = exports.DataFormat = exports.IllegalSessionStateException = exports.TaskStatusType = exports.WorkflowRunStatus = exports.NodeType = exports.TriggerType = exports.TriggerState = exports.Logical = exports.JobRunState = exports.LogicalOperator = exports.CrawlState = void 0;
  const GlueServiceException_1 = require("./GlueServiceException");
  class AccessDeniedException extends GlueServiceException_1.GlueServiceException {
  constructor(opts) {
@@ -634,7 +634,3 @@ exports.ValidationException = ValidationException;
  exports.TransformType = {
  FIND_MATCHES: "FIND_MATCHES",
  };
- exports.MLUserDataEncryptionModeString = {
- DISABLED: "DISABLED",
- SSEKMS: "SSE-KMS",
- };
package/dist-cjs/models/models_1.js
@@ -1,7 +1,11 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.PermissionTypeMismatchException = exports.PermissionType = exports.StatementState = exports.SchemaDiffType = exports.PartitionIndexStatus = exports.BackfillErrorCode = exports.TransformSortColumnType = exports.TransformStatusType = exports.SortDirectionType = exports.TaskRunSortColumnType = exports.TaskType = exports.CatalogEncryptionMode = exports.ResourceShareType = exports.ColumnStatisticsType = exports.BlueprintRunState = exports.ConditionCheckFailureException = exports.RegistryStatus = exports.ConflictException = exports.SchedulerTransitioningException = exports.CrawlerRunningException = exports.ResourceType = exports.PrincipalType = exports.MetadataOperation = exports.SessionStatus = exports.S3EncryptionMode = exports.JobBookmarksEncryptionMode = exports.CloudWatchEncryptionMode = exports.Language = exports.SchemaVersionStatus = exports.SchemaStatus = exports.Compatibility = void 0;
+ exports.PermissionTypeMismatchException = exports.PermissionType = exports.StatementState = exports.SchemaDiffType = exports.PartitionIndexStatus = exports.BackfillErrorCode = exports.TransformSortColumnType = exports.TransformStatusType = exports.SortDirectionType = exports.TaskRunSortColumnType = exports.TaskType = exports.CatalogEncryptionMode = exports.ResourceShareType = exports.ColumnStatisticsType = exports.BlueprintRunState = exports.ConditionCheckFailureException = exports.RegistryStatus = exports.ConflictException = exports.SchedulerTransitioningException = exports.CrawlerRunningException = exports.ResourceType = exports.PrincipalType = exports.MetadataOperation = exports.SessionStatus = exports.S3EncryptionMode = exports.JobBookmarksEncryptionMode = exports.CloudWatchEncryptionMode = exports.Language = exports.SchemaVersionStatus = exports.SchemaStatus = exports.Compatibility = exports.MLUserDataEncryptionModeString = void 0;
  const GlueServiceException_1 = require("./GlueServiceException");
+ exports.MLUserDataEncryptionModeString = {
+ DISABLED: "DISABLED",
+ SSEKMS: "SSE-KMS",
+ };
  exports.Compatibility = {
  BACKWARD: "BACKWARD",
  BACKWARD_ALL: "BACKWARD_ALL",
package/dist-cjs/protocols/Aws_json1_1.js
@@ -10667,6 +10667,7 @@ const se_CodeGenConfigurationNode = (input, context) => {
  PIIDetection: (_) => se_PIIDetection(_, context),
  PostgreSQLCatalogSource: smithy_client_1._json,
  PostgreSQLCatalogTarget: smithy_client_1._json,
+ Recipe: smithy_client_1._json,
  RedshiftSource: smithy_client_1._json,
  RedshiftTarget: smithy_client_1._json,
  RelationalCatalogSource: smithy_client_1._json,
@@ -10730,6 +10731,7 @@ const se_CrawlerTargets = (input, context) => {
  CatalogTargets: smithy_client_1._json,
  DeltaTargets: smithy_client_1._json,
  DynamoDBTargets: (_) => se_DynamoDBTargetList(_, context),
+ HudiTargets: smithy_client_1._json,
  IcebergTargets: smithy_client_1._json,
  JdbcTargets: smithy_client_1._json,
  MongoDBTargets: smithy_client_1._json,
@@ -11489,6 +11491,7 @@ const de_CodeGenConfigurationNode = (output, context) => {
  PIIDetection: (_) => de_PIIDetection(_, context),
  PostgreSQLCatalogSource: smithy_client_1._json,
  PostgreSQLCatalogTarget: smithy_client_1._json,
+ Recipe: smithy_client_1._json,
  RedshiftSource: smithy_client_1._json,
  RedshiftTarget: smithy_client_1._json,
  RelationalCatalogSource: smithy_client_1._json,
@@ -11698,6 +11701,7 @@ const de_CrawlerTargets = (output, context) => {
  CatalogTargets: smithy_client_1._json,
  DeltaTargets: smithy_client_1._json,
  DynamoDBTargets: (_) => de_DynamoDBTargetList(_, context),
+ HudiTargets: smithy_client_1._json,
  IcebergTargets: smithy_client_1._json,
  JdbcTargets: smithy_client_1._json,
  MongoDBTargets: smithy_client_1._json,
package/dist-es/models/models_0.js
@@ -613,7 +613,3 @@ export class ValidationException extends __BaseException {
  export const TransformType = {
  FIND_MATCHES: "FIND_MATCHES",
  };
- export const MLUserDataEncryptionModeString = {
- DISABLED: "DISABLED",
- SSEKMS: "SSE-KMS",
- };
package/dist-es/models/models_1.js
@@ -1,4 +1,8 @@
  import { GlueServiceException as __BaseException } from "./GlueServiceException";
+ export const MLUserDataEncryptionModeString = {
+ DISABLED: "DISABLED",
+ SSEKMS: "SSE-KMS",
+ };
  export const Compatibility = {
  BACKWARD: "BACKWARD",
  BACKWARD_ALL: "BACKWARD_ALL",
package/dist-es/protocols/Aws_json1_1.js
@@ -10252,6 +10252,7 @@ const se_CodeGenConfigurationNode = (input, context) => {
  PIIDetection: (_) => se_PIIDetection(_, context),
  PostgreSQLCatalogSource: _json,
  PostgreSQLCatalogTarget: _json,
+ Recipe: _json,
  RedshiftSource: _json,
  RedshiftTarget: _json,
  RelationalCatalogSource: _json,
@@ -10315,6 +10316,7 @@ const se_CrawlerTargets = (input, context) => {
  CatalogTargets: _json,
  DeltaTargets: _json,
  DynamoDBTargets: (_) => se_DynamoDBTargetList(_, context),
+ HudiTargets: _json,
  IcebergTargets: _json,
  JdbcTargets: _json,
  MongoDBTargets: _json,
@@ -11074,6 +11076,7 @@ const de_CodeGenConfigurationNode = (output, context) => {
  PIIDetection: (_) => de_PIIDetection(_, context),
  PostgreSQLCatalogSource: _json,
  PostgreSQLCatalogTarget: _json,
+ Recipe: _json,
  RedshiftSource: _json,
  RedshiftTarget: _json,
  RelationalCatalogSource: _json,
@@ -11283,6 +11286,7 @@ const de_CrawlerTargets = (output, context) => {
  CatalogTargets: _json,
  DeltaTargets: _json,
  DynamoDBTargets: (_) => de_DynamoDBTargetList(_, context),
+ HudiTargets: _json,
  IcebergTargets: _json,
  JdbcTargets: _json,
  MongoDBTargets: _json,
package/dist-types/commands/BatchGetCrawlersCommand.d.ts
@@ -114,6 +114,14 @@ export interface BatchGetCrawlersCommandOutput extends BatchGetCrawlersResponse,
  * // MaximumTraversalDepth: Number("int"),
  * // },
  * // ],
+ * // HudiTargets: [ // HudiTargetList
+ * // { // HudiTarget
+ * // Paths: "<PathList>",
+ * // ConnectionName: "STRING_VALUE",
+ * // Exclusions: "<PathList>",
+ * // MaximumTraversalDepth: Number("int"),
+ * // },
+ * // ],
  * // },
  * // DatabaseName: "STRING_VALUE",
  * // Description: "STRING_VALUE",
package/dist-types/commands/BatchGetJobsCommand.d.ts
@@ -1021,6 +1021,14 @@ export interface BatchGetJobsCommandOutput extends BatchGetJobsResponse, __Metad
  * // StopJobOnFailureTiming: "Immediate" || "AfterDataLoad",
  * // },
  * // },
+ * // Recipe: { // Recipe
+ * // Name: "STRING_VALUE", // required
+ * // Inputs: "<OneInput>", // required
+ * // RecipeReference: { // RecipeReference
+ * // RecipeArn: "STRING_VALUE", // required
+ * // RecipeVersion: "STRING_VALUE", // required
+ * // },
+ * // },
  * // },
  * // },
  * // ExecutionClass: "FLEX" || "STANDARD",
package/dist-types/commands/CreateCrawlerCommand.d.ts
@@ -109,6 +109,14 @@ export interface CreateCrawlerCommandOutput extends CreateCrawlerResponse, __Met
  * MaximumTraversalDepth: Number("int"),
  * },
  * ],
+ * HudiTargets: [ // HudiTargetList
+ * { // HudiTarget
+ * Paths: "<PathList>",
+ * ConnectionName: "STRING_VALUE",
+ * Exclusions: "<PathList>",
+ * MaximumTraversalDepth: Number("int"),
+ * },
+ * ],
  * },
  * Schedule: "STRING_VALUE",
  * Classifiers: [ // ClassifierNameList
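
To make the new field concrete, here is a minimal, hypothetical sketch of creating a crawler with the HudiTargets member added in this release. The crawler name, role ARN, database name, and S3 paths below are placeholder values, not values from this diff; only the HudiTarget shape (Paths, ConnectionName, Exclusions, MaximumTraversalDepth) comes from the release.

import { GlueClient, CreateCrawlerCommand } from "@aws-sdk/client-glue";

const client = new GlueClient({ region: "us-east-1" });

// All names and ARNs are placeholders for illustration.
await client.send(
  new CreateCrawlerCommand({
    Name: "hudi-crawler",
    Role: "arn:aws:iam::123456789012:role/GlueCrawlerRole",
    DatabaseName: "hudi_db",
    Targets: {
      // New in this release: crawl Apache Hudi tables under S3 prefixes.
      HudiTargets: [
        {
          Paths: ["s3://example-bucket/hudi/"], // root folders to scan for Hudi metadata
          Exclusions: ["**/_temporary/**"], // optional glob patterns to skip
          MaximumTraversalDepth: 5, // bound the S3 traversal, limiting crawler run time
        },
      ],
    },
  })
);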
package/dist-types/commands/CreateJobCommand.d.ts
@@ -1012,6 +1012,14 @@ export interface CreateJobCommandOutput extends CreateJobResponse, __MetadataBea
  * StopJobOnFailureTiming: "Immediate" || "AfterDataLoad",
  * },
  * },
+ * Recipe: { // Recipe
+ * Name: "STRING_VALUE", // required
+ * Inputs: "<OneInput>", // required
+ * RecipeReference: { // RecipeReference
+ * RecipeArn: "STRING_VALUE", // required
+ * RecipeVersion: "STRING_VALUE", // required
+ * },
+ * },
  * },
  * },
  * ExecutionClass: "FLEX" || "STANDARD",
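
Likewise, a hypothetical sketch of attaching the new Recipe node when creating a job. The job name, role ARN, node ids, and DataBrew recipe ARN are placeholders; the Recipe shape (Name, Inputs, RecipeReference) is what this release adds.

import { GlueClient, CreateJobCommand } from "@aws-sdk/client-glue";

const client = new GlueClient({ region: "us-east-1" });

// "source-node" is an assumed upstream node id in the visual job graph.
await client.send(
  new CreateJobCommand({
    Name: "recipe-job",
    Role: "arn:aws:iam::123456789012:role/GlueJobRole",
    Command: { Name: "glueetl" },
    CodeGenConfigurationNodes: {
      "recipe-node": {
        // New in this release: run a Glue DataBrew recipe as a job node.
        Recipe: {
          Name: "ApplyDataBrewRecipe",
          Inputs: ["source-node"],
          RecipeReference: {
            RecipeArn: "arn:aws:databrew:us-east-1:123456789012:recipe/example-recipe",
            RecipeVersion: "1.0",
          },
        },
      },
    },
  })
);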
package/dist-types/commands/GetCrawlerCommand.d.ts
@@ -111,6 +111,14 @@ export interface GetCrawlerCommandOutput extends GetCrawlerResponse, __MetadataB
  * // MaximumTraversalDepth: Number("int"),
  * // },
  * // ],
+ * // HudiTargets: [ // HudiTargetList
+ * // { // HudiTarget
+ * // Paths: "<PathList>",
+ * // ConnectionName: "STRING_VALUE",
+ * // Exclusions: "<PathList>",
+ * // MaximumTraversalDepth: Number("int"),
+ * // },
+ * // ],
  * // },
  * // DatabaseName: "STRING_VALUE",
  * // Description: "STRING_VALUE",
package/dist-types/commands/GetCrawlersCommand.d.ts
@@ -114,6 +114,14 @@ export interface GetCrawlersCommandOutput extends GetCrawlersResponse, __Metadat
  * // MaximumTraversalDepth: Number("int"),
  * // },
  * // ],
+ * // HudiTargets: [ // HudiTargetList
+ * // { // HudiTarget
+ * // Paths: "<PathList>",
+ * // ConnectionName: "STRING_VALUE",
+ * // Exclusions: "<PathList>",
+ * // MaximumTraversalDepth: Number("int"),
+ * // },
+ * // ],
  * // },
  * // DatabaseName: "STRING_VALUE",
  * // Description: "STRING_VALUE",
package/dist-types/commands/GetJobCommand.d.ts
@@ -1017,6 +1017,14 @@ export interface GetJobCommandOutput extends GetJobResponse, __MetadataBearer {
  * // StopJobOnFailureTiming: "Immediate" || "AfterDataLoad",
  * // },
  * // },
+ * // Recipe: { // Recipe
+ * // Name: "STRING_VALUE", // required
+ * // Inputs: "<OneInput>", // required
+ * // RecipeReference: { // RecipeReference
+ * // RecipeArn: "STRING_VALUE", // required
+ * // RecipeVersion: "STRING_VALUE", // required
+ * // },
+ * // },
  * // },
  * // },
  * // ExecutionClass: "FLEX" || "STANDARD",
package/dist-types/commands/GetJobsCommand.d.ts
@@ -1019,6 +1019,14 @@ export interface GetJobsCommandOutput extends GetJobsResponse, __MetadataBearer
  * // StopJobOnFailureTiming: "Immediate" || "AfterDataLoad",
  * // },
  * // },
+ * // Recipe: { // Recipe
+ * // Name: "STRING_VALUE", // required
+ * // Inputs: "<OneInput>", // required
+ * // RecipeReference: { // RecipeReference
+ * // RecipeArn: "STRING_VALUE", // required
+ * // RecipeVersion: "STRING_VALUE", // required
+ * // },
+ * // },
  * // },
  * // },
  * // ExecutionClass: "FLEX" || "STANDARD",
package/dist-types/commands/GetUnfilteredPartitionsMetadataCommand.d.ts
@@ -2,7 +2,8 @@ import { EndpointParameterInstructions } from "@smithy/middleware-endpoint";
  import { Command as $Command } from "@smithy/smithy-client";
  import { Handler, HttpHandlerOptions as __HttpHandlerOptions, MetadataBearer as __MetadataBearer, MiddlewareStack } from "@smithy/types";
  import { GlueClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../GlueClient";
- import { GetUnfilteredPartitionsMetadataRequest, GetUnfilteredPartitionsMetadataResponse } from "../models/models_1";
+ import { GetUnfilteredPartitionsMetadataRequest } from "../models/models_1";
+ import { GetUnfilteredPartitionsMetadataResponse } from "../models/models_2";
  /**
  * @public
  */
package/dist-types/commands/GetUnfilteredTableMetadataCommand.d.ts
@@ -2,8 +2,7 @@ import { EndpointParameterInstructions } from "@smithy/middleware-endpoint";
  import { Command as $Command } from "@smithy/smithy-client";
  import { Handler, HttpHandlerOptions as __HttpHandlerOptions, MetadataBearer as __MetadataBearer, MiddlewareStack } from "@smithy/types";
  import { GlueClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../GlueClient";
- import { GetUnfilteredTableMetadataRequest } from "../models/models_1";
- import { GetUnfilteredTableMetadataResponse } from "../models/models_2";
+ import { GetUnfilteredTableMetadataRequest, GetUnfilteredTableMetadataResponse } from "../models/models_2";
  /**
  * @public
  */
package/dist-types/commands/UpdateCrawlerCommand.d.ts
@@ -109,6 +109,14 @@ export interface UpdateCrawlerCommandOutput extends UpdateCrawlerResponse, __Met
  * MaximumTraversalDepth: Number("int"),
  * },
  * ],
+ * HudiTargets: [ // HudiTargetList
+ * { // HudiTarget
+ * Paths: "<PathList>",
+ * ConnectionName: "STRING_VALUE",
+ * Exclusions: "<PathList>",
+ * MaximumTraversalDepth: Number("int"),
+ * },
+ * ],
  * },
  * Schedule: "STRING_VALUE",
  * Classifiers: [ // ClassifierNameList
package/dist-types/commands/UpdateJobCommand.d.ts
@@ -1009,6 +1009,14 @@ export interface UpdateJobCommandOutput extends UpdateJobResponse, __MetadataBea
  * StopJobOnFailureTiming: "Immediate" || "AfterDataLoad",
  * },
  * },
+ * Recipe: { // Recipe
+ * Name: "STRING_VALUE", // required
+ * Inputs: "<OneInput>", // required
+ * RecipeReference: { // RecipeReference
+ * RecipeArn: "STRING_VALUE", // required
+ * RecipeVersion: "STRING_VALUE", // required
+ * },
+ * },
  * },
  * },
  * ExecutionClass: "FLEX" || "STANDARD",
package/dist-types/models/models_0.d.ts
@@ -1370,6 +1370,30 @@ export interface DynamoDBTarget {
  */
  scanRate?: number;
  }
+ /**
+ * @public
+ * <p>Specifies an Apache Hudi data source.</p>
+ */
+ export interface HudiTarget {
+ /**
+ * <p>An array of Amazon S3 location strings for Hudi, each indicating the root folder with which the metadata files for a Hudi table resides. The Hudi folder may be located in a child folder of the root folder.</p>
+ * <p>The crawler will scan all folders underneath a path for a Hudi folder.</p>
+ */
+ Paths?: string[];
+ /**
+ * <p>The name of the connection to use to connect to the Hudi target. If your Hudi files are stored in buckets that require VPC authorization, you can set their connection properties here.</p>
+ */
+ ConnectionName?: string;
+ /**
+ * <p>A list of glob patterns used to exclude from the crawl.
+ * For more information, see <a href="https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html">Catalog Tables with a Crawler</a>.</p>
+ */
+ Exclusions?: string[];
+ /**
+ * <p>The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Hudi metadata folder in your Amazon S3 path. Used to limit the crawler run time.</p>
+ */
+ MaximumTraversalDepth?: number;
+ }
  /**
  * @public
  * <p>Specifies an Apache Iceberg data source where Iceberg tables are stored in Amazon S3.</p>
@@ -1512,6 +1536,10 @@ export interface CrawlerTargets {
  * <p>Specifies Apache Iceberg data store targets.</p>
  */
  IcebergTargets?: IcebergTarget[];
+ /**
+ * <p>Specifies Apache Hudi data store targets.</p>
+ */
+ HudiTargets?: HudiTarget[];
  }
  /**
  * @public
@@ -3539,6 +3567,38 @@ export interface PostgreSQLCatalogTarget {
  */
  Table: string | undefined;
  }
+ /**
+ * @public
+ * <p>A reference to a Glue DataBrew recipe.</p>
+ */
+ export interface RecipeReference {
+ /**
+ * <p>The ARN of the DataBrew recipe.</p>
+ */
+ RecipeArn: string | undefined;
+ /**
+ * <p>The RecipeVersion of the DataBrew recipe.</p>
+ */
+ RecipeVersion: string | undefined;
+ }
+ /**
+ * @public
+ * <p>A Glue Studio node that uses a Glue DataBrew recipe in Glue jobs.</p>
+ */
+ export interface Recipe {
+ /**
+ * <p>The name of the Glue Studio node.</p>
+ */
+ Name: string | undefined;
+ /**
+ * <p>The nodes that are inputs to the recipe node, identified by id.</p>
+ */
+ Inputs: string[] | undefined;
+ /**
+ * <p>A reference to the DataBrew recipe used by the node.</p>
+ */
+ RecipeReference: RecipeReference | undefined;
+ }
  /**
  * @public
  * <p>Specifies an Amazon Redshift data store.</p>
@@ -5278,24 +5338,25 @@ export interface JobRun {
  MaxCapacity?: number;
  /**
  * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of
- * Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
+ * G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
  * <ul>
  * <li>
- * <p>For the <code>Standard</code> worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.</p>
+ * <p>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.</p>
  * </li>
  * <li>
- * <p>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.</p>
+ * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.</p>
  * </li>
  * <li>
- * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.</p>
+ * <p>For the <code>G.4X</code> worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).</p>
  * </li>
  * <li>
- * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
+ * <p>For the <code>G.8X</code> worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the <code>G.4X</code> worker type.</p>
  * </li>
  * <li>
- * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m
- * emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the
- * autoscaler.</p>
+ * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
+ * </li>
+ * <li>
+ * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.</p>
  * </li>
  * </ul>
  */
@@ -6899,53 +6960,3 @@ export declare const TransformType: {
  * @public
  */
  export type TransformType = (typeof TransformType)[keyof typeof TransformType];
- /**
- * @public
- * <p>The algorithm-specific parameters that are associated with the machine learning
- * transform.</p>
- */
- export interface TransformParameters {
- /**
- * <p>The type of machine learning transform.</p>
- * <p>For information about the types of machine learning transforms, see <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job-machine-learning-transform.html">Creating Machine Learning Transforms</a>.</p>
- */
- TransformType: TransformType | string | undefined;
- /**
- * <p>The parameters for the find matches algorithm.</p>
- */
- FindMatchesParameters?: FindMatchesParameters;
- }
- /**
- * @public
- * @enum
- */
- export declare const MLUserDataEncryptionModeString: {
- readonly DISABLED: "DISABLED";
- readonly SSEKMS: "SSE-KMS";
- };
- /**
- * @public
- */
- export type MLUserDataEncryptionModeString = (typeof MLUserDataEncryptionModeString)[keyof typeof MLUserDataEncryptionModeString];
- /**
- * @public
- * <p>The encryption-at-rest settings of the transform that apply to accessing user data.</p>
- */
- export interface MLUserDataEncryption {
- /**
- * <p>The encryption mode applied to user data. Valid values are:</p>
- * <ul>
- * <li>
- * <p>DISABLED: encryption is disabled</p>
- * </li>
- * <li>
- * <p>SSEKMS: use of server-side encryption with Key Management Service (SSE-KMS) for user data stored in Amazon S3.</p>
- * </li>
- * </ul>
- */
- MlUserDataEncryptionMode: MLUserDataEncryptionModeString | string | undefined;
- /**
- * <p>The ID for the customer-provided KMS key.</p>
- */
- KmsKeyId?: string;
- }
package/dist-types/models/models_1.d.ts
@@ -1,6 +1,56 @@
  import { ExceptionOptionType as __ExceptionOptionType } from "@smithy/smithy-client";
  import { GlueServiceException as __BaseException } from "./GlueServiceException";
- import { Action, AuditContext, Blueprint, Column, ConnectionsList, ConnectionType, Crawler, CsvHeaderOption, DatabaseIdentifier, DataFormat, DataQualityRuleResult, DataQualityTargetTable, DataSource, DevEndpoint, ErrorDetail, EventBatchingCondition, FederatedDatabase, GlueTable, JobRun, MLUserDataEncryption, Partition, PartitionInput, PartitionValueList, PhysicalConnectionRequirements, Predicate, PrincipalPermissions, SchemaId, StorageDescriptor, TaskStatusType, TransformParameters, TransformType, Trigger, TriggerType, WorkerType } from "./models_0";
+ import { Action, AuditContext, Blueprint, Column, ConnectionsList, ConnectionType, Crawler, CsvHeaderOption, DatabaseIdentifier, DataFormat, DataQualityRuleResult, DataQualityTargetTable, DataSource, DevEndpoint, ErrorDetail, EventBatchingCondition, FederatedDatabase, FindMatchesParameters, GlueTable, JobRun, Partition, PartitionInput, PartitionValueList, PhysicalConnectionRequirements, Predicate, PrincipalPermissions, SchemaId, StorageDescriptor, TaskStatusType, TransformType, Trigger, TriggerType, WorkerType } from "./models_0";
+ /**
+ * @public
+ * <p>The algorithm-specific parameters that are associated with the machine learning
+ * transform.</p>
+ */
+ export interface TransformParameters {
+ /**
+ * <p>The type of machine learning transform.</p>
+ * <p>For information about the types of machine learning transforms, see <a href="https://docs.aws.amazon.com/glue/latest/dg/add-job-machine-learning-transform.html">Creating Machine Learning Transforms</a>.</p>
+ */
+ TransformType: TransformType | string | undefined;
+ /**
+ * <p>The parameters for the find matches algorithm.</p>
+ */
+ FindMatchesParameters?: FindMatchesParameters;
+ }
+ /**
+ * @public
+ * @enum
+ */
+ export declare const MLUserDataEncryptionModeString: {
+ readonly DISABLED: "DISABLED";
+ readonly SSEKMS: "SSE-KMS";
+ };
+ /**
+ * @public
+ */
+ export type MLUserDataEncryptionModeString = (typeof MLUserDataEncryptionModeString)[keyof typeof MLUserDataEncryptionModeString];
+ /**
+ * @public
+ * <p>The encryption-at-rest settings of the transform that apply to accessing user data.</p>
+ */
+ export interface MLUserDataEncryption {
+ /**
+ * <p>The encryption mode applied to user data. Valid values are:</p>
+ * <ul>
+ * <li>
+ * <p>DISABLED: encryption is disabled</p>
+ * </li>
+ * <li>
+ * <p>SSEKMS: use of server-side encryption with Key Management Service (SSE-KMS) for user data stored in Amazon S3.</p>
+ * </li>
+ * </ul>
+ */
+ MlUserDataEncryptionMode: MLUserDataEncryptionModeString | string | undefined;
+ /**
+ * <p>The ID for the customer-provided KMS key.</p>
+ */
+ KmsKeyId?: string;
+ }
  /**
  * @public
  * <p>The encryption-at-rest settings of the transform that apply to accessing user data. Machine learning transforms can access user data encrypted in Amazon S3 using KMS.</p>
@@ -728,19 +778,23 @@ export interface CreateSessionRequest {
  */
  NumberOfWorkers?: number;
  /**
- * <p>The type of predefined worker that is allocated to use for the session. Accepts a value of Standard, G.1X, G.2X, or G.025X.</p>
+ * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of
+ * G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks.</p>
  * <ul>
  * <li>
- * <p>For the <code>Standard</code> worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.</p>
+ * <p>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.</p>
+ * </li>
+ * <li>
+ * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.</p>
  * </li>
  * <li>
- * <p>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.</p>
+ * <p>For the <code>G.4X</code> worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).</p>
  * </li>
  * <li>
- * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.</p>
+ * <p>For the <code>G.8X</code> worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the <code>G.4X</code> worker type.</p>
  * </li>
  * <li>
- * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
+ * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.</p>
  * </li>
  * </ul>
  */
@@ -6159,60 +6213,3 @@ export interface GetUnfilteredPartitionsMetadataRequest {
  */
  MaxResults?: number;
  }
- /**
- * @public
- * <p>A partition that contains unfiltered metadata.</p>
- */
- export interface UnfilteredPartition {
- /**
- * <p>The partition object.</p>
- */
- Partition?: Partition;
- /**
- * <p>The list of columns the user has permissions to access.</p>
- */
- AuthorizedColumns?: string[];
- /**
- * <p>A Boolean value indicating that the partition location is registered with Lake Formation.</p>
- */
- IsRegisteredWithLakeFormation?: boolean;
- }
- /**
- * @public
- */
- export interface GetUnfilteredPartitionsMetadataResponse {
- /**
- * <p>A list of requested partitions.</p>
- */
- UnfilteredPartitions?: UnfilteredPartition[];
- /**
- * <p>A continuation token, if the returned list of partitions does not include the last
- * one.</p>
- */
- NextToken?: string;
- }
- /**
- * @public
- */
- export interface GetUnfilteredTableMetadataRequest {
- /**
- * <p>The catalog ID where the table resides.</p>
- */
- CatalogId: string | undefined;
- /**
- * <p>(Required) Specifies the name of a database that contains the table.</p>
- */
- DatabaseName: string | undefined;
- /**
- * <p>(Required) Specifies the name of a table for which you are requesting metadata.</p>
- */
- Name: string | undefined;
- /**
- * <p>A structure containing Lake Formation audit context information.</p>
- */
- AuditContext?: AuditContext;
- /**
- * <p>(Required) A list of supported permission types. </p>
- */
- SupportedPermissionTypes: (PermissionType | string)[] | undefined;
- }
package/dist-types/models/models_2.d.ts
@@ -1,7 +1,64 @@
  import { ExceptionOptionType as __ExceptionOptionType } from "@smithy/smithy-client";
  import { GlueServiceException as __BaseException } from "./GlueServiceException";
- import { Action, Aggregate, AmazonRedshiftSource, AmazonRedshiftTarget, AthenaConnectorSource, BasicCatalogTarget, CatalogDeltaSource, CatalogHudiSource, CatalogKafkaSource, CatalogKinesisSource, CatalogSource, ConnectionInput, ConnectionsList, CrawlerTargets, CsvHeaderOption, CustomCode, CustomEntityType, DatabaseInput, DataQualityTargetTable, DataSource, DirectJDBCSource, DirectKafkaSource, DirectKinesisSource, DropDuplicates, DropFields, DropNullFields, DynamicTransform, DynamoDBCatalogSource, ErrorDetail, EvaluateDataQuality, EvaluateDataQualityMultiFrame, EventBatchingCondition, ExecutionClass, ExecutionProperty, FillMissingValues, Filter, GovernedCatalogSource, GovernedCatalogTarget, JDBCConnectorSource, JDBCConnectorTarget, JobCommand, Join, LakeFormationConfiguration, LineageConfiguration, Merge, MicrosoftSQLServerCatalogSource, MicrosoftSQLServerCatalogTarget, MySQLCatalogSource, MySQLCatalogTarget, NotificationProperty, OracleSQLCatalogSource, OracleSQLCatalogTarget, PartitionInput, PIIDetection, PostgreSQLCatalogSource, PostgreSQLCatalogTarget, Predicate, RecrawlPolicy, RedshiftSource, RedshiftTarget, RelationalCatalogSource, RenameField, S3CatalogDeltaSource, S3CatalogHudiSource, S3CatalogSource, S3CatalogTarget, S3CsvSource, S3DeltaCatalogTarget, S3DeltaDirectTarget, S3DeltaSource, S3DirectTarget, S3GlueParquetTarget, S3HudiCatalogTarget, S3HudiDirectTarget, S3HudiSource, S3JsonSource, S3ParquetSource, SchemaChangePolicy, SchemaId, SelectFields, SelectFromCollection, SourceControlAuthStrategy, SourceControlDetails, SourceControlProvider, SparkConnectorSource, SparkConnectorTarget, SparkSQL, Spigot, SplitFields, TaskStatusType, TransformParameters, Trigger, Union, WorkerType, Workflow, WorkflowRun } from "./models_0";
- import { ColumnStatistics, Compatibility, DataCatalogEncryptionSettings, DataQualityEvaluationRunAdditionalRunOptions, JobBookmarkEntry, PrincipalType, RegistryId, RegistryStatus, ResourceShareType, ResourceUri, SchemaStatus, SchemaVersionNumber, SchemaVersionStatus, Session, Statement, Table, TableInput, TransformFilterCriteria, TransformSortCriteria, UserDefinedFunctionInput } from "./models_1";
+ import { Action, Aggregate, AmazonRedshiftSource, AmazonRedshiftTarget, AthenaConnectorSource, AuditContext, BasicCatalogTarget, CatalogDeltaSource, CatalogHudiSource, CatalogKafkaSource, CatalogKinesisSource, CatalogSource, ConnectionInput, ConnectionsList, CrawlerTargets, CsvHeaderOption, CustomCode, CustomEntityType, DatabaseInput, DataQualityTargetTable, DataSource, DirectJDBCSource, DirectKafkaSource, DirectKinesisSource, DropDuplicates, DropFields, DropNullFields, DynamicTransform, DynamoDBCatalogSource, ErrorDetail, EvaluateDataQuality, EvaluateDataQualityMultiFrame, EventBatchingCondition, ExecutionClass, ExecutionProperty, FillMissingValues, Filter, GovernedCatalogSource, GovernedCatalogTarget, JDBCConnectorSource, JDBCConnectorTarget, JobCommand, Join, LakeFormationConfiguration, LineageConfiguration, Merge, MicrosoftSQLServerCatalogSource, MicrosoftSQLServerCatalogTarget, MySQLCatalogSource, MySQLCatalogTarget, NotificationProperty, OracleSQLCatalogSource, OracleSQLCatalogTarget, Partition, PartitionInput, PIIDetection, PostgreSQLCatalogSource, PostgreSQLCatalogTarget, Predicate, Recipe, RecrawlPolicy, RedshiftSource, RedshiftTarget, RelationalCatalogSource, RenameField, S3CatalogDeltaSource, S3CatalogHudiSource, S3CatalogSource, S3CatalogTarget, S3CsvSource, S3DeltaCatalogTarget, S3DeltaDirectTarget, S3DeltaSource, S3DirectTarget, S3GlueParquetTarget, S3HudiCatalogTarget, S3HudiDirectTarget, S3HudiSource, S3JsonSource, S3ParquetSource, SchemaChangePolicy, SchemaId, SelectFields, SelectFromCollection, SourceControlAuthStrategy, SourceControlDetails, SourceControlProvider, SparkConnectorSource, SparkConnectorTarget, SparkSQL, Spigot, SplitFields, TaskStatusType, Trigger, Union, WorkerType, Workflow, WorkflowRun } from "./models_0";
+ import { ColumnStatistics, Compatibility, DataCatalogEncryptionSettings, DataQualityEvaluationRunAdditionalRunOptions, JobBookmarkEntry, PermissionType, PrincipalType, RegistryId, RegistryStatus, ResourceShareType, ResourceUri, SchemaStatus, SchemaVersionNumber, SchemaVersionStatus, Session, Statement, Table, TableInput, TransformFilterCriteria, TransformParameters, TransformSortCriteria, UserDefinedFunctionInput } from "./models_1";
+ /**
+ * @public
+ * <p>A partition that contains unfiltered metadata.</p>
+ */
+ export interface UnfilteredPartition {
+ /**
+ * <p>The partition object.</p>
+ */
+ Partition?: Partition;
+ /**
+ * <p>The list of columns the user has permissions to access.</p>
+ */
+ AuthorizedColumns?: string[];
+ /**
+ * <p>A Boolean value indicating that the partition location is registered with Lake Formation.</p>
+ */
+ IsRegisteredWithLakeFormation?: boolean;
+ }
+ /**
+ * @public
+ */
+ export interface GetUnfilteredPartitionsMetadataResponse {
+ /**
+ * <p>A list of requested partitions.</p>
+ */
+ UnfilteredPartitions?: UnfilteredPartition[];
+ /**
+ * <p>A continuation token, if the returned list of partitions does not include the last
+ * one.</p>
+ */
+ NextToken?: string;
+ }
+ /**
+ * @public
+ */
+ export interface GetUnfilteredTableMetadataRequest {
+ /**
+ * <p>The catalog ID where the table resides.</p>
+ */
+ CatalogId: string | undefined;
+ /**
+ * <p>(Required) Specifies the name of a database that contains the table.</p>
+ */
+ DatabaseName: string | undefined;
+ /**
+ * <p>(Required) Specifies the name of a table for which you are requesting metadata.</p>
+ */
+ Name: string | undefined;
+ /**
+ * <p>A structure containing Lake Formation audit context information.</p>
+ */
+ AuditContext?: AuditContext;
+ /**
+ * <p>(Required) A list of supported permission types. </p>
+ */
+ SupportedPermissionTypes: (PermissionType | string)[] | undefined;
+ }
  /**
  * @public
  * <p>A filter that uses both column-level and row-level filtering.</p>
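
These types only moved between model files; the AWS SDK v3 clients re-export model types from the package root, so call sites that import from "@aws-sdk/client-glue" should be unaffected. A hypothetical sketch (the catalog, database, and table identifiers are placeholders, not values from this diff):

import { GlueClient, GetUnfilteredTableMetadataCommand } from "@aws-sdk/client-glue";

const client = new GlueClient({ region: "us-east-1" });

// Placeholder identifiers; the request shape itself is unchanged by the move.
const response = await client.send(
  new GetUnfilteredTableMetadataCommand({
    CatalogId: "123456789012",
    DatabaseName: "sales_db",
    Name: "orders",
    SupportedPermissionTypes: ["COLUMN_PERMISSION"],
  })
);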
@@ -2190,24 +2247,25 @@ export interface StartJobRunRequest {
  NotificationProperty?: NotificationProperty;
  /**
  * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of
- * Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
+ * G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
  * <ul>
  * <li>
- * <p>For the <code>Standard</code> worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.</p>
+ * <p>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.</p>
  * </li>
  * <li>
- * <p>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.</p>
+ * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.</p>
  * </li>
  * <li>
- * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.</p>
+ * <p>For the <code>G.4X</code> worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).</p>
  * </li>
  * <li>
- * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
+ * <p>For the <code>G.8X</code> worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the <code>G.4X</code> worker type.</p>
  * </li>
  * <li>
- * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 DPU (8vCPU, 64 GB of m
- * emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the
- * autoscaler.</p>
+ * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
+ * </li>
+ * <li>
+ * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.</p>
  * </li>
  * </ul>
  */
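
As a quick illustration of the worker types documented above, a hypothetical job run pinned to one of the larger Spark workers. The job name is a placeholder, and as the updated docs note, G.4X/G.8X availability depends on region and Glue version.

import { GlueClient, StartJobRunCommand } from "@aws-sdk/client-glue";

const client = new GlueClient({ region: "us-east-1" });

// "example-job" is a placeholder job name.
await client.send(
  new StartJobRunCommand({
    JobName: "example-job",
    WorkerType: "G.4X", // 4 DPU per worker, per the documentation above
    NumberOfWorkers: 10,
  })
);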
@@ -3804,6 +3862,10 @@ export interface CodeGenConfigurationNode {
  * <p>Specifies your data quality evaluation criteria. Allows multiple input data and returns a collection of Dynamic Frames.</p>
  */
  EvaluateDataQualityMultiFrame?: EvaluateDataQualityMultiFrame;
+ /**
+ * <p>Specifies a Glue DataBrew recipe node.</p>
+ */
+ Recipe?: Recipe;
  }
  /**
  * @public
@@ -3936,24 +3998,25 @@ export interface CreateJobRequest {
  NumberOfWorkers?: number;
  /**
  * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of
- * Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
+ * G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
  * <ul>
  * <li>
- * <p>For the <code>Standard</code> worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.</p>
+ * <p>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.</p>
+ * </li>
+ * <li>
+ * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.</p>
  * </li>
  * <li>
- * <p>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.</p>
+ * <p>For the <code>G.4X</code> worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).</p>
  * </li>
  * <li>
- * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.</p>
+ * <p>For the <code>G.8X</code> worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the <code>G.4X</code> worker type.</p>
  * </li>
  * <li>
- * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
+ * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
  * </li>
  * <li>
- * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m
- * emory, 128 GB disk), and provides up to 8 Ray workers based on the
- * autoscaler.</p>
+ * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.</p>
  * </li>
  * </ul>
  */
@@ -4084,29 +4147,25 @@ export interface Job {
  MaxCapacity?: number;
  /**
  * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of
- * Standard, G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
+ * G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
  * <ul>
  * <li>
- * <p>For the <code>Standard</code> worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.</p>
- * </li>
- * <li>
- * <p>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.</p>
+ * <p>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.</p>
  * </li>
  * <li>
- * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.</p>
+ * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs.</p>
  * </li>
  * <li>
- * <p>For the <code>G.4X</code> worker type, each worker maps to 4 DPU (16 vCPU, 64 GB of memory, 256 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).</p>
+ * <p>For the <code>G.4X</code> worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).</p>
  * </li>
  * <li>
- * <p>For the <code>G.8X</code> worker type, each worker maps to 8 DPU (32 vCPU, 128 GB of memory, 512 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the <code>G.4X</code> worker type.</p>
+ * <p>For the <code>G.8X</code> worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the <code>G.4X</code> worker type.</p>
  * </li>
  * <li>
- * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
+ * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
  * </li>
  * <li>
- * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m
- * emory, 128 GB disk), and provides a default of 8 Ray workers (1 per vCPU).</p>
+ * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.</p>
  * </li>
  * </ul>
  */
@@ -4252,24 +4311,25 @@ export interface JobUpdate {
  MaxCapacity?: number;
  /**
  * <p>The type of predefined worker that is allocated when a job runs. Accepts a value of
- * Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
+ * G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.</p>
  * <ul>
  * <li>
- * <p>For the <code>Standard</code> worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.</p>
+ * <p>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84 GB disk (approximately 34 GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, offering a scalable and cost-effective way to run most jobs.</p>
+ * </li>
+ * <li>
+ * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128 GB disk (approximately 77 GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, offering a scalable and cost-effective way to run most jobs.</p>
  * </li>
  * <li>
- * <p>For the <code>G.1X</code> worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.</p>
+ * <p>For the <code>G.4X</code> worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256 GB disk (approximately 235 GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm).</p>
  * </li>
  * <li>
- * <p>For the <code>G.2X</code> worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.</p>
+ * <p>For the <code>G.8X</code> worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512 GB disk (approximately 487 GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the <code>G.4X</code> worker type.</p>
  * </li>
  * <li>
- * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
+ * <p>For the <code>G.025X</code> worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84 GB disk (approximately 34 GB free), and provides 1 executor per worker. We recommend this worker type for low-volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.</p>
  * </li>
  * <li>
- * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8 vCPU, 64 GB of memory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.</p>
+ * <p>For the <code>Z.2X</code> worker type, each worker maps to 2 M-DPU (8 vCPUs, 64 GB of memory) with 128 GB disk (approximately 120 GB free), and provides up to 8 Ray workers based on the autoscaler.</p>
  * </li>
  * </ul>
  */
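
The same enum drives JobUpdate. A minimal sketch of moving an existing Ray job onto Z.2X workers follows; the job name, role, and runtime values are placeholders, and the sketch assumes a Ray-capable Glue version:

import { GlueClient, UpdateJobCommand } from "@aws-sdk/client-glue";

const client = new GlueClient({});

// Point an existing Ray job at Z.2X workers; the autoscaler then provisions
// up to 8 Ray workers per Glue worker, as documented above.
await client.send(
  new UpdateJobCommand({
    JobName: "my-ray-job", // placeholder
    JobUpdate: {
      Role: "arn:aws:iam::123456789012:role/GlueJobRole", // placeholder
      Command: { Name: "glueray", PythonVersion: "3.9", Runtime: "Ray2.4" },
      GlueVersion: "4.0",
      WorkerType: "Z.2X",
      NumberOfWorkers: 5,
    },
  })
);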
@@ -11,10 +11,8 @@ import {
  ServiceInputTypes,
  ServiceOutputTypes,
  } from "../GlueClient";
- import {
- GetUnfilteredPartitionsMetadataRequest,
- GetUnfilteredPartitionsMetadataResponse,
- } from "../models/models_1";
+ import { GetUnfilteredPartitionsMetadataRequest } from "../models/models_1";
+ import { GetUnfilteredPartitionsMetadataResponse } from "../models/models_2";
  export { __MetadataBearer, $Command };
  export interface GetUnfilteredPartitionsMetadataCommandInput
  extends GetUnfilteredPartitionsMetadataRequest {}
@@ -11,8 +11,10 @@ import {
  ServiceInputTypes,
  ServiceOutputTypes,
  } from "../GlueClient";
- import { GetUnfilteredTableMetadataRequest } from "../models/models_1";
- import { GetUnfilteredTableMetadataResponse } from "../models/models_2";
+ import {
+ GetUnfilteredTableMetadataRequest,
+ GetUnfilteredTableMetadataResponse,
+ } from "../models/models_2";
  export { __MetadataBearer, $Command };
  export interface GetUnfilteredTableMetadataCommandInput
  extends GetUnfilteredTableMetadataRequest {}
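
These two hunks only relocate generated model types between files (for example, GetUnfilteredPartitionsMetadataResponse moves from models_1 to models_2). The package root re-exports every model, so typical consumers are unaffected; only hypothetical code that deep-imports the generated dist-types files would need its paths updated:

// Unaffected: root import, the supported surface.
import type { GetUnfilteredPartitionsMetadataResponse } from "@aws-sdk/client-glue";

// Affected only if you deep-import generated files (not recommended):
// 3.371.0: @aws-sdk/client-glue/dist-types/models/models_1
// 3.376.0: @aws-sdk/client-glue/dist-types/models/models_2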
@@ -433,6 +433,12 @@ export interface DynamoDBTarget {
  scanAll?: boolean;
  scanRate?: number;
  }
+ export interface HudiTarget {
+ Paths?: string[];
+ ConnectionName?: string;
+ Exclusions?: string[];
+ MaximumTraversalDepth?: number;
+ }
  export interface IcebergTarget {
  Paths?: string[];
  ConnectionName?: string;
@@ -472,6 +478,7 @@ export interface CrawlerTargets {
  CatalogTargets?: CatalogTarget[];
  DeltaTargets?: DeltaTarget[];
  IcebergTargets?: IcebergTarget[];
+ HudiTargets?: HudiTarget[];
  }
  export interface Crawler {
  Name?: string;
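
The new HudiTarget mirrors the existing IcebergTarget shape and hangs off CrawlerTargets. A minimal sketch of pointing a crawler at a Hudi table location; the crawler name, role, database, and S3 path are placeholders:

import { GlueClient, CreateCrawlerCommand } from "@aws-sdk/client-glue";

const client = new GlueClient({});

// Crawl an S3 prefix containing Apache Hudi tables via the new HudiTargets.
await client.send(
  new CreateCrawlerCommand({
    Name: "hudi-crawler", // placeholder
    Role: "arn:aws:iam::123456789012:role/GlueCrawlerRole", // placeholder
    DatabaseName: "lakehouse_db", // placeholder
    Targets: {
      HudiTargets: [
        {
          Paths: ["s3://my-bucket/hudi/orders/"],
          MaximumTraversalDepth: 10,
          Exclusions: ["**/_temporary/**"],
        },
      ],
    },
  })
);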
@@ -1085,6 +1092,15 @@ export interface PostgreSQLCatalogTarget {
  Database: string | undefined;
  Table: string | undefined;
  }
+ export interface RecipeReference {
+ RecipeArn: string | undefined;
+ RecipeVersion: string | undefined;
+ }
+ export interface Recipe {
+ Name: string | undefined;
+ Inputs: string[] | undefined;
+ RecipeReference: RecipeReference | undefined;
+ }
  export interface RedshiftSource {
  Name: string | undefined;
  Database: string | undefined;
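
Recipe is a new visual-job node that references a DataBrew recipe by ARN and version, with Inputs carrying the ids of upstream nodes. A sketch of the shape, using placeholder names and a hypothetical recipe ARN (it is wired into a job request near the end of this diff):

import type { Recipe } from "@aws-sdk/client-glue";

const recipeNode: Recipe = {
  Name: "ApplyCleaningRecipe", // placeholder node name
  Inputs: ["node-1"], // id of the upstream node feeding this step
  RecipeReference: {
    RecipeArn: "arn:aws:databrew:us-east-1:123456789012:recipe/clean-orders", // hypothetical
    RecipeVersion: "1.0",
  },
};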
@@ -2081,17 +2097,3 @@ export declare const TransformType: {
  readonly FIND_MATCHES: "FIND_MATCHES";
  };
  export type TransformType = (typeof TransformType)[keyof typeof TransformType];
- export interface TransformParameters {
- TransformType: TransformType | string | undefined;
- FindMatchesParameters?: FindMatchesParameters;
- }
- export declare const MLUserDataEncryptionModeString: {
- readonly DISABLED: "DISABLED";
- readonly SSEKMS: "SSE-KMS";
- };
- export type MLUserDataEncryptionModeString =
- (typeof MLUserDataEncryptionModeString)[keyof typeof MLUserDataEncryptionModeString];
- export interface MLUserDataEncryption {
- MlUserDataEncryptionMode: MLUserDataEncryptionModeString | string | undefined;
- KmsKeyId?: string;
- }
@@ -18,9 +18,9 @@ import {
  ErrorDetail,
  EventBatchingCondition,
  FederatedDatabase,
+ FindMatchesParameters,
  GlueTable,
  JobRun,
- MLUserDataEncryption,
  Partition,
  PartitionInput,
  PartitionValueList,
@@ -30,12 +30,25 @@ import {
  SchemaId,
  StorageDescriptor,
  TaskStatusType,
- TransformParameters,
  TransformType,
  Trigger,
  TriggerType,
  WorkerType,
  } from "./models_0";
+ export interface TransformParameters {
+ TransformType: TransformType | string | undefined;
+ FindMatchesParameters?: FindMatchesParameters;
+ }
+ export declare const MLUserDataEncryptionModeString: {
+ readonly DISABLED: "DISABLED";
+ readonly SSEKMS: "SSE-KMS";
+ };
+ export type MLUserDataEncryptionModeString =
+ (typeof MLUserDataEncryptionModeString)[keyof typeof MLUserDataEncryptionModeString];
+ export interface MLUserDataEncryption {
+ MlUserDataEncryptionMode: MLUserDataEncryptionModeString | string | undefined;
+ KmsKeyId?: string;
+ }
  export interface TransformEncryption {
  MlUserDataEncryption?: MLUserDataEncryption;
  TaskRunSecurityConfigurationName?: string;
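
TransformParameters and MLUserDataEncryption move from models_0 to models_1 unchanged; they still flow through the ML-transform APIs exactly as before. A minimal sketch, with placeholder table, role, and KMS values:

import { GlueClient, CreateMLTransformCommand } from "@aws-sdk/client-glue";

const client = new GlueClient({});

// TransformParameters selects FIND_MATCHES; MLUserDataEncryption opts the
// transform's user data into SSE-KMS.
await client.send(
  new CreateMLTransformCommand({
    Name: "dedupe-customers", // placeholder
    Role: "arn:aws:iam::123456789012:role/GlueMLRole", // placeholder
    InputRecordTables: [{ DatabaseName: "crm", TableName: "customers" }],
    Parameters: {
      TransformType: "FIND_MATCHES",
      FindMatchesParameters: { PrimaryKeyColumnName: "customer_id" },
    },
    TransformEncryption: {
      MlUserDataEncryption: {
        MlUserDataEncryptionMode: "SSE-KMS",
        KmsKeyId: "alias/glue-ml", // hypothetical key alias
      },
    },
  })
);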
@@ -1639,19 +1652,3 @@ export interface GetUnfilteredPartitionsMetadataRequest {
  Segment?: Segment;
  MaxResults?: number;
  }
- export interface UnfilteredPartition {
- Partition?: Partition;
- AuthorizedColumns?: string[];
- IsRegisteredWithLakeFormation?: boolean;
- }
- export interface GetUnfilteredPartitionsMetadataResponse {
- UnfilteredPartitions?: UnfilteredPartition[];
- NextToken?: string;
- }
- export interface GetUnfilteredTableMetadataRequest {
- CatalogId: string | undefined;
- DatabaseName: string | undefined;
- Name: string | undefined;
- AuditContext?: AuditContext;
- SupportedPermissionTypes: (PermissionType | string)[] | undefined;
- }
@@ -6,6 +6,7 @@ import {
  AmazonRedshiftSource,
  AmazonRedshiftTarget,
  AthenaConnectorSource,
+ AuditContext,
  BasicCatalogTarget,
  CatalogDeltaSource,
  CatalogHudiSource,
@@ -53,11 +54,13 @@ import {
  NotificationProperty,
  OracleSQLCatalogSource,
  OracleSQLCatalogTarget,
+ Partition,
  PartitionInput,
  PIIDetection,
  PostgreSQLCatalogSource,
  PostgreSQLCatalogTarget,
  Predicate,
+ Recipe,
  RecrawlPolicy,
  RedshiftSource,
  RedshiftTarget,
@@ -91,7 +94,6 @@ import {
  Spigot,
  SplitFields,
  TaskStatusType,
- TransformParameters,
  Trigger,
  Union,
  WorkerType,
@@ -104,6 +106,7 @@ import {
  DataCatalogEncryptionSettings,
  DataQualityEvaluationRunAdditionalRunOptions,
  JobBookmarkEntry,
+ PermissionType,
  PrincipalType,
  RegistryId,
  RegistryStatus,
@@ -117,9 +120,26 @@ import {
  Table,
  TableInput,
  TransformFilterCriteria,
+ TransformParameters,
  TransformSortCriteria,
  UserDefinedFunctionInput,
  } from "./models_1";
+ export interface UnfilteredPartition {
+ Partition?: Partition;
+ AuthorizedColumns?: string[];
+ IsRegisteredWithLakeFormation?: boolean;
+ }
+ export interface GetUnfilteredPartitionsMetadataResponse {
+ UnfilteredPartitions?: UnfilteredPartition[];
+ NextToken?: string;
+ }
+ export interface GetUnfilteredTableMetadataRequest {
+ CatalogId: string | undefined;
+ DatabaseName: string | undefined;
+ Name: string | undefined;
+ AuditContext?: AuditContext;
+ SupportedPermissionTypes: (PermissionType | string)[] | undefined;
+ }
  export interface ColumnRowFilter {
  ColumnName?: string;
  RowFilterExpression?: string;
@@ -1176,6 +1196,7 @@ export interface CodeGenConfigurationNode {
  AmazonRedshiftSource?: AmazonRedshiftSource;
  AmazonRedshiftTarget?: AmazonRedshiftTarget;
  EvaluateDataQualityMultiFrame?: EvaluateDataQualityMultiFrame;
+ Recipe?: Recipe;
  }
  export interface CreateJobRequest {
  Name: string | undefined;
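
The new Recipe node slots into CodeGenConfigurationNodes on job requests. A sketch reusing the recipe shape from earlier; node ids, names, ARNs, and S3 paths are placeholders, and the rest of the DAG is omitted for brevity:

import { GlueClient, CreateJobCommand } from "@aws-sdk/client-glue";

const client = new GlueClient({});

await client.send(
  new CreateJobCommand({
    Name: "visual-job-with-recipe", // placeholder
    Role: "arn:aws:iam::123456789012:role/GlueJobRole", // placeholder
    Command: { Name: "glueetl", ScriptLocation: "s3://my-bucket/scripts/generated.py" },
    CodeGenConfigurationNodes: {
      // "node-2" applies the DataBrew recipe to the output of "node-1".
      "node-2": {
        Recipe: {
          Name: "ApplyCleaningRecipe",
          Inputs: ["node-1"],
          RecipeReference: {
            RecipeArn: "arn:aws:databrew:us-east-1:123456789012:recipe/clean-orders", // hypothetical
            RecipeVersion: "1.0",
          },
        },
      },
    },
  })
);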
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@aws-sdk/client-glue",
  "description": "AWS SDK for JavaScript Glue Client for Node.js, Browser and React Native",
- "version": "3.371.0",
+ "version": "3.376.0",
  "scripts": {
  "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'",
  "build:cjs": "tsc -p tsconfig.cjs.json",