@aws-sdk/client-glue 3.428.0 → 3.429.0

This diff shows the changes between two publicly released versions of the package, exactly as they appear in the public registry, and is provided for informational purposes only.
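Almost every hunk below follows one of two patterns: enum-valued members are narrowed from EnumType | string to the bare enum type (for example, Status?: BlueprintStatus | string becomes Status?: BlueprintStatus), and loosely keyed maps such as Record<string, string> gain enum key types. A minimal TypeScript sketch of the consumer-facing effect, using the Schedule shape and ScheduleState enum that appear in the diff (the member name SCHEDULED is assumed from the SDK's published typings, not from this diff):

import { Schedule, ScheduleState } from "@aws-sdk/client-glue";

// 3.428.0 typed the field as ScheduleState | string, so any string compiled.
// 3.429.0 types it as ScheduleState, a union of known string literals.
const viaEnum: Schedule = { State: ScheduleState.SCHEDULED }; // assumed member name
const viaLiteral: Schedule = { State: "SCHEDULED" }; // exact literal still type-checks
// const typo: Schedule = { State: "SCHEDULD" }; // now rejected at compile time

The change is confined to the type declarations (.d.ts), so runtime behavior is unchanged; it surfaces as stricter compile-time checking of values you construct or compare.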
@@ -124,7 +124,7 @@ export interface AggregateOperation {
  * <p>Possible aggregation functions include: avg countDistinct, count, first, last, kurtosis, max, min, skewness,
  * stddev_samp, stddev_pop, sum, sumDistinct, var_samp, var_pop</p>
  */
- AggFunc: AggFunction | string | undefined;
+ AggFunc: AggFunction | undefined;
  }
  /**
  * @public
@@ -1235,7 +1235,7 @@ export interface Blueprint {
  * </li>
  * </ul>
  */
- Status?: BlueprintStatus | string;
+ Status?: BlueprintStatus;
  /**
  * @public
  * <p>An error message.</p>
@@ -1311,7 +1311,7 @@ export interface LastCrawlInfo {
  * @public
  * <p>Status of the last crawl.</p>
  */
- Status?: LastCrawlStatus | string;
+ Status?: LastCrawlStatus;
  /**
  * @public
  * <p>If an error occurred, the error information about the last crawl.</p>
@@ -1367,7 +1367,7 @@ export interface LineageConfiguration {
  * </li>
  * </ul>
  */
- CrawlerLineageSettings?: CrawlerLineageSettings | string;
+ CrawlerLineageSettings?: CrawlerLineageSettings;
  }
  /**
  * @public
@@ -1394,7 +1394,7 @@ export interface RecrawlPolicy {
  * <p>A value of <code>CRAWL_NEW_FOLDERS_ONLY</code> specifies crawling only folders that were added since the last crawler run.</p>
  * <p>A value of <code>CRAWL_EVENT_MODE</code> specifies crawling only the changes identified by Amazon S3 events.</p>
  */
- RecrawlBehavior?: RecrawlBehavior | string;
+ RecrawlBehavior?: RecrawlBehavior;
  }
  /**
  * @public
@@ -1425,7 +1425,7 @@ export interface Schedule {
  * @public
  * <p>The state of the schedule.</p>
  */
- State?: ScheduleState | string;
+ State?: ScheduleState;
  }
  /**
  * @public
@@ -1461,12 +1461,12 @@ export interface SchemaChangePolicy {
  * @public
  * <p>The update behavior when the crawler finds a changed schema.</p>
  */
- UpdateBehavior?: UpdateBehavior | string;
+ UpdateBehavior?: UpdateBehavior;
  /**
  * @public
  * <p>The deletion behavior when the crawler finds a deleted object.</p>
  */
- DeleteBehavior?: DeleteBehavior | string;
+ DeleteBehavior?: DeleteBehavior;
  }
  /**
  * @public
@@ -1654,7 +1654,7 @@ export interface JdbcTarget {
  * <p>Specify a value of <code>RAWTYPES</code> or <code>COMMENTS</code> to enable additional metadata in table responses. <code>RAWTYPES</code> provides the native-level datatype. <code>COMMENTS</code> provides comments associated with a column or table in the database.</p>
  * <p>If you do not need additional metadata, keep the field empty.</p>
  */
- EnableAdditionalMetadata?: (JdbcMetadataEntry | string)[];
+ EnableAdditionalMetadata?: JdbcMetadataEntry[];
  }
  /**
  * @public
@@ -1819,7 +1819,7 @@ export interface Crawler {
  * @public
  * <p>Indicates whether the crawler is running, or whether a run is pending.</p>
  */
- State?: CrawlerState | string;
+ State?: CrawlerState;
  /**
  * @public
  * <p>The prefix added to the names of tables that are created.</p>
@@ -2036,7 +2036,7 @@ export interface DataQualityRuleResult {
  * @public
  * <p>A pass or fail status for the rule.</p>
  */
- Result?: DataQualityRuleResultStatus | string;
+ Result?: DataQualityRuleResultStatus;
  /**
  * @public
  * <p>A map of metrics associated with the evaluation of the rule.</p>
@@ -2220,7 +2220,7 @@ export interface DevEndpoint {
  * <p>Known issue: when a development endpoint is created with the <code>G.2X</code>
  * <code>WorkerType</code> configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk. </p>
  */
- WorkerType?: WorkerType | string;
+ WorkerType?: WorkerType;
  /**
  * @public
  * <p>Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints. </p>
@@ -2626,7 +2626,7 @@ export interface KinesisStreamingSourceOptions {
  * <p>The starting position in the Kinesis data stream to read data from. The possible values are <code>"latest"</code>, <code>"trim_horizon"</code>, <code>"earliest"</code>, or a timestamp string in UTC format in the pattern <code>yyyy-mm-ddTHH:MM:SSZ</code> (where <code>Z</code> represents a UTC timezone offset with a +/-. For example: "2023-04-04T08:00:00-04:00"). The default value is <code>"latest"</code>.</p>
  * <p>Note: Using a value that is a timestamp string in UTC format for "startingPosition" is supported only for Glue version 4.0 or later.</p>
  */
- StartingPosition?: StartingPosition | string;
+ StartingPosition?: StartingPosition;
  /**
  * @public
  * <p>The maximum time spent in the job executor to fetch a record from the Kinesis data stream per shard, specified in milliseconds (ms). The default value is <code>1000</code>.</p>
@@ -2871,7 +2871,7 @@ export interface DirectJDBCSource {
  * @public
  * <p>The connection type of the JDBC source.</p>
  */
- ConnectionType: JDBCConnectionType | string | undefined;
+ ConnectionType: JDBCConnectionType | undefined;
  /**
  * @public
  * <p>The temp directory of the JDBC Redshift source.</p>
@@ -3093,7 +3093,7 @@ export interface TransformConfigParameter {
  * @public
  * <p>Specifies the parameter type in the config file of the dynamic transform.</p>
  */
- Type: ParamType | string | undefined;
+ Type: ParamType | undefined;
  /**
  * @public
  * <p>Specifies the validation rule in the config file of the dynamic transform.</p>
@@ -3113,7 +3113,7 @@ export interface TransformConfigParameter {
  * @public
  * <p>Specifies the list type of the parameter in the config file of the dynamic transform.</p>
  */
- ListType?: ParamType | string;
+ ListType?: ParamType;
  /**
  * @public
  * <p>Specifies whether the parameter is optional or not in the config file of the dynamic transform.</p>
@@ -3246,7 +3246,7 @@ export interface DQStopJobOnFailureOptions {
  * @public
  * <p>When to stop job if your data quality evaluation fails. Options are Immediate or AfterDataLoad.</p>
  */
- StopJobOnFailureTiming?: DQStopJobOnFailureTiming | string;
+ StopJobOnFailureTiming?: DQStopJobOnFailureTiming;
  }
  /**
  * @public
@@ -3272,7 +3272,7 @@ export interface EvaluateDataQuality {
  * @public
  * <p>The output of your data quality evaluation.</p>
  */
- Output?: DQTransformOutput | string;
+ Output?: DQTransformOutput;
  /**
  * @public
  * <p>Options to configure how your results are published.</p>
@@ -3318,7 +3318,7 @@ export interface EvaluateDataQualityMultiFrame {
  * @public
  * <p>Options to configure runtime behavior of the transform.</p>
  */
- AdditionalOptions?: Record<string, string>;
+ AdditionalOptions?: Record<AdditionalOptionKeys, string>;
  /**
  * @public
  * <p>Options to configure how your job will stop if your data quality evaluation fails.</p>
@@ -3389,7 +3389,7 @@ export interface FilterValue {
  * @public
  * <p>The type of filter value.</p>
  */
- Type: FilterValueType | string | undefined;
+ Type: FilterValueType | undefined;
  /**
  * @public
  * <p>The value to be associated.</p>
@@ -3405,7 +3405,7 @@ export interface FilterExpression {
  * @public
  * <p>The type of operation to perform in the expression.</p>
  */
- Operation: FilterOperation | string | undefined;
+ Operation: FilterOperation | undefined;
  /**
  * @public
  * <p>Whether the expression is to be negated.</p>
@@ -3448,7 +3448,7 @@ export interface Filter {
  * @public
  * <p>The operator used to filter rows by comparing the key value to a specified value.</p>
  */
- LogicalOperator: FilterLogicalOperator | string | undefined;
+ LogicalOperator: FilterLogicalOperator | undefined;
  /**
  * @public
  * <p>Specifies a filter expression.</p>
@@ -3528,7 +3528,7 @@ export interface CatalogSchemaChangePolicy {
  * @public
  * <p>The update behavior when the crawler finds a changed schema.</p>
  */
- UpdateBehavior?: UpdateCatalogBehavior | string;
+ UpdateBehavior?: UpdateCatalogBehavior;
  }
  /**
  * @public
@@ -3683,7 +3683,7 @@ export interface JDBCConnectorOptions {
  * @public
  * <p>Custom data type mapping that builds a mapping from a JDBC data type to an Glue data type. For example, the option <code>"dataTypeMapping":\{"FLOAT":"STRING"\}</code> maps data fields of JDBC type <code>FLOAT</code> into the Java <code>String</code> type by calling the <code>ResultSet.getString()</code> method of the driver, and uses it to build the Glue record. The <code>ResultSet</code> object is implemented by each driver, so the behavior is specific to the driver you use. Refer to the documentation for your JDBC driver to understand how the driver performs the conversions.</p>
  */
- DataTypeMapping?: Record<string, GlueRecordType | string>;
+ DataTypeMapping?: Record<JDBCDataType, GlueRecordType>;
  }
  /**
  * @public
@@ -3828,7 +3828,7 @@ export interface Join {
  * @public
  * <p>Specifies the type of join to be performed on the datasets.</p>
  */
- JoinType: JoinType | string | undefined;
+ JoinType: JoinType | undefined;
  /**
  * @public
  * <p>A list of the two columns to be joined.</p>
@@ -4035,7 +4035,7 @@ export interface PIIDetection {
  * @public
  * <p>Indicates the type of PIIDetection transform. </p>
  */
- PiiType: PiiType | string | undefined;
+ PiiType: PiiType | undefined;
  /**
  * @public
  * <p>Indicates the types of entities the PIIDetection transform will identify as PII data. </p>
@@ -4506,7 +4506,7 @@ export interface S3CsvSource {
  * @public
  * <p>Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are <code>"gzip"</code> and <code>"bzip"</code>).</p>
  */
- CompressionType?: CompressionType | string;
+ CompressionType?: CompressionType;
  /**
  * @public
  * <p>A string containing a JSON list of Unix-style glob patterns to exclude. For example, "[\"**.pdf\"]" excludes all PDF files. </p>
@@ -4546,7 +4546,7 @@ export interface S3CsvSource {
  * @public
  * <p>Specifies the delimiter character. The default is a comma: ",", but any other character can be specified.</p>
  */
- Separator: Separator | string | undefined;
+ Separator: Separator | undefined;
  /**
  * @public
  * <p>Specifies a character to use for escaping. This option is used only when reading CSV files. The default value is <code>none</code>. If enabled, the character which immediately follows is used as-is, except for a small set of well-known escapes (<code>\n</code>, <code>\r</code>, <code>\t</code>, and <code>\0</code>).</p>
@@ -4556,7 +4556,7 @@ export interface S3CsvSource {
  * @public
  * <p>Specifies the character to use for quoting. The default is a double quote: <code>'"'</code>. Set this to <code>-1</code> to turn off quoting entirely.</p>
  */
- QuoteChar: QuoteChar | string | undefined;
+ QuoteChar: QuoteChar | undefined;
  /**
  * @public
  * <p>A Boolean value that specifies whether a single record can span multiple lines. This can occur when a field contains a quoted new-line character. You must set this option to True if any record spans multiple lines. The default value is <code>False</code>, which allows for more aggressive file-splitting during parsing.</p>
@@ -4672,7 +4672,7 @@ export interface DirectSchemaChangePolicy {
  * @public
  * <p>The update behavior when the crawler finds a changed schema.</p>
  */
- UpdateBehavior?: UpdateCatalogBehavior | string;
+ UpdateBehavior?: UpdateCatalogBehavior;
  /**
  * @public
  * <p>Specifies the table in the database that the schema change policy applies to.</p>
@@ -4713,12 +4713,12 @@ export interface S3DeltaDirectTarget {
  * @public
  * <p>Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are <code>"gzip"</code> and <code>"bzip"</code>).</p>
  */
- Compression: DeltaTargetCompressionType | string | undefined;
+ Compression: DeltaTargetCompressionType | undefined;
  /**
  * @public
  * <p>Specifies the data output format for the target.</p>
  */
- Format: TargetFormat | string | undefined;
+ Format: TargetFormat | undefined;
  /**
  * @public
  * <p>Specifies additional connection options for the connector.</p>
@@ -4795,7 +4795,7 @@ export interface S3DirectTarget {
  * @public
  * <p>Specifies the data output format for the target.</p>
  */
- Format: TargetFormat | string | undefined;
+ Format: TargetFormat | undefined;
  /**
  * @public
  * <p>A policy that specifies update behavior for the crawler.</p>
@@ -4846,7 +4846,7 @@ export interface S3GlueParquetTarget {
  * @public
  * <p>Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are <code>"gzip"</code> and <code>"bzip"</code>).</p>
  */
- Compression?: ParquetCompressionType | string;
+ Compression?: ParquetCompressionType;
  /**
  * @public
  * <p>A policy that specifies update behavior for the crawler.</p>
@@ -4932,7 +4932,7 @@ export interface S3HudiDirectTarget {
  * @public
  * <p>Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are <code>"gzip"</code> and <code>"bzip"</code>).</p>
  */
- Compression: HudiTargetCompressionType | string | undefined;
+ Compression: HudiTargetCompressionType | undefined;
  /**
  * @public
  * <p>Specifies native partitioning using a sequence of keys.</p>
@@ -4942,7 +4942,7 @@ export interface S3HudiDirectTarget {
  * @public
  * <p>Specifies the data output format for the target.</p>
  */
- Format: TargetFormat | string | undefined;
+ Format: TargetFormat | undefined;
  /**
  * @public
  * <p>Specifies additional connection options for the connector.</p>
@@ -5004,7 +5004,7 @@ export interface S3JsonSource {
  * @public
  * <p>Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are <code>"gzip"</code> and <code>"bzip"</code>).</p>
  */
- CompressionType?: CompressionType | string;
+ CompressionType?: CompressionType;
  /**
  * @public
  * <p>A string containing a JSON list of Unix-style glob patterns to exclude. For example, "[\"**.pdf\"]" excludes all PDF files. </p>
@@ -5075,7 +5075,7 @@ export interface S3ParquetSource {
  * @public
  * <p>Specifies how the data is compressed. This is generally not necessary if the data has a standard file extension. Possible values are <code>"gzip"</code> and <code>"bzip"</code>).</p>
  */
- CompressionType?: ParquetCompressionType | string;
+ CompressionType?: ParquetCompressionType;
  /**
  * @public
  * <p>A string containing a JSON list of Unix-style glob patterns to exclude. For example, "[\"**.pdf\"]" excludes all PDF files. </p>
@@ -5543,7 +5543,7 @@ export interface Union {
  * <p>Specify <code>ALL</code> to join all rows from data sources to the resulting DynamicFrame. The resulting union does not remove duplicate rows.</p>
  * <p>Specify <code>DISTINCT</code> to remove duplicate rows in the resulting DynamicFrame.</p>
  */
- UnionType: UnionType | string | undefined;
+ UnionType: UnionType | undefined;
  }
  /**
  * @public
@@ -5649,7 +5649,7 @@ export interface SourceControlDetails {
  * @public
  * <p>The provider for the remote repository.</p>
  */
- Provider?: SourceControlProvider | string;
+ Provider?: SourceControlProvider;
  /**
  * @public
  * <p>The name of the remote repository that contains the job artifacts.</p>
@@ -5679,7 +5679,7 @@ export interface SourceControlDetails {
  * @public
  * <p>The type of authentication, which can be an authentication token stored in Amazon Web Services Secrets Manager, or a personal access token.</p>
  */
- AuthStrategy?: SourceControlAuthStrategy | string;
+ AuthStrategy?: SourceControlAuthStrategy;
  /**
  * @public
  * <p>The value of an authorization token.</p>
@@ -5807,7 +5807,7 @@ export declare class FederationSourceException extends __BaseException {
  * @public
  * <p>The error code of the problem.</p>
  */
- FederationSourceErrorCode?: FederationSourceErrorCode | string;
+ FederationSourceErrorCode?: FederationSourceErrorCode;
  /**
  * @public
  * <p>The message describing the problem.</p>
@@ -5929,7 +5929,7 @@ export interface Condition {
  * @public
  * <p>A logical operator.</p>
  */
- LogicalOperator?: LogicalOperator | string;
+ LogicalOperator?: LogicalOperator;
  /**
  * @public
  * <p>The name of the job whose <code>JobRuns</code> this condition applies to, and on which
@@ -5940,7 +5940,7 @@ export interface Condition {
  * @public
  * <p>The condition state. Currently, the only job states that a trigger can listen for are <code>SUCCEEDED</code>, <code>STOPPED</code>, <code>FAILED</code>, and <code>TIMEOUT</code>. The only crawler states that a trigger can listen for are <code>SUCCEEDED</code>, <code>FAILED</code>, and <code>CANCELLED</code>.</p>
  */
- State?: JobRunState | string;
+ State?: JobRunState;
  /**
  * @public
  * <p>The name of the crawler to which this condition applies.</p>
@@ -5950,7 +5950,7 @@ export interface Condition {
  * @public
  * <p>The state of the crawler to which this condition applies.</p>
  */
- CrawlState?: CrawlState | string;
+ CrawlState?: CrawlState;
  }
  /**
  * @public
@@ -5974,7 +5974,7 @@ export interface Predicate {
  * <p>An optional field if only one condition is listed. If multiple conditions are listed, then
  * this field is required.</p>
  */
- Logical?: Logical | string;
+ Logical?: Logical;
  /**
  * @public
  * <p>A list of the conditions that determine when the trigger will fire.</p>
@@ -6037,12 +6037,12 @@ export interface Trigger {
  * @public
  * <p>The type of trigger that this is.</p>
  */
- Type?: TriggerType | string;
+ Type?: TriggerType;
  /**
  * @public
  * <p>The current state of the trigger.</p>
  */
- State?: TriggerState | string;
+ State?: TriggerState;
  /**
  * @public
  * <p>A description of this trigger.</p>
@@ -6145,7 +6145,7 @@ export interface Crawl {
  * @public
  * <p>The state of the crawler.</p>
  */
- State?: CrawlState | string;
+ State?: CrawlState;
  /**
  * @public
  * <p>The date and time on which the crawl started.</p>
@@ -6250,7 +6250,7 @@ export interface JobRun {
  * @public
  * <p>The current state of the job run. For more information about the statuses of jobs that have terminated abnormally, see <a href="https://docs.aws.amazon.com/glue/latest/dg/job-run-statuses.html">Glue Job Run Statuses</a>.</p>
  */
- JobRunState?: JobRunState | string;
+ JobRunState?: JobRunState;
  /**
  * @public
  * <p>The job arguments associated with this run. For this job run, they replace the default
@@ -6353,7 +6353,7 @@ export interface JobRun {
  * </li>
  * </ul>
  */
- WorkerType?: WorkerType | string;
+ WorkerType?: WorkerType;
  /**
  * @public
  * <p>The number of workers of a defined <code>workerType</code> that are allocated when a job runs.</p>
@@ -6405,7 +6405,7 @@ export interface JobRun {
  * <p>The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. </p>
  * <p>Only jobs with Glue version 3.0 and above and command type <code>glueetl</code> will be allowed to set <code>ExecutionClass</code> to <code>FLEX</code>. The flexible execution class is available for Spark jobs.</p>
  */
- ExecutionClass?: ExecutionClass | string;
+ ExecutionClass?: ExecutionClass;
  }
  /**
  * @public
@@ -6451,7 +6451,7 @@ export interface Node {
  * @public
  * <p>The type of Glue component represented by the node.</p>
  */
- Type?: NodeType | string;
+ Type?: NodeType;
  /**
  * @public
  * <p>The name of the Glue component represented by the node.</p>
@@ -6613,7 +6613,7 @@ export interface WorkflowRun {
  * @public
  * <p>The status of the workflow run.</p>
  */
- Status?: WorkflowRunStatus | string;
+ Status?: WorkflowRunStatus;
  /**
  * @public
  * <p>This error message describes any error that may have occurred in starting the workflow run. Currently the only error message is "Concurrent runs exceeded for workflow: <code>foo</code>."</p>
@@ -6927,7 +6927,7 @@ export interface CancelMLTaskRunResponse {
  * @public
  * <p>The status for this run.</p>
  */
- Status?: TaskStatusType | string;
+ Status?: TaskStatusType;
  }
  /**
  * @public
@@ -6992,7 +6992,7 @@ export interface CheckSchemaVersionValidityInput {
  * @public
  * <p>The data format of the schema definition. Currently <code>AVRO</code>, <code>JSON</code> and <code>PROTOBUF</code> are supported.</p>
  */
- DataFormat: DataFormat | string | undefined;
+ DataFormat: DataFormat | undefined;
  /**
  * @public
  * <p>The definition of the schema that has to be validated.</p>
@@ -7099,7 +7099,7 @@ export interface CreateCsvClassifierRequest {
  * @public
  * <p>Indicates whether the CSV file contains a header.</p>
  */
- ContainsHeader?: CsvHeaderOption | string;
+ ContainsHeader?: CsvHeaderOption;
  /**
  * @public
  * <p>A list of strings representing column names.</p>
@@ -7129,7 +7129,7 @@ export interface CreateCsvClassifierRequest {
  * @public
  * <p>Sets the SerDe for processing CSV in the classifier, which will be applied in the Data Catalog. Valid values are <code>OpenCSVSerDe</code>, <code>LazySimpleSerDe</code>, and <code>None</code>. You can specify the <code>None</code> value when you want the crawler to do the detection.</p>
  */
- Serde?: CsvSerdeOption | string;
+ Serde?: CsvSerdeOption;
  }
  /**
  * @public
@@ -7431,7 +7431,7 @@ export interface ConnectionInput {
  * <p>For more information about how optional ConnectionProperties are used to configure features in Glue, consult <a href="https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html">Glue connection properties</a>.</p>
  * <p>For more information about how optional ConnectionProperties are used to configure features in Glue Studio, consult <a href="https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html">Using connectors and connections</a>.</p>
  */
- ConnectionType: ConnectionType | string | undefined;
+ ConnectionType: ConnectionType | undefined;
  /**
  * @public
  * <p>A list of criteria that can be used in selecting this connection.</p>
@@ -7441,7 +7441,7 @@ export interface ConnectionInput {
  * @public
  * <p>These key-value pairs define parameters for the connection.</p>
  */
- ConnectionProperties: Record<string, string> | undefined;
+ ConnectionProperties: Record<ConnectionPropertyKey, string> | undefined;
  /**
  * @public
  * <p>A map of physical connection requirements, such as virtual private cloud (VPC) and
@@ -7686,7 +7686,7 @@ export interface PrincipalPermissions {
  * @public
  * <p>The permissions that are granted to the principal.</p>
  */
- Permissions?: (Permission | string)[];
+ Permissions?: Permission[];
  }
  /**
  * @public
@@ -7951,7 +7951,7 @@ export interface CreateDevEndpointRequest {
  * <p>Known issue: when a development endpoint is created with the <code>G.2X</code>
  * <code>WorkerType</code> configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk. </p>
  */
- WorkerType?: WorkerType | string;
+ WorkerType?: WorkerType;
  /**
  * @public
  * <p>Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints. </p>
@@ -8050,7 +8050,7 @@ export interface CreateDevEndpointResponse {
  * @public
  * <p>The type of predefined worker that is allocated to the development endpoint. May be a value of Standard, G.1X, or G.2X.</p>
  */
- WorkerType?: WorkerType | string;
+ WorkerType?: WorkerType;
  /**
  * @public
  * <p>Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints. </p>