aws-sdk 2.1419.0 → 2.1421.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/clients/costexplorer.d.ts CHANGED
@@ -163,6 +163,14 @@ declare class CostExplorer extends Service {
  * Creates recommendations that help you save cost by identifying idle and underutilized Amazon EC2 instances. Recommendations are generated to either downsize or terminate instances, along with providing savings detail and metrics. For more information about calculation and function, see Optimizing Your Cost with Rightsizing Recommendations in the Billing and Cost Management User Guide.
  */
  getRightsizingRecommendation(callback?: (err: AWSError, data: CostExplorer.Types.GetRightsizingRecommendationResponse) => void): Request<CostExplorer.Types.GetRightsizingRecommendationResponse, AWSError>;
+ /**
+ * Retrieves the details for a Savings Plan recommendation. These details include the hourly data-points that construct the new cost, coverage, and utilization charts.
+ */
+ getSavingsPlanPurchaseRecommendationDetails(params: CostExplorer.Types.GetSavingsPlanPurchaseRecommendationDetailsRequest, callback?: (err: AWSError, data: CostExplorer.Types.GetSavingsPlanPurchaseRecommendationDetailsResponse) => void): Request<CostExplorer.Types.GetSavingsPlanPurchaseRecommendationDetailsResponse, AWSError>;
+ /**
+ * Retrieves the details for a Savings Plan recommendation. These details include the hourly data-points that construct the new cost, coverage, and utilization charts.
+ */
+ getSavingsPlanPurchaseRecommendationDetails(callback?: (err: AWSError, data: CostExplorer.Types.GetSavingsPlanPurchaseRecommendationDetailsResponse) => void): Request<CostExplorer.Types.GetSavingsPlanPurchaseRecommendationDetailsResponse, AWSError>;
  /**
  * Retrieves the Savings Plans covered for your account. This enables you to see how much of your cost is covered by a Savings Plan. An organization’s management account can see the coverage of the associated member accounts. This supports dimensions, Cost Categories, and nested expressions. For any time period, you can filter data for Savings Plans usage with the following dimensions: LINKED_ACCOUNT REGION SERVICE INSTANCE_FAMILY To determine valid values for a dimension, use the GetDimensionValues operation.
  */
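
Below is a minimal sketch (TypeScript, SDK v2) of calling the new operation; the region and the RecommendationDetailId value are placeholders, and a real detail ID comes from a prior GetSavingsPlansPurchaseRecommendation response.

import AWS = require('aws-sdk');

const ce = new AWS.CostExplorer({ region: 'us-east-1' }); // Cost Explorer is served from us-east-1

ce.getSavingsPlanPurchaseRecommendationDetails(
  { RecommendationDetailId: 'example-detail-id' }, // placeholder ID
  (err, data) => {
    if (err) { console.error(err); return; }
    // RecommendationDetailData carries the hourly data behind the cost/coverage/utilization charts
    console.log(data.RecommendationDetailData?.EstimatedMonthlySavingsAmount);
  }
);
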
@@ -284,11 +292,11 @@ declare class CostExplorer extends Service {
  */
  updateAnomalyMonitor(callback?: (err: AWSError, data: CostExplorer.Types.UpdateAnomalyMonitorResponse) => void): Request<CostExplorer.Types.UpdateAnomalyMonitorResponse, AWSError>;
  /**
- * Updates an existing cost anomaly monitor subscription.
+ * Updates an existing cost anomaly subscription. Specify the fields that you want to update. Omitted fields are unchanged. The JSON below describes the generic construct for each type. See Request Parameters for possible values as they apply to AnomalySubscription.
  */
  updateAnomalySubscription(params: CostExplorer.Types.UpdateAnomalySubscriptionRequest, callback?: (err: AWSError, data: CostExplorer.Types.UpdateAnomalySubscriptionResponse) => void): Request<CostExplorer.Types.UpdateAnomalySubscriptionResponse, AWSError>;
  /**
- * Updates an existing cost anomaly monitor subscription.
+ * Updates an existing cost anomaly subscription. Specify the fields that you want to update. Omitted fields are unchanged. The JSON below describes the generic construct for each type. See Request Parameters for possible values as they apply to AnomalySubscription.
  */
  updateAnomalySubscription(callback?: (err: AWSError, data: CostExplorer.Types.UpdateAnomalySubscriptionResponse) => void): Request<CostExplorer.Types.UpdateAnomalySubscriptionResponse, AWSError>;
  /**
@@ -426,11 +434,11 @@ declare namespace CostExplorer {
  */
  Subscribers: Subscribers;
  /**
- * (deprecated) The dollar value that triggers a notification if the threshold is exceeded. This field has been deprecated. To specify a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression. One of Threshold or ThresholdExpression is required for this resource.
+ * (deprecated) An absolute dollar value that must be exceeded by the anomaly's total impact (see Impact for more details) for an anomaly notification to be generated. This field has been deprecated. To specify a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression. One of Threshold or ThresholdExpression is required for this resource. You cannot specify both.
  */
  Threshold?: NullableNonNegativeDouble;
  /**
- * The frequency that anomaly reports are sent over email.
+ * The frequency that anomaly notifications are sent. Notifications are sent either over email (for DAILY and WEEKLY frequencies) or SNS (for IMMEDIATE frequency). For more information, see Creating an Amazon SNS topic for anomaly notifications.
  */
  Frequency: AnomalySubscriptionFrequency;
  /**
@@ -438,7 +446,7 @@ declare namespace CostExplorer {
  */
  SubscriptionName: GenericString;
  /**
- * An Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE. The supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and 10,000,000,000. One of Threshold or ThresholdExpression is required for this resource. The following are examples of valid ThresholdExpressions: Absolute threshold: { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } Percentage threshold: { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } AND two thresholds together: { "And": [ { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } }, { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } ] } OR two thresholds together: { "Or": [ { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } }, { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } ] }
+ * An Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE, corresponding to an anomaly’s TotalImpact and TotalImpactPercentage, respectively (see Impact for more details). The supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and 10,000,000,000 in string format. One of Threshold or ThresholdExpression is required for this resource. You cannot specify both. The following are examples of valid ThresholdExpressions: Absolute threshold: { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } Percentage threshold: { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } AND two thresholds together: { "And": [ { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } }, { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } ] } OR two thresholds together: { "Or": [ { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } }, { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } ] }
  */
  ThresholdExpression?: Expression;
  }
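
The ThresholdExpression described above can be assembled directly from the documentation's own examples; here is a sketch that ANDs the absolute and percentage thresholds (the "100" values are the examples from the doc comment, and the AWS import from the earlier sketch is assumed):

const thresholdExpression: AWS.CostExplorer.Expression = {
  And: [
    { Dimensions: { Key: 'ANOMALY_TOTAL_IMPACT_ABSOLUTE', MatchOptions: ['GREATER_THAN_OR_EQUAL'], Values: ['100'] } },
    { Dimensions: { Key: 'ANOMALY_TOTAL_IMPACT_PERCENTAGE', MatchOptions: ['GREATER_THAN_OR_EQUAL'], Values: ['100'] } },
  ],
};
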
@@ -565,7 +573,7 @@ declare namespace CostExplorer {
  export interface CostCategoryRule {
  Value?: CostCategoryValue;
  /**
- * An Expression object used to categorize costs. This supports dimensions, tags, and nested expressions. Currently the only dimensions supported are LINKED_ACCOUNT, SERVICE_CODE, RECORD_TYPE, and LINKED_ACCOUNT_NAME. Root level OR isn't supported. We recommend that you create a separate rule instead. RECORD_TYPE is a dimension used for Cost Explorer APIs, and is also supported for Cost Category expressions. This dimension uses different terms, depending on whether you're using the console or API/JSON editor. For a detailed comparison, see Term Comparisons in the Billing and Cost Management User Guide.
+ * An Expression object used to categorize costs. This supports dimensions, tags, and nested expressions. Currently the only dimensions supported are LINKED_ACCOUNT, SERVICE_CODE, RECORD_TYPE, LINKED_ACCOUNT_NAME, REGION, and USAGE_TYPE. RECORD_TYPE is a dimension used for Cost Explorer APIs, and is also supported for Cost Category expressions. This dimension uses different terms, depending on whether you're using the console or API/JSON editor. For a detailed comparison, see Term Comparisons in the Billing and Cost Management User Guide.
  */
  Rule?: Expression;
  /**
@@ -1644,6 +1652,22 @@ declare namespace CostExplorer {
  */
  Configuration?: RightsizingRecommendationConfiguration;
  }
+ export interface GetSavingsPlanPurchaseRecommendationDetailsRequest {
+ /**
+ * The ID that is associated with the Savings Plan recommendation.
+ */
+ RecommendationDetailId: RecommendationDetailId;
+ }
+ export interface GetSavingsPlanPurchaseRecommendationDetailsResponse {
+ /**
+ * The ID that is associated with the Savings Plan recommendation.
+ */
+ RecommendationDetailId?: RecommendationDetailId;
+ /**
+ * Contains detailed information about a specific Savings Plan recommendation.
+ */
+ RecommendationDetailData?: RecommendationDetailData;
+ }
  export interface GetSavingsPlansCoverageRequest {
  /**
  * The time period that you want the usage and costs for. The Start date must be within 13 months. The End date must be after the Start date, and before the current date. Future dates can't be used as an End date.
@@ -2069,6 +2093,7 @@ declare namespace CostExplorer {
  Unit?: MetricUnit;
  }
  export type Metrics = {[key: string]: MetricValue};
+ export type MetricsOverLookbackPeriod = RecommendationDetailHourlyMetrics[];
  export interface ModifyRecommendationDetail {
  /**
  * Determines whether this instance type is the Amazon Web Services default recommendation.
@@ -2169,6 +2194,138 @@ declare namespace CostExplorer {
  }
  export type RICostForUnusedHours = string;
  export type RealizedSavings = string;
+ export interface RecommendationDetailData {
+ /**
+ * The account scope that you want your recommendations for. Amazon Web Services calculates recommendations including the management account and member accounts if the value is set to PAYER. If the value is LINKED, recommendations are calculated for individual member accounts only.
+ */
+ AccountScope?: AccountScope;
+ /**
+ * How many days of previous usage that Amazon Web Services considers when making this recommendation.
+ */
+ LookbackPeriodInDays?: LookbackPeriodInDays;
+ /**
+ * The requested Savings Plan recommendation type.
+ */
+ SavingsPlansType?: SupportedSavingsPlansType;
+ /**
+ * The term of the commitment in years.
+ */
+ TermInYears?: TermInYears;
+ /**
+ * The payment option for the commitment (for example, All Upfront or No Upfront).
+ */
+ PaymentOption?: PaymentOption;
+ /**
+ * The AccountID that the recommendation is generated for.
+ */
+ AccountId?: GenericString;
+ /**
+ * The currency code that Amazon Web Services used to generate the recommendation and present potential savings.
+ */
+ CurrencyCode?: GenericString;
+ /**
+ * The instance family of the recommended Savings Plan.
+ */
+ InstanceFamily?: GenericString;
+ /**
+ * The region the recommendation is generated for.
+ */
+ Region?: GenericString;
+ /**
+ * The unique ID that's used to distinguish Savings Plans from one another.
+ */
+ OfferingId?: GenericString;
+ GenerationTimestamp?: ZonedDateTime;
+ LatestUsageTimestamp?: ZonedDateTime;
+ /**
+ * The average value of hourly On-Demand spend over the lookback period of the applicable usage type.
+ */
+ CurrentAverageHourlyOnDemandSpend?: GenericString;
+ /**
+ * The highest value of hourly On-Demand spend over the lookback period of the applicable usage type.
+ */
+ CurrentMaximumHourlyOnDemandSpend?: GenericString;
+ /**
+ * The lowest value of hourly On-Demand spend over the lookback period of the applicable usage type.
+ */
+ CurrentMinimumHourlyOnDemandSpend?: GenericString;
+ /**
+ * The estimated utilization of the recommended Savings Plan.
+ */
+ EstimatedAverageUtilization?: GenericString;
+ /**
+ * The estimated monthly savings amount based on the recommended Savings Plan.
+ */
+ EstimatedMonthlySavingsAmount?: GenericString;
+ /**
+ * The remaining On-Demand cost estimated to not be covered by the recommended Savings Plan, over the length of the lookback period.
+ */
+ EstimatedOnDemandCost?: GenericString;
+ /**
+ * The estimated On-Demand costs you expect with no additional commitment, based on your usage of the selected time period and the Savings Plan you own.
+ */
+ EstimatedOnDemandCostWithCurrentCommitment?: GenericString;
+ /**
+ * The estimated return on investment that's based on the recommended Savings Plan that you purchased. This is calculated as estimatedSavingsAmount/estimatedSPCost*100.
+ */
+ EstimatedROI?: GenericString;
+ /**
+ * The cost of the recommended Savings Plan over the length of the lookback period.
+ */
+ EstimatedSPCost?: GenericString;
+ /**
+ * The estimated savings amount that's based on the recommended Savings Plan over the length of the lookback period.
+ */
+ EstimatedSavingsAmount?: GenericString;
+ /**
+ * The estimated savings percentage relative to the total cost of applicable On-Demand usage over the lookback period.
+ */
+ EstimatedSavingsPercentage?: GenericString;
+ /**
+ * The existing hourly commitment for the Savings Plan type.
+ */
+ ExistingHourlyCommitment?: GenericString;
+ /**
+ * The recommended hourly commitment level for the Savings Plan type and the configuration that's based on the usage during the lookback period.
+ */
+ HourlyCommitmentToPurchase?: GenericString;
+ /**
+ * The upfront cost of the recommended Savings Plan, based on the selected payment option.
+ */
+ UpfrontCost?: GenericString;
+ /**
+ * The average value of hourly coverage over the lookback period.
+ */
+ CurrentAverageCoverage?: GenericString;
+ /**
+ * The estimated coverage of the recommended Savings Plan.
+ */
+ EstimatedAverageCoverage?: GenericString;
+ /**
+ * The related hourly cost, coverage, and utilization metrics over the lookback period.
+ */
+ MetricsOverLookbackPeriod?: MetricsOverLookbackPeriod;
+ }
+ export interface RecommendationDetailHourlyMetrics {
+ StartTime?: ZonedDateTime;
+ /**
+ * The remaining On-Demand cost estimated to not be covered by the recommended Savings Plan, over the length of the lookback period.
+ */
+ EstimatedOnDemandCost?: GenericString;
+ /**
+ * The current amount of Savings Plans eligible usage that the Savings Plan covered.
+ */
+ CurrentCoverage?: GenericString;
+ /**
+ * The estimated coverage amount based on the recommended Savings Plan.
+ */
+ EstimatedCoverage?: GenericString;
+ /**
+ * The estimated utilization for the recommended Savings Plan.
+ */
+ EstimatedNewCommitmentUtilization?: GenericString;
+ }
+ export type RecommendationDetailId = string;
  export type RecommendationId = string;
  export type RecommendationIdList = RecommendationId[];
  export type RecommendationTarget = "SAME_INSTANCE_FAMILY"|"CROSS_INSTANCE_FAMILY"|string;
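
A sketch of consuming the new detail shape, assuming the AWS import from the earlier sketches; every field name comes from the declarations above, and the numeric values arrive as strings (GenericString):

function summarizeDetail(detail: AWS.CostExplorer.RecommendationDetailData): void {
  for (const point of detail.MetricsOverLookbackPeriod ?? []) {
    // one entry per hour of the lookback period
    console.log(point.StartTime, point.CurrentCoverage, point.EstimatedCoverage, point.EstimatedNewCommitmentUtilization);
  }
}
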
@@ -2730,6 +2887,10 @@ declare namespace CostExplorer {
  * The average value of hourly On-Demand spend over the lookback period of the applicable usage type.
  */
  CurrentAverageHourlyOnDemandSpend?: GenericString;
+ /**
+ * Contains detailed information about a specific Savings Plan recommendation.
+ */
+ RecommendationDetailId?: RecommendationDetailId;
  }
  export type SavingsPlansPurchaseRecommendationDetailList = SavingsPlansPurchaseRecommendationDetail[];
  export interface SavingsPlansPurchaseRecommendationMetadata {
@@ -3056,7 +3217,7 @@ declare namespace CostExplorer {
  */
  SubscriptionArn: GenericString;
  /**
- * (deprecated) The update to the threshold value for receiving notifications. This field has been deprecated. To update a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression.
+ * (deprecated) The update to the threshold value for receiving notifications. This field has been deprecated. To update a threshold, use ThresholdExpression. Continued use of Threshold will be treated as shorthand syntax for a ThresholdExpression. You can specify either Threshold or ThresholdExpression, but not both.
  */
  Threshold?: NullableNonNegativeDouble;
  /**
  /**
@@ -3076,7 +3237,7 @@ declare namespace CostExplorer {
  */
  SubscriptionName?: GenericString;
  /**
- * The update to the Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE. The supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and 10,000,000,000. The following are examples of valid ThresholdExpressions: Absolute threshold: { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } Percentage threshold: { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } AND two thresholds together: { "And": [ { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } }, { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } ] } OR two thresholds together: { "Or": [ { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } }, { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } ] }
+ * The update to the Expression object used to specify the anomalies that you want to generate alerts for. This supports dimensions and nested expressions. The supported dimensions are ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE, corresponding to an anomaly’s TotalImpact and TotalImpactPercentage, respectively (see Impact for more details). The supported nested expression types are AND and OR. The match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between 0 and 10,000,000,000 in string format. You can specify either Threshold or ThresholdExpression, but not both. The following are examples of valid ThresholdExpressions: Absolute threshold: { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } Percentage threshold: { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } AND two thresholds together: { "And": [ { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } }, { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } ] } OR two thresholds together: { "Or": [ { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } }, { "Dimensions": { "Key": "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ], "Values": [ "100" ] } } ] }
  */
  ThresholdExpression?: Expression;
  }
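
Given the "not both" constraint added above, moving a subscription off the deprecated Threshold field might look like the following sketch (the ARN is a placeholder and ce is the CostExplorer client from the first sketch):

ce.updateAnomalySubscription({
  SubscriptionArn: 'arn:aws:ce::123456789012:anomalysubscription/example', // placeholder ARN
  ThresholdExpression: {
    Dimensions: { Key: 'ANOMALY_TOTAL_IMPACT_ABSOLUTE', MatchOptions: ['GREATER_THAN_OR_EQUAL'], Values: ['100'] },
  },
  // Threshold is deliberately omitted: a request may carry Threshold or ThresholdExpression, but not both.
}, (err) => { if (err) console.error(err); });
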
package/clients/ec2.d.ts CHANGED
@@ -34468,7 +34468,7 @@ declare namespace EC2 {
  export type SpotInstanceRequestId = string;
  export type SpotInstanceRequestIdList = SpotInstanceRequestId[];
  export type SpotInstanceRequestList = SpotInstanceRequest[];
- export type SpotInstanceState = "open"|"active"|"closed"|"cancelled"|"failed"|string;
+ export type SpotInstanceState = "open"|"active"|"closed"|"cancelled"|"failed"|"disabled"|string;
  export interface SpotInstanceStateFault {
  /**
  * The reason code for the Spot Instance state change.
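
Callers that branch on SpotInstanceState should account for the new "disabled" value (per the EC2 documentation, a persistent request shows disabled while its Spot Instance is stopped); a sketch, assuming the AWS import from the earlier sketches:

function isTerminalSpotState(state: AWS.EC2.SpotInstanceState): boolean {
  // "disabled" requests can become active again, so they are not treated as terminal here
  return state === 'closed' || state === 'cancelled' || state === 'failed';
}
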
package/clients/glue.d.ts CHANGED
@@ -3000,6 +3000,10 @@ declare namespace Glue {
  * Specifies your data quality evaluation criteria. Allows multiple input data and returns a collection of Dynamic Frames.
  */
  EvaluateDataQualityMultiFrame?: EvaluateDataQualityMultiFrame;
+ /**
+ * Specifies a Glue DataBrew recipe node.
+ */
+ Recipe?: Recipe;
  }
  export type CodeGenConfigurationNodes = {[key: string]: CodeGenConfigurationNode};
  export interface CodeGenEdge {
@@ -3535,6 +3539,10 @@ declare namespace Glue {
  * Specifies Apache Iceberg data store targets.
  */
  IcebergTargets?: IcebergTargetList;
+ /**
+ * Specifies Apache Hudi data store targets.
+ */
+ HudiTargets?: HudiTargetList;
  }
  export interface CrawlsFilter {
  /**
@@ -4017,7 +4025,7 @@ declare namespace Glue {
  */
  NumberOfWorkers?: NullableInteger;
  /**
- * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
  */
  WorkerType?: WorkerType;
  /**
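
A sketch of selecting one of the current worker types at job creation; the job name, role, and script location are placeholders, and the AWS import from the earlier sketches is assumed:

const glue = new AWS.Glue({ region: 'us-east-1' });

glue.createJob({
  Name: 'example-etl-job',                         // placeholder
  Role: 'arn:aws:iam::123456789012:role/GlueRole', // placeholder
  Command: { Name: 'glueetl', ScriptLocation: 's3://example-bucket/scripts/job.py' }, // placeholder
  GlueVersion: '4.0',
  WorkerType: 'G.2X',  // 2 DPU per worker, per the description above
  NumberOfWorkers: 10,
}, (err) => { if (err) console.error(err); });
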
@@ -4355,7 +4363,7 @@ declare namespace Glue {
  */
  NumberOfWorkers?: NullableInteger;
  /**
- * The type of predefined worker that is allocated to use for the session. Accepts a value of Standard, G.1X, G.2X, or G.025X. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
  */
  WorkerType?: WorkerType;
  /**
@@ -8064,7 +8072,26 @@ declare namespace Glue {
  }
  export type GrokPattern = string;
  export type HashString = string;
+ export interface HudiTarget {
+ /**
+ * An array of Amazon S3 location strings for Hudi, each indicating the root folder with which the metadata files for a Hudi table resides. The Hudi folder may be located in a child folder of the root folder. The crawler will scan all folders underneath a path for a Hudi folder.
+ */
+ Paths?: PathList;
+ /**
+ * The name of the connection to use to connect to the Hudi target. If your Hudi files are stored in buckets that require VPC authorization, you can set their connection properties here.
+ */
+ ConnectionName?: ConnectionName;
+ /**
+ * A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.
+ */
+ Exclusions?: PathList;
+ /**
+ * The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Hudi metadata folder in your Amazon S3 path. Used to limit the crawler run time.
+ */
+ MaximumTraversalDepth?: NullableInteger;
+ }
  export type HudiTargetCompressionType = "gzip"|"lzo"|"uncompressed"|"snappy"|string;
+ export type HudiTargetList = HudiTarget[];
  export interface IcebergInput {
  /**
  * A required metadata operation. Can only be set to CREATE.
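
The new HudiTarget shape slots into a crawler's Targets; a sketch using the glue client from the previous sketch (crawler name, role, database, and S3 path are placeholders):

glue.createCrawler({
  Name: 'example-hudi-crawler',                    // placeholder
  Role: 'arn:aws:iam::123456789012:role/GlueRole', // placeholder
  DatabaseName: 'example_db',                      // placeholder
  Targets: {
    HudiTargets: [{
      Paths: ['s3://example-bucket/hudi/'], // root folder scanned for a Hudi metadata folder
      MaximumTraversalDepth: 3,             // bound the scan to keep crawler run time down
    }],
  },
}, (err) => { if (err) console.error(err); });
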
@@ -8305,7 +8332,7 @@ declare namespace Glue {
  */
  MaxCapacity?: NullableDouble;
  /**
- * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPU, 64 GB of memory, 256 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPU, 128 GB of memory, 512 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides a default of 8 Ray workers (1 per vCPU).
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
  */
  WorkerType?: WorkerType;
  /**
@@ -8471,7 +8498,7 @@ declare namespace Glue {
  */
  MaxCapacity?: NullableDouble;
  /**
- * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
  */
  WorkerType?: WorkerType;
  /**
@@ -8555,7 +8582,7 @@ declare namespace Glue {
  */
  MaxCapacity?: NullableDouble;
  /**
- * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
  */
  WorkerType?: WorkerType;
  /**
@@ -10232,6 +10259,31 @@ declare namespace Glue {
  NextToken?: SchemaRegistryTokenString;
  }
  export type QuoteChar = "quote"|"quillemet"|"single_quote"|"disabled"|string;
+ export interface Recipe {
+ /**
+ * The name of the Glue Studio node.
+ */
+ Name: NodeName;
+ /**
+ * The nodes that are inputs to the recipe node, identified by id.
+ */
+ Inputs: OneInput;
+ /**
+ * A reference to the DataBrew recipe used by the node.
+ */
+ RecipeReference: RecipeReference;
+ }
+ export interface RecipeReference {
+ /**
+ * The ARN of the DataBrew recipe.
+ */
+ RecipeArn: EnclosedInStringProperty;
+ /**
+ * The RecipeVersion of the DataBrew recipe.
+ */
+ RecipeVersion: RecipeVersion;
+ }
+ export type RecipeVersion = string;
  export type RecordsCount = number;
  export type RecrawlBehavior = "CRAWL_EVERYTHING"|"CRAWL_NEW_FOLDERS_ONLY"|"CRAWL_EVENT_MODE"|string;
  export interface RecrawlPolicy {
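
A sketch of how the new Recipe node might appear in a job's CodeGenConfigurationNodes; the node id, recipe ARN, and version are placeholders (Inputs is a OneInput, a single-element list of upstream node ids):

const recipeNode: AWS.Glue.CodeGenConfigurationNode = {
  Recipe: {
    Name: 'ApplyDataBrewRecipe',
    Inputs: ['node-1'], // id of the single upstream node
    RecipeReference: {
      RecipeArn: 'arn:aws:databrew:us-east-1:123456789012:recipe/example-recipe', // placeholder
      RecipeVersion: '1.0', // placeholder
    },
  },
};
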
@@ -11775,7 +11827,7 @@ declare namespace Glue {
  */
  NotificationProperty?: NotificationProperty;
  /**
- * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
  */
  WorkerType?: WorkerType;
  /**
package/clients/mediaconvert.d.ts CHANGED
@@ -3051,7 +3051,7 @@ Within your job settings, all of your DVB-Sub settings must be identical.
  */
  SegmentLengthControl?: HlsSegmentLengthControl;
  /**
- * Specify the number of segments to write to a subdirectory before starting a new one. You must also set Directory structure to Subdirectory per stream for this setting to have an effect.
+ * Specify the number of segments to write to a subdirectory before starting a new one. You must also set Directory structure to Subdirectory per stream for this setting to have an effect.
  */
  SegmentsPerSubdirectory?: __integerMin1Max2147483647;
  /**
@@ -5013,7 +5013,7 @@ When you specify Version 1, you must also set ID3 metadata (timedMetadata) to Pa
  export type ProresScanTypeConversionMode = "INTERLACED"|"INTERLACED_OPTIMIZE"|string;
  export interface ProresSettings {
  /**
- * This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. Related Settings: When you set Chroma sampling to Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING), you must choose an output codec profile that supports 4:4:4 chroma sampling. These values for Profile (CodecProfile) support 4:4:4 chroma sampling: Apple ProRes 4444 (APPLE_PRORES_4444) or Apple ProRes 4444 XQ (APPLE_PRORES_4444_XQ). When you set Chroma sampling to Preserve 4:4:4 sampling, you must disable all video preprocessors except for Nexguard file marker (PartnerWatermarking). When you set Chroma sampling to Preserve 4:4:4 sampling and use framerate conversion, you must set Frame rate conversion algorithm (FramerateConversionAlgorithm) to Drop duplicate (DUPLICATE_DROP).
+ * This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. Related Settings: For Apple ProRes outputs with 4:4:4 chroma sampling: Choose Preserve 4:4:4 sampling. Use when your input has 4:4:4 chroma sampling and your output codec Profile is Apple ProRes 4444 or 4444 XQ. Note that when you choose Preserve 4:4:4 sampling, you cannot include any of the following Preprocessors: Dolby Vision, HDR10+, or Noise reducer.
  */
  ChromaSampling?: ProresChromaSampling;
  /**
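
The constraint described above ties ChromaSampling to a 4444-family codec profile; a sketch of the relevant output settings, assuming the enum values in the MediaConvert typings:

const videoDescription: AWS.MediaConvert.VideoDescription = {
  CodecSettings: {
    Codec: 'PRORES',
    ProresSettings: {
      ChromaSampling: 'PRESERVE_444_SAMPLING', // only valid with a 4444-family profile
      CodecProfile: 'APPLE_PRORES_4444',       // or APPLE_PRORES_4444_XQ
    },
  },
};
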
@@ -5596,7 +5596,7 @@ When you specify Version 1, you must also set ID3 metadata (timedMetadata) to Pa
  */
  AvcIntraSettings?: AvcIntraSettings;
  /**
- * Specifies the video codec. This must be equal to one of the enum values defined by the object VideoCodec. To passthrough the video stream of your input JPEG2000, VC-3, AVC-INTRA or Apple ProRes video without any video encoding: Choose Passthrough. If you have multiple input videos, note that they must have identical encoding attributes. When you choose Passthrough, your output container must be MXF or QuickTime MOV.
+ * Specifies the video codec. This must be equal to one of the enum values defined by the object VideoCodec. To passthrough the video stream of your input JPEG2000, VC-3, AVC-INTRA or Apple ProRes video without any video encoding: Choose Passthrough. If you have multiple input videos, note that they must have identical encoding attributes. When you choose Passthrough, your output container must be MXF or QuickTime MOV.
  */
  Codec?: VideoCodec;
  /**