cdk-comprehend-s3olap 2.0.61 → 2.0.64

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/.jsii +4 -4
  2. package/lib/cdk-comprehend-s3olap.js +2 -2
  3. package/lib/comprehend-lambdas.js +2 -2
  4. package/lib/iam-roles.js +4 -4
  5. package/node_modules/aws-sdk/CHANGELOG.md +16 -1
  6. package/node_modules/aws-sdk/README.md +1 -1
  7. package/node_modules/aws-sdk/apis/dlm-2018-01-12.min.json +15 -9
  8. package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +627 -623
  9. package/node_modules/aws-sdk/apis/glue-2017-03-31.min.json +306 -295
  10. package/node_modules/aws-sdk/apis/iotwireless-2020-11-22.min.json +128 -87
  11. package/node_modules/aws-sdk/apis/location-2020-11-19.min.json +60 -44
  12. package/node_modules/aws-sdk/apis/pinpoint-2016-12-01.min.json +274 -191
  13. package/node_modules/aws-sdk/apis/sagemaker-2017-07-24.min.json +501 -468
  14. package/node_modules/aws-sdk/apis/sagemaker-a2i-runtime-2019-11-07.min.json +12 -8
  15. package/node_modules/aws-sdk/clients/cloudwatch.d.ts +7 -7
  16. package/node_modules/aws-sdk/clients/dlm.d.ts +30 -25
  17. package/node_modules/aws-sdk/clients/ec2.d.ts +1 -0
  18. package/node_modules/aws-sdk/clients/glue.d.ts +31 -2
  19. package/node_modules/aws-sdk/clients/iotwireless.d.ts +45 -0
  20. package/node_modules/aws-sdk/clients/location.d.ts +27 -13
  21. package/node_modules/aws-sdk/clients/pinpoint.d.ts +96 -0
  22. package/node_modules/aws-sdk/clients/quicksight.d.ts +10 -10
  23. package/node_modules/aws-sdk/clients/sagemaker.d.ts +51 -5
  24. package/node_modules/aws-sdk/clients/sso.d.ts +19 -19
  25. package/node_modules/aws-sdk/clients/ssoadmin.d.ts +82 -82
  26. package/node_modules/aws-sdk/clients/ssooidc.d.ts +11 -11
  27. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +1 -1
  28. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +9 -9
  29. package/node_modules/aws-sdk/dist/aws-sdk.js +690 -670
  30. package/node_modules/aws-sdk/dist/aws-sdk.min.js +65 -65
  31. package/node_modules/aws-sdk/lib/core.js +1 -1
  32. package/node_modules/aws-sdk/package.json +1 -1
  33. package/node_modules/esbuild/install.js +4 -4
  34. package/node_modules/esbuild/lib/main.js +7 -7
  35. package/node_modules/esbuild/package.json +22 -22
  36. package/node_modules/esbuild-linux-64/bin/esbuild +0 -0
  37. package/node_modules/esbuild-linux-64/package.json +1 -1
  38. package/package.json +7 -7
@@ -3,7 +3,6 @@
  "metadata": {
  "apiVersion": "2019-11-07",
  "endpointPrefix": "a2i-runtime.sagemaker",
- "jsonVersion": "1.1",
  "protocol": "rest-json",
  "serviceFullName": "Amazon Augmented AI Runtime",
  "serviceId": "SageMaker A2I Runtime",
@@ -62,7 +61,7 @@
  ],
  "members": {
  "CreationTime": {
- "type": "timestamp"
+ "shape": "S6"
  },
  "FailureReason": {},
  "FailureCode": {},
@@ -94,14 +93,14 @@
  ],
  "members": {
  "CreationTimeAfter": {
+ "shape": "S6",
  "location": "querystring",
- "locationName": "CreationTimeAfter",
- "type": "timestamp"
+ "locationName": "CreationTimeAfter"
  },
  "CreationTimeBefore": {
+ "shape": "S6",
  "location": "querystring",
- "locationName": "CreationTimeBefore",
- "type": "timestamp"
+ "locationName": "CreationTimeBefore"
  },
  "FlowDefinitionArn": {
  "location": "querystring",
@@ -136,7 +135,7 @@
  "HumanLoopName": {},
  "HumanLoopStatus": {},
  "CreationTime": {
- "type": "timestamp"
+ "shape": "S6"
  },
  "FailureReason": {},
  "FlowDefinitionArn": {}
@@ -210,5 +209,10 @@
  }
  }
  },
- "shapes": {}
+ "shapes": {
+ "S6": {
+ "type": "timestamp",
+ "timestampFormat": "iso8601"
+ }
+ }
  }
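The model change above routes the Augmented AI Runtime CreationTime fields through a shared S6 shape with timestampFormat iso8601, so Date parameters are serialized as ISO 8601 values on the wire. A minimal sketch of calling one of the affected operations with the vendored aws-sdk v2; the region and flow definition ARN are placeholders, not values from this package:

```ts
import * as AWS from 'aws-sdk';

const a2i = new AWS.AugmentedAIRuntime({ region: 'us-east-1' });

// CreationTimeAfter/CreationTimeBefore now resolve to the shared S6 shape,
// so these Date values are sent as ISO 8601 query-string parameters.
a2i.listHumanLoops({
  FlowDefinitionArn: 'arn:aws:sagemaker:us-east-1:123456789012:flow-definition/example', // placeholder
  CreationTimeAfter: new Date(Date.now() - 24 * 60 * 60 * 1000),
  SortOrder: 'Descending',
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.HumanLoopSummaries);
});
```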
@@ -21,11 +21,11 @@ declare class CloudWatch extends Service {
  */
  deleteAlarms(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Deletes the specified anomaly detection model from your account.
+ * Deletes the specified anomaly detection model from your account. For more information about how to delete an anomaly detection model, see Deleting an anomaly detection model in the CloudWatch User Guide.
  */
  deleteAnomalyDetector(params: CloudWatch.Types.DeleteAnomalyDetectorInput, callback?: (err: AWSError, data: CloudWatch.Types.DeleteAnomalyDetectorOutput) => void): Request<CloudWatch.Types.DeleteAnomalyDetectorOutput, AWSError>;
  /**
- * Deletes the specified anomaly detection model from your account.
+ * Deletes the specified anomaly detection model from your account. For more information about how to delete an anomaly detection model, see Deleting an anomaly detection model in the CloudWatch User Guide.
  */
  deleteAnomalyDetector(callback?: (err: AWSError, data: CloudWatch.Types.DeleteAnomalyDetectorOutput) => void): Request<CloudWatch.Types.DeleteAnomalyDetectorOutput, AWSError>;
  /**
@@ -245,11 +245,11 @@
  */
  putMetricAlarm(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics. You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data. Each PutMetricData request is limited to 40 KB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 20 different metrics. Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported. You can use up to 10 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide. You specify the time stamp to be associated with each data point. You can specify time stamps that are as much as two weeks before the current date, and as much as 2 hours after the current day and time. Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted. Data points with time stamps between 3 and 24 hours ago can take as much as 2 hours to become available for for GetMetricData or GetMetricStatistics. CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true: The SampleCount value of the statistic set is 1 and Min, Max, and Sum are all equal. The Min and Max are equal, and Sum is equal to Min multiplied by SampleCount.
+ * Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics. You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data. Each PutMetricData request is limited to 1 MB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 1000 different metrics. Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported. You can use up to 30 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide. You specify the time stamp to be associated with each data point. You can specify time stamps that are as much as two weeks before the current date, and as much as 2 hours after the current day and time. Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted. Data points with time stamps between 3 and 24 hours ago can take as much as 2 hours to become available for for GetMetricData or GetMetricStatistics. CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true: The SampleCount value of the statistic set is 1 and Min, Max, and Sum are all equal. The Min and Max are equal, and Sum is equal to Min multiplied by SampleCount.
  */
  putMetricData(params: CloudWatch.Types.PutMetricDataInput, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics. You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data. Each PutMetricData request is limited to 40 KB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 20 different metrics. Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported. You can use up to 10 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide. You specify the time stamp to be associated with each data point. You can specify time stamps that are as much as two weeks before the current date, and as much as 2 hours after the current day and time. Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted. Data points with time stamps between 3 and 24 hours ago can take as much as 2 hours to become available for for GetMetricData or GetMetricStatistics. CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true: The SampleCount value of the statistic set is 1 and Min, Max, and Sum are all equal. The Min and Max are equal, and Sum is equal to Min multiplied by SampleCount.
+ * Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics. You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data. Each PutMetricData request is limited to 1 MB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 1000 different metrics. Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported. You can use up to 30 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide. You specify the time stamp to be associated with each data point. You can specify time stamps that are as much as two weeks before the current date, and as much as 2 hours after the current day and time. Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted. Data points with time stamps between 3 and 24 hours ago can take as much as 2 hours to become available for for GetMetricData or GetMetricStatistics. CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true: The SampleCount value of the statistic set is 1 and Min, Max, and Sum are all equal. The Min and Max are equal, and Sum is equal to Min multiplied by SampleCount.
  */
  putMetricData(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
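The updated PutMetricData documentation raises the per-request limits (1 MB payload, 1000 metrics, 30 dimensions) and describes the Values/Counts publishing mode. A short sketch of that mode with aws-sdk v2; the namespace, metric name, and numbers are illustrative only:

```ts
import * as AWS from 'aws-sdk';

const cloudwatch = new AWS.CloudWatch({ region: 'us-east-1' });

// Values/Counts publish a compressed distribution in one call; the revised
// limits quoted above apply per PutMetricData request.
cloudwatch.putMetricData({
  Namespace: 'Example/App', // placeholder namespace
  MetricData: [{
    MetricName: 'RequestLatency',
    Dimensions: [{ Name: 'Service', Value: 'checkout' }],
    Unit: 'Milliseconds',
    Values: [12, 45, 120],   // each unique value listed once
    Counts: [40, 7, 1],      // how many times each value occurred
    Timestamp: new Date(),
  }],
}).promise().catch(console.error);
```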
@@ -1537,7 +1537,7 @@ declare namespace CloudWatch {
  */
  StatisticValues?: StatisticSet;
  /**
- * Array of numbers representing the values for the metric during the period. Each unique value is listed just once in this array, and the corresponding number in the Counts array specifies the number of times that value occurred during the period. You can include up to 150 unique values in each PutMetricData action that specifies a Values array. Although the Values array accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.
+ * Array of numbers representing the values for the metric during the period. Each unique value is listed just once in this array, and the corresponding number in the Counts array specifies the number of times that value occurred during the period. You can include up to 500 unique values in each PutMetricData action that specifies a Values array. Although the Values array accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.
  */
  Values?: Values;
  /**
@@ -1836,7 +1836,7 @@
  */
  Period?: Period;
  /**
- * The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately. If you don't specify Unit, CloudWatch retrieves all unit types that have been published for the metric and attempts to evaluate the alarm. Usually, metrics are published with only one unit, so the alarm works as intended. However, if the metric is published with multiple types of units and you don't specify a unit, the alarm's behavior is not defined and it behaves predictably. We recommend omitting Unit so that you don't inadvertently specify an incorrect unit that is not published for this metric. Doing so causes the alarm to be stuck in the INSUFFICIENT DATA state.
+ * The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately. If you don't specify Unit, CloudWatch retrieves all unit types that have been published for the metric and attempts to evaluate the alarm. Usually, metrics are published with only one unit, so the alarm works as intended. However, if the metric is published with multiple types of units and you don't specify a unit, the alarm's behavior is not defined and it behaves unpredictably. We recommend omitting Unit so that you don't inadvertently specify an incorrect unit that is not published for this metric. Doing so causes the alarm to be stuck in the INSUFFICIENT DATA state.
  */
  Unit?: StandardUnit;
  /**
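The corrected wording above ("behaves unpredictably") is the rationale the docs give for leaving Unit unset when creating alarms. A hedged sketch of an alarm that omits Unit; the alarm name, metric, and threshold are placeholders:

```ts
import * as AWS from 'aws-sdk';

const cloudwatch = new AWS.CloudWatch({ region: 'us-east-1' });

// Omitting Unit, as the updated docs recommend, avoids pinning the alarm to a
// unit that may never be published for this metric.
cloudwatch.putMetricAlarm({
  AlarmName: 'example-high-network-in',
  Namespace: 'AWS/EC2',
  MetricName: 'NetworkIn',
  Dimensions: [{ Name: 'InstanceId', Value: 'i-0123456789abcdef0' }], // placeholder instance
  Statistic: 'Average',
  Period: 300,
  EvaluationPeriods: 2,
  Threshold: 5000000,
  ComparisonOperator: 'GreaterThanThreshold',
  // Unit intentionally omitted
}).promise().catch(console.error);
```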
@@ -1882,7 +1882,7 @@
  */
  Namespace: Namespace;
  /**
- * The data for the metric. The array can include no more than 20 metrics per call.
+ * The data for the metric. The array can include no more than 1000 metrics per call.
  */
  MetricData: MetricData;
  }
@@ -126,7 +126,7 @@ declare namespace DLM {
  }
  export interface CreateRule {
  /**
- * Specifies the destination for snapshots created by the policy. To create snapshots in the same Region as the source resource, specify CLOUD. To create snapshots on the same Outpost as the source resource, specify OUTPOST_LOCAL. If you omit this parameter, CLOUD is used by default. If the policy targets resources in an Amazon Web Services Region, then you must create snapshots in the same Region as the source resource. If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost as the source resource, or in the Region of that Outpost.
+ * [Snapshot policies only] Specifies the destination for snapshots created by the policy. To create snapshots in the same Region as the source resource, specify CLOUD. To create snapshots on the same Outpost as the source resource, specify OUTPOST_LOCAL. If you omit this parameter, CLOUD is used by default. If the policy targets resources in an Amazon Web Services Region, then you must create snapshots in the same Region as the source resource. If the policy targets resources on an Outpost, then you can create snapshots on the same Outpost as the source resource, or in the Region of that Outpost.
  */
  Location?: LocationValues;
  /**
@@ -165,23 +165,23 @@
  */
  Interval?: Interval;
  /**
- * The unit of time in which to measure the Interval.
+ * The unit of time in which to measure the Interval. For example, to deprecate a cross-Region AMI copy after 3 months, specify Interval=3 and IntervalUnit=MONTHS.
  */
  IntervalUnit?: RetentionIntervalUnitValues;
  }
  export interface CrossRegionCopyRetainRule {
  /**
- * The amount of time to retain each snapshot. The maximum is 100 years. This is equivalent to 1200 months, 5200 weeks, or 36500 days.
+ * The amount of time to retain a cross-Region snapshot or AMI copy. The maximum is 100 years. This is equivalent to 1200 months, 5200 weeks, or 36500 days.
  */
  Interval?: Interval;
  /**
- * The unit of time for time-based retention.
+ * The unit of time for time-based retention. For example, to retain a cross-Region copy for 3 months, specify Interval=3 and IntervalUnit=MONTHS.
  */
  IntervalUnit?: RetentionIntervalUnitValues;
  }
  export interface CrossRegionCopyRule {
  /**
- * Avoid using this parameter when creating new policies. Instead, use Target to specify a target Region or a target Outpost for snapshot copies. For policies created before the Target parameter was introduced, this parameter indicates the target Region for snapshot copies.
+ * Avoid using this parameter when creating new policies. Instead, use Target to specify a target Region or a target Outpost for snapshot copies. For policies created before the Target parameter was introduced, this parameter indicates the target Region for snapshot copies.
  */
  TargetRegion?: TargetRegion;
  /**
@@ -197,15 +197,15 @@
  */
  CmkArn?: CmkArn;
  /**
- * Indicates whether to copy all user-defined tags from the source snapshot to the cross-Region snapshot copy.
+ * Indicates whether to copy all user-defined tags from the source snapshot or AMI to the cross-Region copy.
  */
  CopyTags?: CopyTagsNullable;
  /**
- * The retention rule that indicates how long snapshot copies are to be retained in the destination Region.
+ * The retention rule that indicates how long the cross-Region snapshot or AMI copies are to be retained in the destination Region.
  */
  RetainRule?: CrossRegionCopyRetainRule;
  /**
- * The AMI deprecation rule for cross-Region AMI copies created by the rule.
+ * [AMI policies only] The AMI deprecation rule for cross-Region AMI copies created by the rule.
  */
  DeprecateRule?: CrossRegionCopyDeprecateRule;
  }
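The expanded CrossRegionCopyRule documentation uses Interval=3 / IntervalUnit=MONTHS as its example for both retention and AMI deprecation. A fragment showing that shape with the aws-sdk v2 DLM typings; the target Region and intervals are placeholders:

```ts
import { DLM } from 'aws-sdk';

// Illustrative rule only; Target and the intervals are not values from this package.
const crossRegionCopyRule: DLM.CrossRegionCopyRule = {
  Target: 'us-west-2',
  Encrypted: false,
  CopyTags: true,
  // Retain the cross-Region copy for 3 months ...
  RetainRule: { Interval: 3, IntervalUnit: 'MONTHS' },
  // ... and, for AMI policies, deprecate the copy after 3 months as well.
  DeprecateRule: { Interval: 3, IntervalUnit: 'MONTHS' },
};
```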
@@ -271,6 +271,7 @@
  export type EventSourceValues = "MANAGED_CWE"|string;
  export type EventTypeValues = "shareSnapshot"|string;
  export type ExcludeBootVolume = boolean;
+ export type ExcludeDataVolumeTagList = Tag[];
  export type ExecutionRoleArn = string;
  export interface FastRestoreRule {
  /**
@@ -393,7 +394,7 @@
  */
  Tags?: TagMap;
  /**
- * The type of policy. EBS_SNAPSHOT_MANAGEMENT indicates that the policy manages the lifecycle of Amazon EBS snapshots. IMAGE_MANAGEMENT indicates that the policy manages the lifecycle of EBS-backed AMIs.
+ * The type of policy. EBS_SNAPSHOT_MANAGEMENT indicates that the policy manages the lifecycle of Amazon EBS snapshots. IMAGE_MANAGEMENT indicates that the policy manages the lifecycle of EBS-backed AMIs. EVENT_BASED_POLICY indicates that the policy automates cross-account snapshot copies for snapshots that are shared with your account.
  */
  PolicyType?: PolicyTypeValues;
  }
@@ -414,47 +415,51 @@
  export type NoReboot = boolean;
  export interface Parameters {
  /**
- * [EBS Snapshot Management Instance policies only] Indicates whether to exclude the root volume from snapshots created using CreateSnapshots. The default is false.
+ * [Snapshot policies that target instances only] Indicates whether to exclude the root volume from multi-volume snapshot sets. The default is false. If you specify true, then the root volumes attached to targeted instances will be excluded from the multi-volume snapshot sets created by the policy.
  */
  ExcludeBootVolume?: ExcludeBootVolume;
  /**
- * Applies to AMI lifecycle policies only. Indicates whether targeted instances are rebooted when the lifecycle policy runs. true indicates that targeted instances are not rebooted when the policy runs. false indicates that target instances are rebooted when the policy runs. The default is true (instances are not rebooted).
+ * [AMI policies only] Indicates whether targeted instances are rebooted when the lifecycle policy runs. true indicates that targeted instances are not rebooted when the policy runs. false indicates that target instances are rebooted when the policy runs. The default is true (instances are not rebooted).
  */
  NoReboot?: NoReboot;
+ /**
+ * [Snapshot policies that target instances only] The tags used to identify data (non-root) volumes to exclude from multi-volume snapshot sets. If you create a snapshot lifecycle policy that targets instances and you specify tags for this parameter, then data volumes with the specified tags that are attached to targeted instances will be excluded from the multi-volume snapshot sets created by the policy.
+ */
+ ExcludeDataVolumeTags?: ExcludeDataVolumeTagList;
  }
  export type PolicyArn = string;
  export type PolicyDescription = string;
  export interface PolicyDetails {
  /**
- * The valid target resource types and actions a policy can manage. Specify EBS_SNAPSHOT_MANAGEMENT to create a lifecycle policy that manages the lifecycle of Amazon EBS snapshots. Specify IMAGE_MANAGEMENT to create a lifecycle policy that manages the lifecycle of EBS-backed AMIs. Specify EVENT_BASED_POLICY to create an event-based policy that performs specific actions when a defined event occurs in your Amazon Web Services account. The default is EBS_SNAPSHOT_MANAGEMENT.
+ * [All policy types] The valid target resource types and actions a policy can manage. Specify EBS_SNAPSHOT_MANAGEMENT to create a lifecycle policy that manages the lifecycle of Amazon EBS snapshots. Specify IMAGE_MANAGEMENT to create a lifecycle policy that manages the lifecycle of EBS-backed AMIs. Specify EVENT_BASED_POLICY to create an event-based policy that performs specific actions when a defined event occurs in your Amazon Web Services account. The default is EBS_SNAPSHOT_MANAGEMENT.
  */
  PolicyType?: PolicyTypeValues;
  /**
- * The target resource type for snapshot and AMI lifecycle policies. Use VOLUME to create snapshots of individual volumes or use INSTANCE to create multi-volume snapshots from the volumes for an instance. This parameter is required for snapshot and AMI policies only. If you are creating an event-based policy, omit this parameter.
+ * [Snapshot policies only] The target resource type for snapshot and AMI lifecycle policies. Use VOLUME to create snapshots of individual volumes or use INSTANCE to create multi-volume snapshots from the volumes for an instance.
  */
  ResourceTypes?: ResourceTypeValuesList;
  /**
- * The location of the resources to backup. If the source resources are located in an Amazon Web Services Region, specify CLOUD. If the source resources are located on an Outpost in your account, specify OUTPOST. If you specify OUTPOST, Amazon Data Lifecycle Manager backs up all resources of the specified type with matching target tags across all of the Outposts in your account.
+ * [Snapshot and AMI policies only] The location of the resources to backup. If the source resources are located in an Amazon Web Services Region, specify CLOUD. If the source resources are located on an Outpost in your account, specify OUTPOST. If you specify OUTPOST, Amazon Data Lifecycle Manager backs up all resources of the specified type with matching target tags across all of the Outposts in your account.
  */
  ResourceLocations?: ResourceLocationList;
  /**
- * The single tag that identifies targeted resources for this policy. This parameter is required for snapshot and AMI policies only. If you are creating an event-based policy, omit this parameter.
+ * [Snapshot and AMI policies only] The single tag that identifies targeted resources for this policy.
  */
  TargetTags?: TargetTagList;
  /**
- * The schedules of policy-defined actions for snapshot and AMI lifecycle policies. A policy can have up to four schedules—one mandatory schedule and up to three optional schedules. This parameter is required for snapshot and AMI policies only. If you are creating an event-based policy, omit this parameter.
+ * [Snapshot and AMI policies only] The schedules of policy-defined actions for snapshot and AMI lifecycle policies. A policy can have up to four schedules—one mandatory schedule and up to three optional schedules.
  */
  Schedules?: ScheduleList;
  /**
- * A set of optional parameters for snapshot and AMI lifecycle policies. This parameter is required for snapshot and AMI policies only. If you are creating an event-based policy, omit this parameter.
+ * [Snapshot and AMI policies only] A set of optional parameters for snapshot and AMI lifecycle policies. If you are modifying a policy that was created or previously modified using the Amazon Data Lifecycle Manager console, then you must include this parameter and specify either the default values or the new values that you require. You can't omit this parameter or set its values to null.
  */
  Parameters?: Parameters;
  /**
- * The event that triggers the event-based policy. This parameter is required for event-based policies only. If you are creating a snapshot or AMI policy, omit this parameter.
+ * [Event-based policies only] The event that activates the event-based policy.
  */
  EventSource?: EventSource;
  /**
- * The actions to be performed when the event-based policy is triggered. You can specify only one action per policy. This parameter is required for event-based policies only. If you are creating a snapshot or AMI policy, omit this parameter.
+ * [Event-based policies only] The actions to be performed when the event-based policy is activated. You can specify only one action per policy.
  */
  Actions?: ActionList;
  }
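The new ExcludeDataVolumeTags parameter lets snapshot policies that target instances skip tagged data volumes. A sketch of a createLifecyclePolicy call that uses it; the role ARN, tags, and schedule values are placeholders, not values from this package:

```ts
import * as AWS from 'aws-sdk';

const dlm = new AWS.DLM({ region: 'us-east-1' });

dlm.createLifecyclePolicy({
  ExecutionRoleArn: 'arn:aws:iam::123456789012:role/AWSDataLifecycleManagerDefaultRole', // placeholder
  Description: 'Daily multi-volume snapshots, excluding scratch data volumes',
  State: 'ENABLED',
  PolicyDetails: {
    PolicyType: 'EBS_SNAPSHOT_MANAGEMENT',
    ResourceTypes: ['INSTANCE'],
    TargetTags: [{ Key: 'Backup', Value: 'true' }],
    Parameters: {
      ExcludeBootVolume: false,
      // New in this release: skip data volumes carrying these tags.
      ExcludeDataVolumeTags: [{ Key: 'Backup', Value: 'false' }],
    },
    Schedules: [{
      Name: 'DailySnapshots',
      CreateRule: { Interval: 24, IntervalUnit: 'HOURS', Times: ['03:00'] },
      RetainRule: { Count: 7 },
      CopyTags: true,
    }],
  },
}).promise().then(r => console.log(r.PolicyId)).catch(console.error);
```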
@@ -494,7 +499,7 @@
  */
  TagsToAdd?: TagsToAddList;
  /**
- * A collection of key/value pairs with values determined dynamically when the policy is executed. Keys may be any valid Amazon EC2 tag key. Values must be in one of the two following formats: $(instance-id) or $(timestamp). Variable tags are only valid for EBS Snapshot Management – Instance policies.
+ * [AMI policies and snapshot policies that target instances only] A collection of key/value pairs with values determined dynamically when the policy is executed. Keys may be any valid Amazon EC2 tag key. Values must be in one of the two following formats: $(instance-id) or $(timestamp). Variable tags are only valid for EBS Snapshot Management – Instance policies.
  */
  VariableTags?: VariableTagsList;
  /**
@@ -502,23 +507,23 @@
  */
  CreateRule?: CreateRule;
  /**
- * The retention rule.
+ * The retention rule for snapshots or AMIs created by the policy.
  */
  RetainRule?: RetainRule;
  /**
- * The rule for enabling fast snapshot restore.
+ * [Snapshot policies only] The rule for enabling fast snapshot restore.
  */
  FastRestoreRule?: FastRestoreRule;
  /**
- * The rule for cross-Region snapshot copies. You can only specify cross-Region copy rules for policies that create snapshots in a Region. If the policy creates snapshots on an Outpost, then you cannot copy the snapshots to a Region or to an Outpost. If the policy creates snapshots in a Region, then snapshots can be copied to up to three Regions or Outposts.
+ * Specifies a rule for copying snapshots or AMIs across regions. You can't specify cross-Region copy rules for policies that create snapshots on an Outpost. If the policy creates snapshots in a Region, then snapshots can be copied to up to three Regions or Outposts.
  */
  CrossRegionCopyRules?: CrossRegionCopyRules;
  /**
- * The rule for sharing snapshots with other Amazon Web Services accounts.
+ * [Snapshot policies only] The rule for sharing snapshots with other Amazon Web Services accounts.
  */
  ShareRules?: ShareRules;
  /**
- * The AMI deprecation rule for the schedule.
+ * [AMI policies only] The AMI deprecation rule for the schedule.
  */
  DeprecateRule?: DeprecateRule;
  }
@@ -21498,6 +21498,7 @@ declare namespace EC2 {
  * Excludes the root volume from being snapshotted.
  */
  ExcludeBootVolume?: Boolean;
+ ExcludeDataVolumeIds?: VolumeIdStringList;
  }
  export interface InstanceState {
  /**
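The EC2 change adds ExcludeDataVolumeIds to InstanceSpecification, the request shape used by CreateSnapshots, mirroring the DLM change above. A hedged sketch with placeholder instance and volume IDs:

```ts
import * as AWS from 'aws-sdk';

const ec2 = new AWS.EC2({ region: 'us-east-1' });

// InstanceSpecification now also accepts ExcludeDataVolumeIds, so individual
// data volumes can be left out of a multi-volume snapshot set.
ec2.createSnapshots({
  InstanceSpecification: {
    InstanceId: 'i-0123456789abcdef0',              // placeholder instance
    ExcludeBootVolume: false,
    ExcludeDataVolumeIds: ['vol-0123456789abcdef0'], // placeholder volume
  },
  Description: 'Multi-volume snapshot set without the scratch volume',
  CopyTagsFromSource: 'volume',
}).promise().then(r => console.log(r.Snapshots)).catch(console.error);
```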
@@ -2887,7 +2887,7 @@ declare namespace Glue {
  }
  export type CrawlId = string;
  export type CrawlList = Crawl[];
- export type CrawlState = "RUNNING"|"CANCELLING"|"CANCELLED"|"SUCCEEDED"|"FAILED"|string;
+ export type CrawlState = "RUNNING"|"CANCELLING"|"CANCELLED"|"SUCCEEDED"|"FAILED"|"ERROR"|string;
  export interface Crawler {
  /**
  * The name of the crawler.
@@ -3531,6 +3531,10 @@
  * The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue Studio code generation is based.
  */
  CodeGenConfigurationNodes?: CodeGenConfigurationNodes;
+ /**
+ * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to FLEX. The flexible execution class is available for Spark jobs.
+ */
+ ExecutionClass?: ExecutionClass;
  }
  export interface CreateJobResponse {
  /**
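CreateJobRequest now accepts ExecutionClass; per the added documentation, FLEX requires Glue version 3.0 or above and the glueetl command type. A sketch of creating such a job with aws-sdk v2; the job name, role, and script location are placeholders:

```ts
import * as AWS from 'aws-sdk';

const glue = new AWS.Glue({ region: 'us-east-1' });

// FLEX is appropriate for time-insensitive Spark jobs; STANDARD remains the default.
glue.createJob({
  Name: 'example-flex-job',
  Role: 'arn:aws:iam::123456789012:role/GlueJobRole',        // placeholder role
  GlueVersion: '3.0',
  Command: {
    Name: 'glueetl',
    ScriptLocation: 's3://example-bucket/scripts/job.py',    // placeholder script
    PythonVersion: '3',
  },
  WorkerType: 'G.1X',
  NumberOfWorkers: 10,
  ExecutionClass: 'FLEX',
}).promise().then(r => console.log(r.Name)).catch(console.error);
```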
@@ -4973,6 +4977,7 @@
  BatchWindow?: BatchWindow;
  }
  export type EventQueueArn = string;
+ export type ExecutionClass = "FLEX"|"STANDARD"|string;
  export interface ExecutionProperty {
  /**
  * The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit.
@@ -6994,6 +6999,10 @@
  * The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue Studio code generation is based.
  */
  CodeGenConfigurationNodes?: CodeGenConfigurationNodes;
+ /**
+ * Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to FLEX. The flexible execution class is available for Spark jobs.
+ */
+ ExecutionClass?: ExecutionClass;
  }
  export interface JobBookmarkEntry {
  /**
@@ -7152,9 +7161,13 @@
  * This field populates only for Auto Scaling job runs, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for G.1X, 2 for G.2X, or 0.25 for G.025X workers). This value may be different than the executionEngineRuntime * MaxCapacity as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the MaxCapacity. Therefore, it is possible that the value of DPUSeconds is less than executionEngineRuntime * MaxCapacity.
  */
  DPUSeconds?: NullableDouble;
+ /**
+ * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to FLEX. The flexible execution class is available for Spark jobs.
+ */
+ ExecutionClass?: ExecutionClass;
  }
  export type JobRunList = JobRun[];
- export type JobRunState = "STARTING"|"RUNNING"|"STOPPING"|"STOPPED"|"SUCCEEDED"|"FAILED"|"TIMEOUT"|string;
+ export type JobRunState = "STARTING"|"RUNNING"|"STOPPING"|"STOPPED"|"SUCCEEDED"|"FAILED"|"TIMEOUT"|"ERROR"|"WAITING"|string;
  export interface JobUpdate {
  /**
  * Description of the job being defined.
@@ -7228,6 +7241,10 @@
  * The representation of a directed acyclic graph on which both the Glue Studio visual component and Glue Studio code generation is based.
  */
  CodeGenConfigurationNodes?: CodeGenConfigurationNodes;
+ /**
+ * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to FLEX. The flexible execution class is available for Spark jobs.
+ */
+ ExecutionClass?: ExecutionClass;
  }
  export interface Join {
  /**
@@ -9943,6 +9960,10 @@
  * The number of workers of a defined workerType that are allocated when a job runs.
  */
  NumberOfWorkers?: NullableInteger;
+ /**
+ * Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. The flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary. Only jobs with Glue version 3.0 and above and command type glueetl will be allowed to set ExecutionClass to FLEX. The flexible execution class is available for Spark jobs.
+ */
+ ExecutionClass?: ExecutionClass;
  }
  export interface StartJobRunResponse {
  /**
@@ -11463,6 +11484,14 @@
  * Total number Actions in running state.
  */
  RunningActions?: IntegerValue;
+ /**
+ * Indicates the count of job runs in the ERROR state in the workflow run.
+ */
+ ErroredActions?: IntegerValue;
+ /**
+ * Indicates the count of job runs in WAITING state in the workflow run.
+ */
+ WaitingActions?: IntegerValue;
  }
  export type WorkflowRunStatus = "RUNNING"|"COMPLETED"|"STOPPING"|"STOPPED"|"ERROR"|string;
  export type WorkflowRuns = WorkflowRun[];
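WorkflowRunStatistics gains ErroredActions and WaitingActions, matching the new ERROR and WAITING job run states. A sketch that reads them from getWorkflowRun; the workflow name and run ID are placeholders:

```ts
import * as AWS from 'aws-sdk';

const glue = new AWS.Glue({ region: 'us-east-1' });

glue.getWorkflowRun({ Name: 'example-workflow', RunId: 'wr_0123456789abcdef' }) // placeholders
  .promise()
  .then(({ Run }) => {
    const stats = Run?.Statistics;
    // ErroredActions and WaitingActions are the new counters surfaced in this release.
    console.log('errored:', stats?.ErroredActions, 'waiting:', stats?.WaitingActions);
  })
  .catch(console.error);
```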
@@ -836,6 +836,7 @@ declare namespace IoTWireless {
  */
  VerticalAccuracy?: VerticalAccuracy;
  }
+ export type AckModeRetryDurationSecs = number;
  export type AddGwMetadata = boolean;
  export type AmazonId = string;
  export type AmazonResourceName = string;
@@ -1473,7 +1474,9 @@
  }
  export type DownlinkQueueMessagesList = DownlinkQueueMessage[];
  export type DrMax = number;
+ export type DrMaxBox = number;
  export type DrMin = number;
+ export type DrMinBox = number;
  export type EndPoint = string;
  export type Event = "discovered"|"lost"|"ack"|"nack"|"passthrough"|string;
  export interface EventConfigurationItem {
@@ -1509,6 +1512,10 @@
  * Connection status event configuration for an event configuration item.
  */
  ConnectionStatus?: ConnectionStatusEventConfiguration;
+ /**
+ * Message delivery status event configuration for an event configuration item.
+ */
+ MessageDeliveryStatus?: MessageDeliveryStatusEventConfiguration;
  }
  export type EventNotificationPartnerType = "Sidewalk"|string;
  export type EventNotificationResourceType = "SidewalkAccount"|"WirelessDevice"|"WirelessGateway"|string;
@@ -1619,6 +1626,10 @@
  * Resource type event configuration for the connection status event.
  */
  ConnectionStatus?: ConnectionStatusResourceTypeEventConfiguration;
+ /**
+ * Resource type event configuration object for the message delivery status event.
+ */
+ MessageDeliveryStatus?: MessageDeliveryStatusResourceTypeEventConfiguration;
  }
  export interface GetFuotaTaskRequest {
  Id: FuotaTaskId;
@@ -1786,6 +1797,10 @@
  * Event configuration for the connection status event.
  */
  ConnectionStatus?: ConnectionStatusEventConfiguration;
+ /**
+ * Event configuration for the message delivery status event.
+ */
+ MessageDeliveryStatus?: MessageDeliveryStatusEventConfiguration;
  }
  export interface GetResourceLogLevelRequest {
  ResourceIdentifier: ResourceIdentifier;
@@ -2712,6 +2727,14 @@
  * The AddGWMetaData value.
  */
  AddGwMetadata?: AddGwMetadata;
+ /**
+ * The DrMin value.
+ */
+ DrMin?: DrMinBox;
+ /**
+ * The DrMax value.
+ */
+ DrMax?: DrMaxBox;
  }
  export interface LoRaWANStartFuotaTask {
  StartTime?: StartTime;
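LoRaWANServiceProfile now exposes DrMin and DrMax, letting callers bound the data rates for devices that use the profile. A sketch of a createServiceProfile call that sets them; the profile name and data-rate bounds are illustrative only:

```ts
import * as AWS from 'aws-sdk';

const iotwireless = new AWS.IoTWireless({ region: 'us-east-1' });

iotwireless.createServiceProfile({
  Name: 'example-service-profile', // placeholder name
  LoRaWAN: {
    AddGwMetadata: true,
    DrMin: 0,  // lowest permitted data rate
    DrMax: 5,  // highest permitted data rate
  },
}).promise().then(r => console.log(r.Id)).catch(console.error);
```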
@@ -2772,6 +2795,16 @@
  export type MaxEirp = number;
  export type MaxResults = number;
  export type McGroupId = number;
+ export interface MessageDeliveryStatusEventConfiguration {
+ Sidewalk?: SidewalkEventNotificationConfigurations;
+ /**
+ * Enum to denote whether the wireless device id device registration state event topic is enabled or disabled.
+ */
+ WirelessDeviceIdEventTopic?: EventNotificationTopicStatus;
+ }
+ export interface MessageDeliveryStatusResourceTypeEventConfiguration {
+ Sidewalk?: SidewalkResourceTypeEventConfiguration;
+ }
  export type MessageId = string;
  export type MessageType = "CUSTOM_COMMAND_ID_NOTIFY"|"CUSTOM_COMMAND_ID_GET"|"CUSTOM_COMMAND_ID_SET"|"CUSTOM_COMMAND_ID_RESP"|string;
  export type MinGwDiversity = number;
@@ -3173,6 +3206,10 @@
  */
  Seq?: Seq;
  MessageType?: MessageType;
+ /**
+ * The duration of time in seconds for which you want to retry sending the ACK.
+ */
+ AckModeRetryDurationSecs?: AckModeRetryDurationSecs;
  }
  export interface SidewalkUpdateAccount {
  /**
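SidewalkSendDataToDevice gains AckModeRetryDurationSecs, the number of seconds the service keeps retrying delivery of the ACK. A sketch of sendDataToWirelessDevice using it; the device ID and payload are placeholders, and acknowledge mode (TransmitMode 1) is assumed:

```ts
import * as AWS from 'aws-sdk';

const iotwireless = new AWS.IoTWireless({ region: 'us-east-1' });

iotwireless.sendDataToWirelessDevice({
  Id: '0d9a439b-e77a-4573-a791-49d5c0f4db95', // placeholder wireless device ID
  TransmitMode: 1,                            // acknowledged delivery
  PayloadData: Buffer.from('hello').toString('base64'),
  WirelessMetadata: {
    Sidewalk: {
      MessageType: 'CUSTOM_COMMAND_ID_NOTIFY',
      AckModeRetryDurationSecs: 30,           // new field from this release
    },
  },
}).promise().then(r => console.log(r.MessageId)).catch(console.error);
```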
@@ -3331,6 +3368,10 @@
  * Connection status resource type event configuration object for enabling and disabling wireless gateway topic.
  */
  ConnectionStatus?: ConnectionStatusResourceTypeEventConfiguration;
+ /**
+ * Message delivery status resource type event configuration object for enabling and disabling wireless device topic.
+ */
+ MessageDeliveryStatus?: MessageDeliveryStatusResourceTypeEventConfiguration;
  }
  export interface UpdateEventConfigurationByResourceTypesResponse {
  }
@@ -3449,6 +3490,10 @@
  * Event configuration for the connection status event.
  */
  ConnectionStatus?: ConnectionStatusEventConfiguration;
+ /**
+ * Event configuration for the message delivery status event.
+ */
+ MessageDeliveryStatus?: MessageDeliveryStatusEventConfiguration;
  }
  export interface UpdateResourceEventConfigurationResponse {
  }
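UpdateResourceEventConfigurationRequest now accepts MessageDeliveryStatus. A sketch of enabling the event for a single device; the device ID is a placeholder, and the Sidewalk topic field (AmazonIdEventTopic) as well as the WirelessDeviceId identifier type come from the existing IoTWireless typings, which this diff does not show:

```ts
import * as AWS from 'aws-sdk';

const iotwireless = new AWS.IoTWireless({ region: 'us-east-1' });

// Enable message delivery status events for one Sidewalk wireless device.
iotwireless.updateResourceEventConfiguration({
  Identifier: '0d9a439b-e77a-4573-a791-49d5c0f4db95', // placeholder wireless device ID
  IdentifierType: 'WirelessDeviceId',
  MessageDeliveryStatus: {
    Sidewalk: { AmazonIdEventTopic: 'Enabled' },
    WirelessDeviceIdEventTopic: 'Enabled',
  },
}).promise().catch(console.error);
```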