aws-sdk 2.1394.0 → 2.1396.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1067,6 +1067,22 @@ declare class Connect extends Service {
1067
1067
  * Searches for available phone numbers that you can claim to your Amazon Connect instance or traffic distribution group. If the provided TargetArn is a traffic distribution group, you can call this API in both Amazon Web Services Regions associated with the traffic distribution group.
1068
1068
  */
1069
1069
  searchAvailablePhoneNumbers(callback?: (err: AWSError, data: Connect.Types.SearchAvailablePhoneNumbersResponse) => void): Request<Connect.Types.SearchAvailablePhoneNumbersResponse, AWSError>;
1070
+ /**
1071
+ * Searches the hours of operation in an Amazon Connect instance, with optional filtering.
1072
+ */
1073
+ searchHoursOfOperations(params: Connect.Types.SearchHoursOfOperationsRequest, callback?: (err: AWSError, data: Connect.Types.SearchHoursOfOperationsResponse) => void): Request<Connect.Types.SearchHoursOfOperationsResponse, AWSError>;
1074
+ /**
1075
+ * Searches the hours of operation in an Amazon Connect instance, with optional filtering.
1076
+ */
1077
+ searchHoursOfOperations(callback?: (err: AWSError, data: Connect.Types.SearchHoursOfOperationsResponse) => void): Request<Connect.Types.SearchHoursOfOperationsResponse, AWSError>;
1078
+ /**
1079
+ * Searches prompts in an Amazon Connect instance, with optional filtering.
1080
+ */
1081
+ searchPrompts(params: Connect.Types.SearchPromptsRequest, callback?: (err: AWSError, data: Connect.Types.SearchPromptsResponse) => void): Request<Connect.Types.SearchPromptsResponse, AWSError>;
1082
+ /**
1083
+ * Searches prompts in an Amazon Connect instance, with optional filtering.
1084
+ */
1085
+ searchPrompts(callback?: (err: AWSError, data: Connect.Types.SearchPromptsResponse) => void): Request<Connect.Types.SearchPromptsResponse, AWSError>;
1070
1086
  /**
1071
1087
  * This API is in preview release for Amazon Connect and is subject to change. Searches queues in an Amazon Connect instance, with optional filtering.
1072
1088
  */
@@ -1075,6 +1091,14 @@ declare class Connect extends Service {
1075
1091
  * This API is in preview release for Amazon Connect and is subject to change. Searches queues in an Amazon Connect instance, with optional filtering.
1076
1092
  */
1077
1093
  searchQueues(callback?: (err: AWSError, data: Connect.Types.SearchQueuesResponse) => void): Request<Connect.Types.SearchQueuesResponse, AWSError>;
1094
+ /**
1095
+ * Searches quick connects in an Amazon Connect instance, with optional filtering.
1096
+ */
1097
+ searchQuickConnects(params: Connect.Types.SearchQuickConnectsRequest, callback?: (err: AWSError, data: Connect.Types.SearchQuickConnectsResponse) => void): Request<Connect.Types.SearchQuickConnectsResponse, AWSError>;
1098
+ /**
1099
+ * Searches quick connects in an Amazon Connect instance, with optional filtering.
1100
+ */
1101
+ searchQuickConnects(callback?: (err: AWSError, data: Connect.Types.SearchQuickConnectsResponse) => void): Request<Connect.Types.SearchQuickConnectsResponse, AWSError>;
1078
1102
  /**
1079
1103
  * This API is in preview release for Amazon Connect and is subject to change. Searches routing profiles in an Amazon Connect instance, with optional filtering.
1080
1104
  */
@@ -4931,7 +4955,26 @@ declare namespace Connect {
4931
4955
  export type HoursOfOperationDays = "SUNDAY"|"MONDAY"|"TUESDAY"|"WEDNESDAY"|"THURSDAY"|"FRIDAY"|"SATURDAY"|string;
4932
4956
  export type HoursOfOperationDescription = string;
4933
4957
  export type HoursOfOperationId = string;
4958
+ export type HoursOfOperationList = HoursOfOperation[];
4934
4959
  export type HoursOfOperationName = string;
4960
+ export type HoursOfOperationSearchConditionList = HoursOfOperationSearchCriteria[];
4961
+ export interface HoursOfOperationSearchCriteria {
4962
+ /**
4963
+ * A list of conditions which would be applied together with an OR condition.
4964
+ */
4965
+ OrConditions?: HoursOfOperationSearchConditionList;
4966
+ /**
4967
+ * A list of conditions which would be applied together with an AND condition.
4968
+ */
4969
+ AndConditions?: HoursOfOperationSearchConditionList;
4970
+ /**
4971
+ * A leaf node condition which can be used to specify a string condition. The currently supported values for FieldName are name, description, timezone, and resourceID.
4972
+ */
4973
+ StringCondition?: StringCondition;
4974
+ }
4975
+ export interface HoursOfOperationSearchFilter {
4976
+ TagFilter?: ControlPlaneTagFilter;
4977
+ }
4935
4978
  export interface HoursOfOperationSummary {
4936
4979
  /**
4937
4980
  * The identifier of the hours of operation.
@@ -6423,7 +6466,7 @@ declare namespace Connect {
6423
6466
  */
6424
6467
  Name?: CommonNameLength127;
6425
6468
  /**
6426
- * A description for the prompt.
6469
+ * The description of the prompt.
6427
6470
  */
6428
6471
  Description?: PromptDescription;
6429
6472
  /**
@@ -6433,8 +6476,27 @@ declare namespace Connect {
6433
6476
  }
6434
6477
  export type PromptDescription = string;
6435
6478
  export type PromptId = string;
6479
+ export type PromptList = Prompt[];
6436
6480
  export type PromptName = string;
6437
6481
  export type PromptPresignedUrl = string;
6482
+ export type PromptSearchConditionList = PromptSearchCriteria[];
6483
+ export interface PromptSearchCriteria {
6484
+ /**
6485
+ * A list of conditions which would be applied together with an OR condition.
6486
+ */
6487
+ OrConditions?: PromptSearchConditionList;
6488
+ /**
6489
+ * A list of conditions which would be applied together with an AND condition.
6490
+ */
6491
+ AndConditions?: PromptSearchConditionList;
6492
+ /**
6493
+ * A leaf node condition which can be used to specify a string condition. The currently supported values for FieldName are name, description, and resourceID.
6494
+ */
6495
+ StringCondition?: StringCondition;
6496
+ }
6497
+ export interface PromptSearchFilter {
6498
+ TagFilter?: ControlPlaneTagFilter;
6499
+ }
6438
6500
  export interface PromptSummary {
6439
6501
  /**
6440
6502
  * The identifier of the prompt.
@@ -6548,6 +6610,9 @@ declare namespace Connect {
6548
6610
  * A list of conditions which would be applied together with an AND condition.
6549
6611
  */
6550
6612
  AndConditions?: QueueSearchConditionList;
6613
+ /**
6614
+ * A leaf node condition which can be used to specify a string condition. The currently supported values for FieldName are name, description, and resourceID.
6615
+ */
6551
6616
  StringCondition?: StringCondition;
6552
6617
  /**
6553
6618
  * The type of queue.
@@ -6628,6 +6693,25 @@ declare namespace Connect {
6628
6693
  export type QuickConnectDescription = string;
6629
6694
  export type QuickConnectId = string;
6630
6695
  export type QuickConnectName = string;
6696
+ export type QuickConnectSearchConditionList = QuickConnectSearchCriteria[];
6697
+ export interface QuickConnectSearchCriteria {
6698
+ /**
6699
+ * A list of conditions which would be applied together with an OR condition.
6700
+ */
6701
+ OrConditions?: QuickConnectSearchConditionList;
6702
+ /**
6703
+ * A list of conditions which would be applied together with an AND condition.
6704
+ */
6705
+ AndConditions?: QuickConnectSearchConditionList;
6706
+ /**
6707
+ * A leaf node condition which can be used to specify a string condition. The currently supported values for FieldName are name, description, and resourceID.
6708
+ */
6709
+ StringCondition?: StringCondition;
6710
+ }
6711
+ export interface QuickConnectSearchFilter {
6712
+ TagFilter?: ControlPlaneTagFilter;
6713
+ }
6714
+ export type QuickConnectSearchSummaryList = QuickConnect[];
6631
6715
  export interface QuickConnectSummary {
6632
6716
  /**
6633
6717
  * The identifier for the quick connect.
@@ -6883,6 +6967,9 @@ declare namespace Connect {
6883
6967
  * A list of conditions which would be applied together with an AND condition.
6884
6968
  */
6885
6969
  AndConditions?: RoutingProfileSearchConditionList;
6970
+ /**
6971
+ * A leaf node condition which can be used to specify a string condition. The currently supported values for FieldName are name, description, and resourceID.
6972
+ */
6886
6973
  StringCondition?: StringCondition;
6887
6974
  }
6888
6975
  export interface RoutingProfileSearchFilter {
@@ -7073,6 +7160,78 @@ declare namespace Connect {
7073
7160
  */
7074
7161
  AvailableNumbersList?: AvailableNumbersList;
7075
7162
  }
7163
+ export interface SearchHoursOfOperationsRequest {
7164
+ /**
7165
+ * The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.
7166
+ */
7167
+ InstanceId: InstanceId;
7168
+ /**
7169
+ * The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.
7170
+ */
7171
+ NextToken?: NextToken2500;
7172
+ /**
7173
+ * The maximum number of results to return per page.
7174
+ */
7175
+ MaxResults?: MaxResult100;
7176
+ /**
7177
+ * Filters to be applied to search results.
7178
+ */
7179
+ SearchFilter?: HoursOfOperationSearchFilter;
7180
+ /**
7181
+ * The search criteria to be used to return hours of operations.
7182
+ */
7183
+ SearchCriteria?: HoursOfOperationSearchCriteria;
7184
+ }
7185
+ export interface SearchHoursOfOperationsResponse {
7186
+ /**
7187
+ * Information about the hours of operations.
7188
+ */
7189
+ HoursOfOperations?: HoursOfOperationList;
7190
+ /**
7191
+ * If there are additional results, this is the token for the next set of results.
7192
+ */
7193
+ NextToken?: NextToken2500;
7194
+ /**
7195
+ * The total number of hours of operations which matched your search query.
7196
+ */
7197
+ ApproximateTotalCount?: ApproximateTotalCount;
7198
+ }
7199
+ export interface SearchPromptsRequest {
7200
+ /**
7201
+ * The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.
7202
+ */
7203
+ InstanceId: InstanceId;
7204
+ /**
7205
+ * The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.
7206
+ */
7207
+ NextToken?: NextToken2500;
7208
+ /**
7209
+ * The maximum number of results to return per page.
7210
+ */
7211
+ MaxResults?: MaxResult100;
7212
+ /**
7213
+ * Filters to be applied to search results.
7214
+ */
7215
+ SearchFilter?: PromptSearchFilter;
7216
+ /**
7217
+ * The search criteria to be used to return prompts.
7218
+ */
7219
+ SearchCriteria?: PromptSearchCriteria;
7220
+ }
7221
+ export interface SearchPromptsResponse {
7222
+ /**
7223
+ * Information about the prompts.
7224
+ */
7225
+ Prompts?: PromptList;
7226
+ /**
7227
+ * If there are additional results, this is the token for the next set of results.
7228
+ */
7229
+ NextToken?: NextToken2500;
7230
+ /**
7231
+ * The total number of quick connects which matched your search query.
7232
+ */
7233
+ ApproximateTotalCount?: ApproximateTotalCount;
7234
+ }
7076
7235
  export interface SearchQueuesRequest {
7077
7236
  /**
7078
7237
  * The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.
@@ -7109,6 +7268,42 @@ declare namespace Connect {
7109
7268
  */
7110
7269
  ApproximateTotalCount?: ApproximateTotalCount;
7111
7270
  }
7271
+ export interface SearchQuickConnectsRequest {
7272
+ /**
7273
+ * The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.
7274
+ */
7275
+ InstanceId: InstanceId;
7276
+ /**
7277
+ * The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.
7278
+ */
7279
+ NextToken?: NextToken2500;
7280
+ /**
7281
+ * The maximum number of results to return per page.
7282
+ */
7283
+ MaxResults?: MaxResult100;
7284
+ /**
7285
+ * Filters to be applied to search results.
7286
+ */
7287
+ SearchFilter?: QuickConnectSearchFilter;
7288
+ /**
7289
+ * The search criteria to be used to return quick connects.
7290
+ */
7291
+ SearchCriteria?: QuickConnectSearchCriteria;
7292
+ }
7293
+ export interface SearchQuickConnectsResponse {
7294
+ /**
7295
+ * Information about the quick connects.
7296
+ */
7297
+ QuickConnects?: QuickConnectSearchSummaryList;
7298
+ /**
7299
+ * If there are additional results, this is the token for the next set of results.
7300
+ */
7301
+ NextToken?: NextToken2500;
7302
+ /**
7303
+ * The total number of quick connects which matched your search query.
7304
+ */
7305
+ ApproximateTotalCount?: ApproximateTotalCount;
7306
+ }
7112
7307
  export interface SearchRoutingProfilesRequest {
7113
7308
  /**
7114
7309
  * The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.
@@ -9075,7 +9270,7 @@ declare namespace Connect {
9075
9270
  */
9076
9271
  AndConditions?: UserSearchConditionList;
9077
9272
  /**
9078
- * A leaf node condition which can be used to specify a string condition.
9273
+ * A leaf node condition which can be used to specify a string condition. The currently supported values for FieldName are name, description, and resourceID.
9079
9274
  */
9080
9275
  StringCondition?: StringCondition;
9081
9276
  /**
@@ -23,11 +23,11 @@ declare class DynamoDB extends DynamoDBCustomizations {
23
23
  */
24
24
  batchExecuteStatement(callback?: (err: AWSError, data: DynamoDB.Types.BatchExecuteStatementOutput) => void): Request<DynamoDB.Types.BatchExecuteStatementOutput, AWSError>;
25
25
  /**
26
- * The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key. A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem returns a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get. If you request more than 100 items, BatchGetItem returns a ValidationException with the message "Too many items requested for the BatchGetItem call." For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one dataset. If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed. For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide. 
By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables. In order to minimize response latency, BatchGetItem may retrieve items in parallel. When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter. If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Working with Tables in the Amazon DynamoDB Developer Guide.
26
+ * The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key. A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem returns a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, more than 1MB per partition is requested, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get. If you request more than 100 items, BatchGetItem returns a ValidationException with the message "Too many items requested for the BatchGetItem call." For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one dataset. If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed. 
For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide. By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables. In order to minimize response latency, BatchGetItem may retrieve items in parallel. When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter. If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Working with Tables in the Amazon DynamoDB Developer Guide.
27
27
  */
28
28
  batchGetItem(params: DynamoDB.Types.BatchGetItemInput, callback?: (err: AWSError, data: DynamoDB.Types.BatchGetItemOutput) => void): Request<DynamoDB.Types.BatchGetItemOutput, AWSError>;
29
29
  /**
30
- * The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key. A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem returns a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get. If you request more than 100 items, BatchGetItem returns a ValidationException with the message "Too many items requested for the BatchGetItem call." For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one dataset. If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed. For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide. 
By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables. In order to minimize response latency, BatchGetItem may retrieve items in parallel. When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter. If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Working with Tables in the Amazon DynamoDB Developer Guide.
30
+ * The BatchGetItem operation returns the attributes of one or more items from one or more tables. You identify requested items by primary key. A single operation can retrieve up to 16 MB of data, which can contain as many as 100 items. BatchGetItem returns a partial result if the response size limit is exceeded, the table's provisioned throughput is exceeded, more than 1MB per partition is requested, or an internal processing failure occurs. If a partial result is returned, the operation returns a value for UnprocessedKeys. You can use this value to retry the operation starting with the next item to get. If you request more than 100 items, BatchGetItem returns a ValidationException with the message "Too many items requested for the BatchGetItem call." For example, if you ask to retrieve 100 items, but each individual item is 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). It also returns an appropriate UnprocessedKeys value so you can get the next page of results. If desired, your application can include its own logic to assemble the pages of results into one dataset. If none of the items can be processed due to insufficient provisioned throughput on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. If at least one of the items is successfully processed, then BatchGetItem completes successfully, while returning the keys of the unread items in UnprocessedKeys. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to throttling on the individual tables. If you delay the batch operation using exponential backoff, the individual requests in the batch are much more likely to succeed. 
For more information, see Batch Operations and Error Handling in the Amazon DynamoDB Developer Guide. By default, BatchGetItem performs eventually consistent reads on every table in the request. If you want strongly consistent reads instead, you can set ConsistentRead to true for any or all tables. In order to minimize response latency, BatchGetItem may retrieve items in parallel. When designing your application, keep in mind that DynamoDB does not return items in any particular order. To help parse the response by item, include the primary key values for the items in your request in the ProjectionExpression parameter. If a requested item does not exist, it is not returned in the result. Requests for nonexistent items consume the minimum read capacity units according to the type of read. For more information, see Working with Tables in the Amazon DynamoDB Developer Guide.
31
31
  */
32
32
  batchGetItem(callback?: (err: AWSError, data: DynamoDB.Types.BatchGetItemOutput) => void): Request<DynamoDB.Types.BatchGetItemOutput, AWSError>;
33
33
  /**
@@ -226,7 +226,7 @@ declare namespace DynamoDBStreams {
226
226
  */
227
227
  eventVersion?: String;
228
228
  /**
229
- * The AWS service from which the stream record originated. For DynamoDB Streams, this is aws:dynamodb.
229
+ * The Amazon Web Services service from which the stream record originated. For DynamoDB Streams, this is aws:dynamodb.
230
230
  */
231
231
  eventSource?: String;
232
232
  /**
@@ -282,7 +282,7 @@ declare namespace DynamoDBStreams {
282
282
  */
283
283
  TableName?: TableName;
284
284
  /**
285
- * A timestamp, in ISO 8601 format, for this stream. Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique: the AWS customer ID. the table name the StreamLabel
285
+ * A timestamp, in ISO 8601 format, for this stream. Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique: the Amazon Web Services customer ID. the table name the StreamLabel
286
286
  */
287
287
  StreamLabel?: String;
288
288
  }
@@ -293,7 +293,7 @@ declare namespace DynamoDBStreams {
293
293
  */
294
294
  StreamArn?: StreamArn;
295
295
  /**
296
- * A timestamp, in ISO 8601 format, for this stream. Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique: the AWS customer ID. the table name the StreamLabel
296
+ * A timestamp, in ISO 8601 format, for this stream. Note that LatestStreamLabel is not a unique identifier for the stream, because it is possible that a stream from another table might have the same timestamp. However, the combination of the following three elements is guaranteed to be unique: the Amazon Web Services customer ID. the table name the StreamLabel
297
297
  */
298
298
  StreamLabel?: String;
299
299
  /**
@@ -328,7 +328,7 @@ declare namespace DynamoDBStreams {
328
328
  export type StreamList = Stream[];
329
329
  export interface StreamRecord {
330
330
  /**
331
- * The approximate date and time when the stream record was created, in UNIX epoch time format.
331
+ * The approximate date and time when the stream record was created, in UNIX epoch time format and rounded down to the closest second.
332
332
  */
333
333
  ApproximateCreationDateTime?: _Date;
334
334
  /**
package/clients/fsx.d.ts CHANGED
@@ -44,11 +44,11 @@ declare class FSx extends Service {
44
44
  */
45
45
  createBackup(callback?: (err: AWSError, data: FSx.Types.CreateBackupResponse) => void): Request<FSx.Types.CreateBackupResponse, AWSError>;
46
46
  /**
47
- * Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported for all file systems except for Scratch_1 deployment type. Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket. CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation.
47
+ * Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type. Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket. CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation.
48
48
  */
49
49
  createDataRepositoryAssociation(params: FSx.Types.CreateDataRepositoryAssociationRequest, callback?: (err: AWSError, data: FSx.Types.CreateDataRepositoryAssociationResponse) => void): Request<FSx.Types.CreateDataRepositoryAssociationResponse, AWSError>;
50
50
  /**
51
- * Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported for all file systems except for Scratch_1 deployment type. Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket. CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation.
51
+ * Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type. Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket. CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation.
52
52
  */
53
53
  createDataRepositoryAssociation(callback?: (err: AWSError, data: FSx.Types.CreateDataRepositoryAssociationResponse) => void): Request<FSx.Types.CreateDataRepositoryAssociationResponse, AWSError>;
54
54
  /**
@@ -124,11 +124,11 @@ declare class FSx extends Service {
124
124
  */
125
125
  deleteBackup(callback?: (err: AWSError, data: FSx.Types.DeleteBackupResponse) => void): Request<FSx.Types.DeleteBackupResponse, AWSError>;
126
126
  /**
127
- * Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported for all file systems except for Scratch_1 deployment type.
127
+ * Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.
128
128
  */
129
129
  deleteDataRepositoryAssociation(params: FSx.Types.DeleteDataRepositoryAssociationRequest, callback?: (err: AWSError, data: FSx.Types.DeleteDataRepositoryAssociationResponse) => void): Request<FSx.Types.DeleteDataRepositoryAssociationResponse, AWSError>;
130
130
  /**
131
- * Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported for all file systems except for Scratch_1 deployment type.
131
+ * Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.
132
132
  */
133
133
  deleteDataRepositoryAssociation(callback?: (err: AWSError, data: FSx.Types.DeleteDataRepositoryAssociationResponse) => void): Request<FSx.Types.DeleteDataRepositoryAssociationResponse, AWSError>;
134
134
  /**
@@ -180,11 +180,11 @@ declare class FSx extends Service {
180
180
  */
181
181
  describeBackups(callback?: (err: AWSError, data: FSx.Types.DescribeBackupsResponse) => void): Request<FSx.Types.DescribeBackupsResponse, AWSError>;
182
182
  /**
183
- * Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported on Amazon File Cache resources and all Amazon FSx for Lustre file systems excluding Scratch_1 deployment types. You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling. When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.
183
+ * Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type. You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling. When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.
184
184
  */
185
185
  describeDataRepositoryAssociations(params: FSx.Types.DescribeDataRepositoryAssociationsRequest, callback?: (err: AWSError, data: FSx.Types.DescribeDataRepositoryAssociationsResponse) => void): Request<FSx.Types.DescribeDataRepositoryAssociationsResponse, AWSError>;
186
186
  /**
187
- * Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported on Amazon File Cache resources and all Amazon FSx for Lustre file systems excluding Scratch_1 deployment types. You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling. When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.
187
+ * Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported on Amazon File Cache resources and all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type. You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling. When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.
188
188
  */
189
189
  describeDataRepositoryAssociations(callback?: (err: AWSError, data: FSx.Types.DescribeDataRepositoryAssociationsResponse) => void): Request<FSx.Types.DescribeDataRepositoryAssociationsResponse, AWSError>;
190
190
  /**
@@ -292,11 +292,11 @@ declare class FSx extends Service {
292
292
  */
293
293
  untagResource(callback?: (err: AWSError, data: FSx.Types.UntagResourceResponse) => void): Request<FSx.Types.UntagResourceResponse, AWSError>;
294
294
  /**
295
- * Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported for all file systems except for Scratch_1 deployment type.
295
+ * Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.
296
296
  */
297
297
  updateDataRepositoryAssociation(params: FSx.Types.UpdateDataRepositoryAssociationRequest, callback?: (err: AWSError, data: FSx.Types.UpdateDataRepositoryAssociationResponse) => void): Request<FSx.Types.UpdateDataRepositoryAssociationResponse, AWSError>;
298
298
  /**
299
- * Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported for all file systems except for Scratch_1 deployment type.
299
+ * Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported on all FSx for Lustre 2.12 and newer file systems, excluding scratch_1 deployment type.
300
300
  */
301
301
  updateDataRepositoryAssociation(callback?: (err: AWSError, data: FSx.Types.UpdateDataRepositoryAssociationResponse) => void): Request<FSx.Types.UpdateDataRepositoryAssociationResponse, AWSError>;
302
302
  /**
@@ -324,11 +324,11 @@ declare class FSx extends Service {
324
324
  */
325
325
  updateSnapshot(callback?: (err: AWSError, data: FSx.Types.UpdateSnapshotResponse) => void): Request<FSx.Types.UpdateSnapshotResponse, AWSError>;
326
326
  /**
327
- * Updates an Amazon FSx for ONTAP storage virtual machine (SVM).
327
+ * Updates an FSx for ONTAP storage virtual machine (SVM).
328
328
  */
329
329
  updateStorageVirtualMachine(params: FSx.Types.UpdateStorageVirtualMachineRequest, callback?: (err: AWSError, data: FSx.Types.UpdateStorageVirtualMachineResponse) => void): Request<FSx.Types.UpdateStorageVirtualMachineResponse, AWSError>;
330
330
  /**
331
- * Updates an Amazon FSx for ONTAP storage virtual machine (SVM).
331
+ * Updates an FSx for ONTAP storage virtual machine (SVM).
332
332
  */
333
333
  updateStorageVirtualMachine(callback?: (err: AWSError, data: FSx.Types.UpdateStorageVirtualMachineResponse) => void): Request<FSx.Types.UpdateStorageVirtualMachineResponse, AWSError>;
334
334
  /**
@@ -843,11 +843,11 @@ declare namespace FSx {
843
843
  CopyTagsToVolumes?: Flag;
844
844
  DailyAutomaticBackupStartTime?: DailyTime;
845
845
  /**
846
- * Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following: SINGLE_AZ_1- (Default) Creates file systems with throughput capacities of 64 - 4,096 MB/s. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available, except US West (Oregon). SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MB/s using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions. For more information, see: Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.
846
+ * Specifies the file system deployment type. Single AZ deployment types are configured for redundancy within a single Availability Zone in an Amazon Web Services Region . Valid values are the following: SINGLE_AZ_1- (Default) Creates file systems with throughput capacities of 64 - 4,096 MBps. Single_AZ_1 is available in all Amazon Web Services Regions where Amazon FSx for OpenZFS is available, except US West (Oregon). SINGLE_AZ_2- Creates file systems with throughput capacities of 160 - 10,240 MBps using an NVMe L2ARC cache. Single_AZ_2 is available only in the US East (N. Virginia), US East (Ohio), US West (Oregon), and Europe (Ireland) Amazon Web Services Regions. For more information, see: Deployment type availability and File system performance in the Amazon FSx for OpenZFS User Guide.
847
847
  */
848
848
  DeploymentType: OpenZFSDeploymentType;
849
849
  /**
850
- * Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MB/s). Valid values depend on the DeploymentType you choose, as follows: For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s. For SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MB/s. You pay for additional throughput capacity that you provision.
850
+ * Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MBps). Valid values depend on the DeploymentType you choose, as follows: For SINGLE_AZ_1, valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MBps. For SINGLE_AZ_2, valid values are 160, 320, 640, 1280, 2560, 3840, 5120, 7680, or 10240 MBps. You pay for additional throughput capacity that you provision.
851
851
  */
852
852
  ThroughputCapacity: MegabytesPerSecond;
853
853
  WeeklyMaintenanceStartTime?: WeeklyTime;
@@ -1757,7 +1757,7 @@ declare namespace FSx {
1757
1757
  }
1758
1758
  export interface DiskIopsConfiguration {
1759
1759
  /**
1760
- * Specifies whether the number of IOPS for the file system is using the system default (AUTOMATIC) or was provisioned by the customer (USER_PROVISIONED).
1760
+ * Specifies whether the file system is using the AUTOMATIC setting of SSD IOPS of 3 IOPS per GB of storage capacity, or if it is using a USER_PROVISIONED value.
1761
1761
  */
1762
1762
  Mode?: DiskIopsConfigurationMode;
1763
1763
  /**
@@ -2239,6 +2239,10 @@ declare namespace FSx {
2239
2239
  RouteTableIds?: RouteTableIds;
2240
2240
  ThroughputCapacity?: MegabytesPerSecond;
2241
2241
  WeeklyMaintenanceStartTime?: WeeklyTime;
2242
+ /**
2243
+ * You can use the fsxadmin user account to access the NetApp ONTAP CLI and REST API. The password value is always redacted in the response.
2244
+ */
2245
+ FsxAdminPassword?: AdminPassword;
2242
2246
  }
2243
2247
  export interface OntapVolumeConfiguration {
2244
2248
  /**
@@ -2563,17 +2567,29 @@ declare namespace FSx {
2563
2567
  }
2564
2568
  export interface SelfManagedActiveDirectoryConfigurationUpdates {
2565
2569
  /**
2566
- * The user name for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain. This account must have the permission to join computers to the domain in the organizational unit provided in OrganizationalUnitDistinguishedName.
2570
+ * Specifies the updated user name for the service account on your self-managed AD domain. Amazon FSx uses this account to join to your self-managed AD domain. This account must have the permissions required to join computers to the domain in the organizational unit provided in OrganizationalUnitDistinguishedName.
2567
2571
  */
2568
2572
  UserName?: DirectoryUserName;
2569
2573
  /**
2570
- * The password for the service account on your self-managed AD domain that Amazon FSx will use to join to your AD domain.
2574
+ * Specifies the updated password for the service account on your self-managed AD domain. Amazon FSx uses this account to join to your self-managed AD domain.
2571
2575
  */
2572
2576
  Password?: DirectoryPassword;
2573
2577
  /**
2574
- * A list of up to three IP addresses of DNS servers or domain controllers in the self-managed AD directory.
2578
+ * A list of up to three DNS server or domain controller IP addresses in your self-managed AD domain.
2575
2579
  */
2576
2580
  DnsIps?: DnsIps;
2581
+ /**
2582
+ * Specifies an updated fully qualified domain name of your self-managed AD configuration.
2583
+ */
2584
+ DomainName?: ActiveDirectoryFullyQualifiedName;
2585
+ /**
2586
+ * Specifies an updated fully qualified distinguished name of the organization unit within your self-managed AD.
2587
+ */
2588
+ OrganizationalUnitDistinguishedName?: OrganizationalUnitDistinguishedName;
2589
+ /**
2590
+ * Specifies the updated name of the self-managed AD domain group whose members are granted administrative privileges for the Amazon FSx resource.
2591
+ */
2592
+ FileSystemAdministratorsGroup?: FileSystemAdministratorsGroupName;
2577
2593
  }
2578
2594
  export interface Snapshot {
2579
2595
  ResourceARN?: ResourceARN;
@@ -2695,7 +2711,7 @@ declare namespace FSx {
2695
2711
  export type SucceededCount = number;
2696
2712
  export interface SvmActiveDirectoryConfiguration {
2697
2713
  /**
2698
- * The NetBIOS name of the Active Directory computer object that is joined to your SVM.
2714
+ * The NetBIOS name of the AD computer object to which the SVM is joined.
2699
2715
  */
2700
2716
  NetBiosName?: NetBiosAlias;
2701
2717
  SelfManagedActiveDirectoryConfiguration?: SelfManagedActiveDirectoryAttributes;
@@ -2847,16 +2863,16 @@ declare namespace FSx {
2847
2863
  AutomaticBackupRetentionDays?: AutomaticBackupRetentionDays;
2848
2864
  DailyAutomaticBackupStartTime?: DailyTime;
2849
2865
  /**
2850
- * The ONTAP administrative password for the fsxadmin user.
2866
+ * Update the password for the fsxadmin user by entering a new password. You use the fsxadmin user to access the NetApp ONTAP CLI and REST API to manage your file system resources. For more information, see Managing resources using NetApp Application.
2851
2867
  */
2852
2868
  FsxAdminPassword?: AdminPassword;
2853
2869
  WeeklyMaintenanceStartTime?: WeeklyTime;
2854
2870
  /**
2855
- * The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of an IOPS mode (AUTOMATIC or USER_PROVISIONED), and in the case of USER_PROVISIONED IOPS, the total number of SSD IOPS provisioned.
2871
+ * The SSD IOPS (input output operations per second) configuration for an Amazon FSx for NetApp ONTAP file system. The default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per GB of storage. The configuration consists of an IOPS mode (AUTOMATIC or USER_PROVISIONED), and in the case of USER_PROVISIONED IOPS, the total number of SSD IOPS provisioned. For more information, see Updating SSD storage capacity and IOPS.
2856
2872
  */
2857
2873
  DiskIopsConfiguration?: DiskIopsConfiguration;
2858
2874
  /**
2859
- * Specifies the throughput of an FSx for NetApp ONTAP file system, measured in megabytes per second (MBps). Valid values are 128, 256, 512, 1024, 2048, and 4096 MBps.
2875
+ * Enter a new value to change the amount of throughput capacity for the file system. Throughput capacity is measured in megabytes per second (MBps). Valid values are 128, 256, 512, 1024, 2048, and 4096 MBps. For more information, see Managing throughput capacity in the FSx for ONTAP User Guide.
2860
2876
  */
2861
2877
  ThroughputCapacity?: MegabytesPerSecond;
2862
2878
  /**
@@ -2896,7 +2912,7 @@ declare namespace FSx {
2896
2912
  */
2897
2913
  ClientRequestToken?: ClientRequestToken;
2898
2914
  /**
2899
- * Use this parameter to increase the storage capacity of an FSx for Windows File Server, FSx for Lustre, FSx for OpenZFS, or FSx for ONTAP file system. Specifies the storage capacity target value, in GiB, to increase the storage capacity for the file system that you're updating. You can't make a storage capacity increase request if there is an existing storage capacity increase request in progress. For Lustre file systems, the storage capacity target value can be the following: For SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 SSD deployment types, valid values are in multiples of 2400 GiB. The value must be greater than the current storage capacity. For PERSISTENT HDD file systems, valid values are multiples of 6000 GiB for 12-MBps throughput per TiB file systems and multiples of 1800 GiB for 40-MBps throughput per TiB file systems. The values must be greater than the current storage capacity. For SCRATCH_1 file systems, you can't increase the storage capacity. For more information, see Managing storage and throughput capacity in the FSx for Lustre User Guide. For FSx for OpenZFS file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity in the FSx for OpenZFS User Guide. For Windows file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. To increase storage capacity, the file system must have at least 16 MBps of throughput capacity. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide. For ONTAP file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.
2915
+ * Use this parameter to increase the storage capacity of an FSx for Windows File Server, FSx for Lustre, FSx for OpenZFS, or FSx for ONTAP file system. Specifies the storage capacity target value, in GiB, to increase the storage capacity for the file system that you're updating. You can't make a storage capacity increase request if there is an existing storage capacity increase request in progress. For Lustre file systems, the storage capacity target value can be the following: For SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 SSD deployment types, valid values are in multiples of 2400 GiB. The value must be greater than the current storage capacity. For PERSISTENT HDD file systems, valid values are multiples of 6000 GiB for 12-MBps throughput per TiB file systems and multiples of 1800 GiB for 40-MBps throughput per TiB file systems. The values must be greater than the current storage capacity. For SCRATCH_1 file systems, you can't increase the storage capacity. For more information, see Managing storage and throughput capacity in the FSx for Lustre User Guide. For FSx for OpenZFS file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity in the FSx for OpenZFS User Guide. For Windows file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. To increase storage capacity, the file system must have at least 16 MBps of throughput capacity. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide. For ONTAP file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. For more information, see Managing storage capacity and provisioned IOPS in the Amazon FSx for NetApp ONTAP User Guide.
2900
2916
  */
2901
2917
  StorageCapacity?: StorageCapacity;
2902
2918
  /**
@@ -2906,7 +2922,7 @@ declare namespace FSx {
2906
2922
  LustreConfiguration?: UpdateFileSystemLustreConfiguration;
2907
2923
  OntapConfiguration?: UpdateFileSystemOntapConfiguration;
2908
2924
  /**
2909
- * The configuration updates for an Amazon FSx for OpenZFS file system.
2925
+ * The configuration updates for an FSx for OpenZFS file system.
2910
2926
  */
2911
2927
  OpenZFSConfiguration?: UpdateFileSystemOpenZFSConfiguration;
2912
2928
  }
@@ -3021,7 +3037,7 @@ declare namespace FSx {
3021
3037
  }
3022
3038
  export interface UpdateStorageVirtualMachineRequest {
3023
3039
  /**
3024
- * Updates the Microsoft Active Directory (AD) configuration for an SVM that is joined to an AD.
3040
+ * Specifies updates to an SVM's Microsoft Active Directory (AD) configuration.
3025
3041
  */
3026
3042
  ActiveDirectoryConfiguration?: UpdateSvmActiveDirectoryConfiguration;
3027
3043
  ClientRequestToken?: ClientRequestToken;
@@ -3030,7 +3046,7 @@ declare namespace FSx {
3030
3046
  */
3031
3047
  StorageVirtualMachineId: StorageVirtualMachineId;
3032
3048
  /**
3033
- * Enter a new SvmAdminPassword if you are updating it.
3049
+ * Specifies a new SvmAdminPassword.
3034
3050
  */
3035
3051
  SvmAdminPassword?: AdminPassword;
3036
3052
  }
@@ -3039,6 +3055,10 @@ declare namespace FSx {
3039
3055
  }
3040
3056
  export interface UpdateSvmActiveDirectoryConfiguration {
3041
3057
  SelfManagedActiveDirectoryConfiguration?: SelfManagedActiveDirectoryConfigurationUpdates;
3058
+ /**
3059
+ * Specifies an updated NetBIOS name of the AD computer object NetBiosName to which an SVM is joined.
3060
+ */
3061
+ NetBiosName?: NetBiosAlias;
3042
3062
  }
3043
3063
  export interface UpdateVolumeRequest {
3044
3064
  ClientRequestToken?: ClientRequestToken;
@@ -950,9 +950,13 @@ declare namespace OpenSearch {
950
950
  export type ConnectionMode = "DIRECT"|"VPC_ENDPOINT"|string;
951
951
  export interface ConnectionProperties {
952
952
  /**
953
- * The endpoint of the remote domain.
953
+ * The Endpoint attribute cannot be modified. The endpoint of the remote domain. Applicable for VPC_ENDPOINT connection mode.
954
954
  */
955
955
  Endpoint?: Endpoint;
956
+ /**
957
+ * The connection properties for cross cluster search.
958
+ */
959
+ CrossClusterSearch?: CrossClusterSearchConnectionProperties;
956
960
  }
957
961
  export type ConnectionStatusMessage = string;
958
962
  export interface CreateDomainRequest {
@@ -1052,6 +1056,10 @@ declare namespace OpenSearch {
1052
1056
  * The connection mode.
1053
1057
  */
1054
1058
  ConnectionMode?: ConnectionMode;
1059
+ /**
1060
+ * The ConnectionProperties for the outbound connection.
1061
+ */
1062
+ ConnectionProperties?: ConnectionProperties;
1055
1063
  }
1056
1064
  export interface CreateOutboundConnectionResponse {
1057
1065
  /**
@@ -1128,6 +1136,12 @@ declare namespace OpenSearch {
1128
1136
  VpcEndpoint: VpcEndpoint;
1129
1137
  }
1130
1138
  export type CreatedAt = Date;
1139
+ export interface CrossClusterSearchConnectionProperties {
1140
+ /**
1141
+ * Status of SkipUnavailable param for outbound connection.
1142
+ */
1143
+ SkipUnavailable?: SkipUnavailableStatus;
1144
+ }
1131
1145
  export interface DeleteDomainRequest {
1132
1146
  /**
1133
1147
  * The name of the domain you want to permanently delete.
@@ -2971,6 +2985,7 @@ declare namespace OpenSearch {
2971
2985
  OptionalDeployment?: Boolean;
2972
2986
  }
2973
2987
  export type ServiceUrl = string;
2988
+ export type SkipUnavailableStatus = "ENABLED"|"DISABLED"|string;
2974
2989
  export interface SnapshotOptions {
2975
2990
  /**
2976
2991
  * The time, in UTC format, when OpenSearch Service takes a daily automated snapshot of the specified domain. Default is 0 hours.