cdk-comprehend-s3olap 2.0.198 → 2.0.199

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. package/.jsii +3 -3
  2. package/lib/cdk-comprehend-s3olap.js +2 -2
  3. package/lib/comprehend-lambdas.js +2 -2
  4. package/lib/iam-roles.js +4 -4
  5. package/node_modules/aws-sdk/CHANGELOG.md +10 -1
  6. package/node_modules/aws-sdk/README.md +1 -1
  7. package/node_modules/aws-sdk/apis/appstream-2016-12-01.min.json +523 -212
  8. package/node_modules/aws-sdk/apis/appstream-2016-12-01.paginators.json +10 -0
  9. package/node_modules/aws-sdk/apis/chime-2018-05-01.min.json +344 -84
  10. package/node_modules/aws-sdk/apis/cleanrooms-2022-02-17.min.json +56 -49
  11. package/node_modules/aws-sdk/apis/dynamodb-2012-08-10.min.json +265 -256
  12. package/node_modules/aws-sdk/apis/glue-2017-03-31.min.json +624 -606
  13. package/node_modules/aws-sdk/apis/sagemaker-2017-07-24.min.json +913 -854
  14. package/node_modules/aws-sdk/clients/appstream.d.ts +426 -8
  15. package/node_modules/aws-sdk/clients/chime.d.ts +268 -268
  16. package/node_modules/aws-sdk/clients/cleanrooms.d.ts +33 -22
  17. package/node_modules/aws-sdk/clients/dynamodb.d.ts +28 -0
  18. package/node_modules/aws-sdk/clients/gamelift.d.ts +6 -6
  19. package/node_modules/aws-sdk/clients/glue.d.ts +23 -0
  20. package/node_modules/aws-sdk/clients/sagemaker.d.ts +89 -10
  21. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +1 -1
  22. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +8 -8
  23. package/node_modules/aws-sdk/dist/aws-sdk.js +268 -259
  24. package/node_modules/aws-sdk/dist/aws-sdk.min.js +72 -72
  25. package/node_modules/aws-sdk/lib/core.js +1 -1
  26. package/node_modules/aws-sdk/lib/dynamodb/document_client.d.ts +28 -0
  27. package/node_modules/aws-sdk/package.json +1 -1
  28. package/package.json +5 -5
@@ -236,11 +236,11 @@ declare class CleanRooms extends Service {
  */
  listTagsForResource(callback?: (err: AWSError, data: CleanRooms.Types.ListTagsForResourceOutput) => void): Request<CleanRooms.Types.ListTagsForResourceOutput, AWSError>;
  /**
- * Creates a protected query that is started by AWS Clean Rooms.
+ * Creates a protected query that is started by Clean Rooms .
  */
  startProtectedQuery(params: CleanRooms.Types.StartProtectedQueryInput, callback?: (err: AWSError, data: CleanRooms.Types.StartProtectedQueryOutput) => void): Request<CleanRooms.Types.StartProtectedQueryOutput, AWSError>;
  /**
- * Creates a protected query that is started by AWS Clean Rooms.
+ * Creates a protected query that is started by Clean Rooms .
  */
  startProtectedQuery(callback?: (err: AWSError, data: CleanRooms.Types.StartProtectedQueryOutput) => void): Request<CleanRooms.Types.StartProtectedQueryOutput, AWSError>;
  /**
@@ -377,9 +377,13 @@ declare namespace CleanRooms {
  */
  joinColumns: AnalysisRuleColumnList;
  /**
- * Control that requires member who runs query to do a join with their configured table and/or other configured table in query
+ * Control that requires member who runs query to do a join with their configured table and/or other configured table in query.
  */
  joinRequired?: JoinRequiredOption;
+ /**
+ * Which logical operators (if any) are to be used in an INNER JOIN match condition. Default is AND.
+ */
+ allowedJoinOperators?: JoinOperatorsList;
  /**
  * The columns that query runners are allowed to select, group by, or filter by.
  */
@@ -398,9 +402,13 @@ declare namespace CleanRooms {
  export type AnalysisRuleColumnName = string;
  export interface AnalysisRuleList {
  /**
- * Columns that can be used to join a configured table with the table of the member who can query and another members' configured tables.
+ * Columns that can be used to join a configured table with the table of the member who can query and other members' configured tables.
  */
  joinColumns: AnalysisRuleListJoinColumnsList;
+ /**
+ * Which logical operators (if any) are to be used in an INNER JOIN match condition. Default is AND.
+ */
+ allowedJoinOperators?: JoinOperatorsList;
  /**
  * Columns that can be listed in the output.
  */
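The allowedJoinOperators field added above is optional on both the aggregation and list analysis rules. A minimal sketch of a list-type rule using the new field (the column names are placeholders, and the surrounding create/update call is omitted):

    import CleanRooms = require('aws-sdk/clients/cleanrooms');

    // Hypothetical list analysis rule; joinColumns/listColumns values are made up.
    const listRule: CleanRooms.AnalysisRuleList = {
      joinColumns: ['hashed_email'],
      allowedJoinOperators: ['AND', 'OR'], // new in this release; AND is the default when omitted
      listColumns: ['segment', 'impressions']
    };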
@@ -480,7 +488,7 @@ declare namespace CleanRooms {
  */
  description?: CollaborationDescription;
  /**
- * The identifier used to reference members of the collaboration. Currently only supports AWS account ID.
+ * The identifier used to reference members of the collaboration. Currently only supports Amazon Web Services account ID.
  */
  creatorAccountId: AccountId;
  /**
@@ -535,7 +543,7 @@ declare namespace CleanRooms {
  */
  name: CollaborationName;
  /**
- * The identifier used to reference members of the collaboration. Currently only supports AWS Account ID.
+ * The identifier used to reference members of the collaboration. Currently only supports Amazon Web Services account ID.
  */
  creatorAccountId: AccountId;
  /**
@@ -595,7 +603,7 @@ declare namespace CleanRooms {
  */
  description?: TableDescription;
  /**
- * The AWS Glue table that this configured table represents.
+ * The Glue table that this configured table represents.
  */
  tableReference: TableReference;
  /**
@@ -615,7 +623,7 @@ declare namespace CleanRooms {
  */
  analysisMethod: AnalysisMethod;
  /**
- * The columns within the underlying AWS Glue table that can be utilized within collaborations.
+ * The columns within the underlying Glue table that can be utilized within collaborations.
  */
  allowedColumns: AllowedColumnList;
  }
@@ -881,7 +889,7 @@ declare namespace CleanRooms {
  */
  description?: TableDescription;
  /**
- * A reference to the AWS Glue table being configured.
+ * A reference to the Glue table being configured.
  */
  tableReference: TableReference;
  /**
@@ -1123,17 +1131,20 @@ declare namespace CleanRooms {
  */
  schema: Schema;
  }
- export type GlueResourceName = string;
+ export type GlueDatabaseName = string;
+ export type GlueTableName = string;
  export interface GlueTableReference {
  /**
- * The name of the AWS Glue table.
+ * The name of the Glue table.
  */
- tableName: GlueResourceName;
+ tableName: GlueTableName;
  /**
- * The name of the database the AWS Glue table belongs to.
+ * The name of the database the Glue table belongs to.
  */
- databaseName: GlueResourceName;
+ databaseName: GlueDatabaseName;
  }
+ export type JoinOperator = "OR"|"AND"|string;
+ export type JoinOperatorsList = JoinOperator[];
  export type JoinRequiredOption = "QUERY_RUNNER"|string;
  export type KeyPrefix = string;
  export interface ListCollaborationsInput {
@@ -1327,7 +1338,7 @@ declare namespace CleanRooms {
  export type MemberList = MemberSpecification[];
  export interface MemberSpecification {
  /**
- * The identifier used to reference members of the collaboration. Currently only supports AWS Account ID.
+ * The identifier used to reference members of the collaboration. Currently only supports Amazon Web Services account ID.
  */
  accountId: AccountId;
  /**
@@ -1342,7 +1353,7 @@ declare namespace CleanRooms {
  export type MemberStatus = "INVITED"|"ACTIVE"|"LEFT"|"REMOVED"|string;
  export interface MemberSummary {
  /**
- * The identifier used to reference members of the collaboration. Currently only supports AWS Account ID.
+ * The identifier used to reference members of the collaboration. Currently only supports Amazon Web Services account ID.
  */
  accountId: AccountId;
  /**
@@ -1393,7 +1404,7 @@ declare namespace CleanRooms {
  */
  collaborationId: UUID;
  /**
- * The identifier used to reference members of the collaboration. Currently only supports AWS account ID.
+ * The identifier used to reference members of the collaboration. Currently only supports Amazon Web Services account ID.
  */
  collaborationCreatorAccountId: AccountId;
  /**
@@ -1447,7 +1458,7 @@ declare namespace CleanRooms {
  */
  collaborationId: CollaborationIdentifier;
  /**
- * The identifier of the AWS principal that created the collaboration. Currently only supports AWS account ID.
+ * The identifier of the Amazon Web Services principal that created the collaboration. Currently only supports Amazon Web Services account ID.
  */
  collaborationCreatorAccountId: AccountId;
  /**
@@ -1579,7 +1590,7 @@ declare namespace CleanRooms {
  /**
  * The query string to be submitted.
  */
- queryString: ProtectedQuerySQLParametersQueryStringString;
+ queryString?: ProtectedQuerySQLParametersQueryStringString;
  }
  export type ProtectedQuerySQLParametersQueryStringString = string;
  export interface ProtectedQueryStatistics {
@@ -1635,7 +1646,7 @@ declare namespace CleanRooms {
  */
  analysisMethod?: AnalysisMethod;
  /**
- * The unique account ID for the AWS account that owns the schema.
+ * The unique account ID for the Amazon Web Services account that owns the schema.
  */
  creatorAccountId: AccountId;
  /**
@@ -1678,7 +1689,7 @@ declare namespace CleanRooms {
  */
  type: SchemaType;
  /**
- * The unique account ID for the AWS account that owns the schema.
+ * The unique account ID for the Amazon Web Services account that owns the schema.
  */
  creatorAccountId: AccountId;
  /**
@@ -1738,7 +1749,7 @@ declare namespace CleanRooms {
  export type TableDescription = string;
  export interface TableReference {
  /**
- * If present, a reference to the AWS Glue table referred to by this table reference.
+ * If present, a reference to the Glue table referred to by this table reference.
  */
  glue?: GlueTableReference;
  }
@@ -792,6 +792,10 @@ declare namespace DynamoDB {
  * The error message associated with the PartiQL batch response.
  */
  Message?: String;
+ /**
+ * The item which caused the condition check to fail. This will be set if ReturnValuesOnConditionCheckFailure is specified as ALL_OLD.
+ */
+ Item?: AttributeMap;
  }
  export type BatchStatementErrorCodeEnum = "ConditionalCheckFailed"|"ItemCollectionSizeLimitExceeded"|"RequestLimitExceeded"|"ValidationError"|"ProvisionedThroughputExceeded"|"TransactionConflict"|"ThrottlingError"|"InternalServerError"|"ResourceNotFound"|"AccessDenied"|"DuplicateItem"|string;
  export interface BatchStatementRequest {
@@ -807,6 +811,10 @@ declare namespace DynamoDB {
  * The read consistency of the PartiQL batch request.
  */
  ConsistentRead?: ConsistentRead;
+ /**
+ * An optional parameter that returns the item attributes for a PartiQL batch request operation that failed a condition check. There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No read capacity units are consumed.
+ */
+ ReturnValuesOnConditionCheckFailure?: ReturnValuesOnConditionCheckFailure;
  }
  export interface BatchStatementResponse {
  /**
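A sketch of how the new per-statement option and the Item field on BatchStatementError might be used together; the table, key, and statement are hypothetical, and the bundled aws-sdk 2.1408.0 or later is assumed:

    import DynamoDB = require('aws-sdk/clients/dynamodb');

    const ddb = new DynamoDB();

    ddb.batchExecuteStatement({
      Statements: [{
        Statement: "UPDATE \"Orders\" SET status='SHIPPED' WHERE id='o-123' AND status='PACKED'",
        ReturnValuesOnConditionCheckFailure: 'ALL_OLD' // ask for the existing item when the condition fails
      }]
    }, (err, data) => {
      if (err) { console.error(err); return; }
      for (const resp of data.Responses || []) {
        // With ALL_OLD requested, a failed condition check is expected to
        // surface the offending item on the per-statement error.
        if (resp.Error && resp.Error.Code === 'ConditionalCheckFailed') {
          console.log('conflicting item:', resp.Error.Item);
        }
      }
    });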
@@ -1213,6 +1221,10 @@ declare namespace DynamoDB {
  * One or more values that can be substituted in an expression. Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following: Available | Backordered | Discontinued You would first need to specify ExpressionAttributeValues as follows: { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} } You could then use these values in an expression, such as this: ProductStatus IN (:avail, :back, :disc) For more information on expression attribute values, see Condition Expressions in the Amazon DynamoDB Developer Guide.
  */
  ExpressionAttributeValues?: ExpressionAttributeValueMap;
+ /**
+ * An optional parameter that returns the item attributes for a DeleteItem operation that failed a condition check. There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No read capacity units are consumed.
+ */
+ ReturnValuesOnConditionCheckFailure?: ReturnValuesOnConditionCheckFailure;
  }
  export interface DeleteItemOutput {
  /**
@@ -1489,6 +1501,10 @@ declare namespace DynamoDB {
  * The maximum number of items to evaluate (not necessarily the number of matching items). If DynamoDB processes the number of items up to the limit while processing the results, it stops the operation and returns the matching values up to that point, along with a key in LastEvaluatedKey to apply in a subsequent operation so you can pick up where you left off. Also, if the processed dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation and returns the matching values up to the limit, and a key in LastEvaluatedKey to apply in a subsequent operation to continue the operation.
  */
  Limit?: PositiveIntegerObject;
+ /**
+ * An optional parameter that returns the item attributes for an ExecuteStatement operation that failed a condition check. There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No read capacity units are consumed.
+ */
+ ReturnValuesOnConditionCheckFailure?: ReturnValuesOnConditionCheckFailure;
  }
  export interface ExecuteStatementOutput {
  /**
@@ -2432,6 +2448,10 @@ declare namespace DynamoDB {
  * The parameter values.
  */
  Parameters?: PreparedStatementParameters;
+ /**
+ * An optional parameter that returns the item attributes for a PartiQL ParameterizedStatement operation that failed a condition check. There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No read capacity units are consumed.
+ */
+ ReturnValuesOnConditionCheckFailure?: ReturnValuesOnConditionCheckFailure;
  }
  export type ParameterizedStatements = ParameterizedStatement[];
  export type PartiQLBatchRequest = BatchStatementRequest[];
@@ -2577,6 +2597,10 @@ declare namespace DynamoDB {
  * One or more values that can be substituted in an expression. Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following: Available | Backordered | Discontinued You would first need to specify ExpressionAttributeValues as follows: { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} } You could then use these values in an expression, such as this: ProductStatus IN (:avail, :back, :disc) For more information on expression attribute values, see Condition Expressions in the Amazon DynamoDB Developer Guide.
  */
  ExpressionAttributeValues?: ExpressionAttributeValueMap;
+ /**
+ * An optional parameter that returns the item attributes for a PutItem operation that failed a condition check. There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No read capacity units are consumed.
+ */
+ ReturnValuesOnConditionCheckFailure?: ReturnValuesOnConditionCheckFailure;
  }
  export type PutItemInputAttributeMap = {[key: string]: AttributeValue};
  export interface PutItemOutput {
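The same option on a plain PutItem might look like the sketch below; the table, key, and condition are made up, and where the returned item surfaces on the error object is an assumption here rather than something this diff shows:

    import DynamoDB = require('aws-sdk/clients/dynamodb');

    const ddb = new DynamoDB();

    const params: DynamoDB.PutItemInput = {
      TableName: 'Orders',                                   // hypothetical table
      Item: { id: { S: 'o-123' }, status: { S: 'PACKED' } },
      ConditionExpression: 'attribute_not_exists(id)',
      ReturnValuesOnConditionCheckFailure: 'ALL_OLD'         // new in this release
    };

    ddb.putItem(params, (err) => {
      if (err && err.code === 'ConditionalCheckFailedException') {
        // Assumption: with ALL_OLD requested, the conflicting item is attached
        // to the service error payload; inspect the raw error to confirm.
        console.log('conditional check failed', (err as any).Item);
      }
    });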
@@ -3724,6 +3748,10 @@ declare namespace DynamoDB {
  * One or more values that can be substituted in an expression. Use the : (colon) character in an expression to dereference an attribute value. For example, suppose that you wanted to check whether the value of the ProductStatus attribute was one of the following: Available | Backordered | Discontinued You would first need to specify ExpressionAttributeValues as follows: { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} } You could then use these values in an expression, such as this: ProductStatus IN (:avail, :back, :disc) For more information on expression attribute values, see Condition Expressions in the Amazon DynamoDB Developer Guide.
  */
  ExpressionAttributeValues?: ExpressionAttributeValueMap;
+ /**
+ * An optional parameter that returns the item attributes for an UpdateItem operation that failed a condition check. There is no additional cost associated with requesting a return value aside from the small network and processing overhead of receiving a larger response. No read capacity units are consumed.
+ */
+ ReturnValuesOnConditionCheckFailure?: ReturnValuesOnConditionCheckFailure;
  }
  export interface UpdateItemOutput {
  /**
@@ -20,11 +20,11 @@ declare class GameLift extends Service {
  */
  acceptMatch(callback?: (err: AWSError, data: GameLift.Types.AcceptMatchOutput) => void): Request<GameLift.Types.AcceptMatchOutput, AWSError>;
  /**
- * This operation is used with the Amazon GameLift FleetIQ solution and game server groups. Locates an available game server and temporarily reserves it to host gameplay and players. This operation is called from a game client or client service (such as a matchmaker) to request hosting resources for a new game session. In response, Amazon GameLift FleetIQ locates an available game server, places it in CLAIMED status for 60 seconds, and returns connection information that players can use to connect to the game server. To claim a game server, identify a game server group. You can also specify a game server ID, although this approach bypasses Amazon GameLift FleetIQ placement optimization. Optionally, include game data to pass to the game server at the start of a game session, such as a game map or player information. Filter options may be included to further restrict how a game server is chosen, such as only allowing game servers on ACTIVE instances to be claimed. When a game server is successfully claimed, connection information is returned. A claimed game server's utilization status remains AVAILABLE while the claim status is set to CLAIMED for up to 60 seconds. This time period gives the game server time to update its status to UTILIZED after players join. If the game server's status is not updated within 60 seconds, the game server reverts to unclaimed status and is available to be claimed by another request. The claim time period is a fixed value and is not configurable. If you try to claim a specific game server, this request will fail in the following cases: If the game server utilization status is UTILIZED. If the game server claim status is CLAIMED. If the game server is running on an instance in DRAINING status and provided filter option does not allow placing on DRAINING instances. Learn more Amazon GameLift FleetIQ Guide
+ * This operation is used with the Amazon GameLift FleetIQ solution and game server groups. Locates an available game server and temporarily reserves it to host gameplay and players. This operation is called from a game client or client service (such as a matchmaker) to request hosting resources for a new game session. In response, Amazon GameLift FleetIQ locates an available game server, places it in CLAIMED status for 60 seconds, and returns connection information that players can use to connect to the game server. To claim a game server, identify a game server group. You can also specify a game server ID, although this approach bypasses Amazon GameLift FleetIQ placement optimization. Optionally, include game data to pass to the game server at the start of a game session, such as a game map or player information. Add filter options to further restrict how a game server is chosen, such as only allowing game servers on ACTIVE instances to be claimed. When a game server is successfully claimed, connection information is returned. A claimed game server's utilization status remains AVAILABLE while the claim status is set to CLAIMED for up to 60 seconds. This time period gives the game server time to update its status to UTILIZED after players join. If the game server's status is not updated within 60 seconds, the game server reverts to unclaimed status and is available to be claimed by another request. The claim time period is a fixed value and is not configurable. If you try to claim a specific game server, this request will fail in the following cases: If the game server utilization status is UTILIZED. If the game server claim status is CLAIMED. If the game server is running on an instance in DRAINING status and the provided filter option does not allow placing on DRAINING instances. Learn more Amazon GameLift FleetIQ Guide
  */
  claimGameServer(params: GameLift.Types.ClaimGameServerInput, callback?: (err: AWSError, data: GameLift.Types.ClaimGameServerOutput) => void): Request<GameLift.Types.ClaimGameServerOutput, AWSError>;
  /**
- * This operation is used with the Amazon GameLift FleetIQ solution and game server groups. Locates an available game server and temporarily reserves it to host gameplay and players. This operation is called from a game client or client service (such as a matchmaker) to request hosting resources for a new game session. In response, Amazon GameLift FleetIQ locates an available game server, places it in CLAIMED status for 60 seconds, and returns connection information that players can use to connect to the game server. To claim a game server, identify a game server group. You can also specify a game server ID, although this approach bypasses Amazon GameLift FleetIQ placement optimization. Optionally, include game data to pass to the game server at the start of a game session, such as a game map or player information. Filter options may be included to further restrict how a game server is chosen, such as only allowing game servers on ACTIVE instances to be claimed. When a game server is successfully claimed, connection information is returned. A claimed game server's utilization status remains AVAILABLE while the claim status is set to CLAIMED for up to 60 seconds. This time period gives the game server time to update its status to UTILIZED after players join. If the game server's status is not updated within 60 seconds, the game server reverts to unclaimed status and is available to be claimed by another request. The claim time period is a fixed value and is not configurable. If you try to claim a specific game server, this request will fail in the following cases: If the game server utilization status is UTILIZED. If the game server claim status is CLAIMED. If the game server is running on an instance in DRAINING status and provided filter option does not allow placing on DRAINING instances. Learn more Amazon GameLift FleetIQ Guide
+ * This operation is used with the Amazon GameLift FleetIQ solution and game server groups. Locates an available game server and temporarily reserves it to host gameplay and players. This operation is called from a game client or client service (such as a matchmaker) to request hosting resources for a new game session. In response, Amazon GameLift FleetIQ locates an available game server, places it in CLAIMED status for 60 seconds, and returns connection information that players can use to connect to the game server. To claim a game server, identify a game server group. You can also specify a game server ID, although this approach bypasses Amazon GameLift FleetIQ placement optimization. Optionally, include game data to pass to the game server at the start of a game session, such as a game map or player information. Add filter options to further restrict how a game server is chosen, such as only allowing game servers on ACTIVE instances to be claimed. When a game server is successfully claimed, connection information is returned. A claimed game server's utilization status remains AVAILABLE while the claim status is set to CLAIMED for up to 60 seconds. This time period gives the game server time to update its status to UTILIZED after players join. If the game server's status is not updated within 60 seconds, the game server reverts to unclaimed status and is available to be claimed by another request. The claim time period is a fixed value and is not configurable. If you try to claim a specific game server, this request will fail in the following cases: If the game server utilization status is UTILIZED. If the game server claim status is CLAIMED. If the game server is running on an instance in DRAINING status and the provided filter option does not allow placing on DRAINING instances. Learn more Amazon GameLift FleetIQ Guide
  */
  claimGameServer(callback?: (err: AWSError, data: GameLift.Types.ClaimGameServerOutput) => void): Request<GameLift.Types.ClaimGameServerOutput, AWSError>;
  /**
@@ -1460,7 +1460,7 @@ declare namespace GameLift {
  */
  NotificationTarget?: SnsArnStringModel;
  /**
- * The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match. This parameter is not used if FlexMatchMode is set to STANDALONE.
+ * The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 10-person team, and the additional player count is set to 2, 10 players will be selected for the match and 2 more player slots will be open for future players. This parameter is not used if FlexMatchMode is set to STANDALONE.
  */
  AdditionalPlayerCount?: WholeNumber;
  /**
@@ -3614,7 +3614,7 @@ declare namespace GameLift {
  */
  NotificationTarget?: SnsArnStringModel;
  /**
- * The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match. This parameter is not used when FlexMatchMode is set to STANDALONE.
+ * The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 10-person team, and the additional player count is set to 2, 10 players will be selected for the match and 2 more player slots will be open for future players. This parameter is not used when FlexMatchMode is set to STANDALONE.
  */
  AdditionalPlayerCount?: WholeNumber;
  /**
@@ -3729,7 +3729,7 @@ declare namespace GameLift {
  export type NonNegativeDouble = number;
  export type NonNegativeLimitedLengthDouble = string;
  export type NonZeroAndMaxString = string;
- export type OperatingSystem = "WINDOWS_2012"|"AMAZON_LINUX"|"AMAZON_LINUX_2"|"WINDOWS_2016"|string;
+ export type OperatingSystem = "WINDOWS_2012"|"AMAZON_LINUX"|"AMAZON_LINUX_2"|"WINDOWS_2016"|"AMAZON_LINUX_2023"|string;
  export interface PlacedPlayerSession {
  /**
  * A unique identifier for a player that is associated with this player session.
@@ -4758,7 +4758,7 @@ declare namespace GameLift {
  */
  NotificationTarget?: SnsArnStringModel;
  /**
- * The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 12-person team, and the additional player count is set to 2, only 10 players are selected for the match. This parameter is not used if FlexMatchMode is set to STANDALONE.
+ * The number of player slots in a match to keep open for future players. For example, if the configuration's rule set specifies a match for a single 10-person team, and the additional player count is set to 2, 10 players will be selected for the match and 2 more player slots will be open for future players. This parameter is not used if FlexMatchMode is set to STANDALONE.
  */
  AdditionalPlayerCount?: WholeNumber;
  /**
@@ -3531,6 +3531,10 @@ declare namespace Glue {
  * Specifies Delta data store targets.
  */
  DeltaTargets?: DeltaTargetList;
+ /**
+ * Specifies Apache Iceberg data store targets.
+ */
+ IcebergTargets?: IcebergTargetList;
  }
  export interface CrawlsFilter {
  /**
@@ -8057,6 +8061,25 @@ declare namespace Glue {
  export type GrokPattern = string;
  export type HashString = string;
  export type HudiTargetCompressionType = "gzip"|"lzo"|"uncompressed"|"snappy"|string;
+ export interface IcebergTarget {
+ /**
+ * One or more Amazon S3 paths that contains Iceberg metadata folders as s3://bucket/prefix.
+ */
+ Paths?: PathList;
+ /**
+ * The name of the connection to use to connect to the Iceberg target.
+ */
+ ConnectionName?: ConnectionName;
+ /**
+ * A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.
+ */
+ Exclusions?: PathList;
+ /**
+ * The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Iceberg metadata folder in your Amazon S3 path. Used to limit the crawler run time.
+ */
+ MaximumTraversalDepth?: NullableInteger;
+ }
+ export type IcebergTargetList = IcebergTarget[];
  export type IdString = string;
  export interface ImportCatalogToGlueRequest {
  /**
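A sketch of a crawler definition that uses the new Iceberg target type; the crawler name, role ARN, database, and S3 path are placeholders:

    import Glue = require('aws-sdk/clients/glue');

    const glue = new Glue();

    const params: Glue.CreateCrawlerRequest = {
      Name: 'iceberg-sales-crawler',
      Role: 'arn:aws:iam::123456789012:role/GlueCrawlerRole',
      DatabaseName: 'analytics',
      Targets: {
        IcebergTargets: [{
          Paths: ['s3://example-bucket/warehouse/sales/'],
          MaximumTraversalDepth: 10 // cap how deep the crawler looks for metadata folders
        }]
      }
    };

    glue.createCrawler(params, (err) => {
      if (err) console.error(err);
    });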
@@ -2666,6 +2666,8 @@ declare namespace SageMaker {
  AgentCount: Long;
  }
  export type AgentVersions = AgentVersion[];
+ export type AggregationTransformationValue = "sum"|"avg"|"first"|"min"|"max"|string;
+ export type AggregationTransformations = {[key: string]: AggregationTransformationValue};
  export interface Alarm {
  /**
  * The name of a CloudWatch alarm in your account.
@@ -3207,11 +3209,11 @@ declare namespace SageMaker {
  }
  export interface AutoMLJobChannel {
  /**
- * The type of channel. Defines whether the data are used for training or validation. The default value is training. Channels for training and validation must share the same ContentType
+ * The type of channel. Defines whether the data are used for training or validation. The default value is training. Channels for training and validation must share the same ContentType The type of channel defaults to training for the time-series forecasting problem type.
  */
  ChannelType?: AutoMLChannelType;
  /**
- * The content type of the data from the input source. The following are the allowed content types for different problems: For Tabular problem types: text/csv;header=present or x-application/vnd.amazon+parquet. The default value is text/csv;header=present. For ImageClassification: image/png, image/jpeg, or image/*. The default value is image/*. For TextClassification: text/csv;header=present or x-application/vnd.amazon+parquet. The default value is text/csv;header=present.
+ * The content type of the data from the input source. The following are the allowed content types for different problems: For tabular problem types: text/csv;header=present or x-application/vnd.amazon+parquet. The default value is text/csv;header=present. For image classification: image/png, image/jpeg, or image/*. The default value is image/*. For text classification: text/csv;header=present or x-application/vnd.amazon+parquet. The default value is text/csv;header=present. For time-series forecasting: text/csv;header=present or x-application/vnd.amazon+parquet. The default value is text/csv;header=present.
  */
  ContentType?: ContentType;
  /**
@@ -3225,7 +3227,7 @@ declare namespace SageMaker {
  }
  export interface AutoMLJobCompletionCriteria {
  /**
- * The maximum number of times a training job is allowed to run. For job V2s (jobs created by calling CreateAutoMLJobV2), the supported value is 1.
+ * The maximum number of times a training job is allowed to run. For text and image classification, as well as time-series forecasting problem types, the supported value is 1. For tabular problem types, the maximum value is 750.
  */
  MaxCandidates?: MaxCandidates;
  /**
@@ -3263,12 +3265,12 @@ declare namespace SageMaker {
  export type AutoMLJobName = string;
  export interface AutoMLJobObjective {
  /**
- * The name of the objective metric used to measure the predictive quality of a machine learning system. During training, the model's parameters are updated iteratively to optimize its performance based on the feedback provided by the objective metric when evaluating the model on the validation dataset. For the list of all available metrics supported by Autopilot, see Autopilot metrics. If you do not specify a metric explicitly, the default behavior is to automatically use: For tabular problem types: Regression: MSE. Binary classification: F1. Multiclass classification: Accuracy. For image or text classification problem types: Accuracy
+ * The name of the objective metric used to measure the predictive quality of a machine learning system. During training, the model's parameters are updated iteratively to optimize its performance based on the feedback provided by the objective metric when evaluating the model on the validation dataset. For the list of all available metrics supported by Autopilot, see Autopilot metrics. If you do not specify a metric explicitly, the default behavior is to automatically use: For tabular problem types: Regression: MSE. Binary classification: F1. Multiclass classification: Accuracy. For image or text classification problem types: Accuracy For time-series forecasting problem types: AverageWeightedQuantileLoss
  */
  MetricName: AutoMLMetricEnum;
  }
  export type AutoMLJobObjectiveType = "Maximize"|"Minimize"|string;
- export type AutoMLJobSecondaryStatus = "Starting"|"AnalyzingData"|"FeatureEngineering"|"ModelTuning"|"MaxCandidatesReached"|"Failed"|"Stopped"|"MaxAutoMLJobRuntimeReached"|"Stopping"|"CandidateDefinitionsGenerated"|"GeneratingExplainabilityReport"|"Completed"|"ExplainabilityError"|"DeployingModel"|"ModelDeploymentError"|"GeneratingModelInsightsReport"|"ModelInsightsError"|"TrainingModels"|string;
+ export type AutoMLJobSecondaryStatus = "Starting"|"AnalyzingData"|"FeatureEngineering"|"ModelTuning"|"MaxCandidatesReached"|"Failed"|"Stopped"|"MaxAutoMLJobRuntimeReached"|"Stopping"|"CandidateDefinitionsGenerated"|"GeneratingExplainabilityReport"|"Completed"|"ExplainabilityError"|"DeployingModel"|"ModelDeploymentError"|"GeneratingModelInsightsReport"|"ModelInsightsError"|"TrainingModels"|"PreTraining"|string;
  export type AutoMLJobStatus = "Completed"|"InProgress"|"Failed"|"Stopped"|"Stopping"|string;
  export interface AutoMLJobStepMetadata {
  /**
@@ -3316,8 +3318,8 @@ declare namespace SageMaker {
  PartialFailureReasons?: AutoMLPartialFailureReasons;
  }
  export type AutoMLMaxResults = number;
- export type AutoMLMetricEnum = "Accuracy"|"MSE"|"F1"|"F1macro"|"AUC"|"RMSE"|"MAE"|"R2"|"BalancedAccuracy"|"Precision"|"PrecisionMacro"|"Recall"|"RecallMacro"|string;
- export type AutoMLMetricExtendedEnum = "Accuracy"|"MSE"|"F1"|"F1macro"|"AUC"|"RMSE"|"MAE"|"R2"|"BalancedAccuracy"|"Precision"|"PrecisionMacro"|"Recall"|"RecallMacro"|"LogLoss"|"InferenceLatency"|string;
+ export type AutoMLMetricEnum = "Accuracy"|"MSE"|"F1"|"F1macro"|"AUC"|"RMSE"|"MAE"|"R2"|"BalancedAccuracy"|"Precision"|"PrecisionMacro"|"Recall"|"RecallMacro"|"MAPE"|"MASE"|"WAPE"|"AverageWeightedQuantileLoss"|string;
+ export type AutoMLMetricExtendedEnum = "Accuracy"|"MSE"|"F1"|"F1macro"|"AUC"|"RMSE"|"MAE"|"R2"|"BalancedAccuracy"|"Precision"|"PrecisionMacro"|"Recall"|"RecallMacro"|"LogLoss"|"InferenceLatency"|"MAPE"|"MASE"|"WAPE"|"AverageWeightedQuantileLoss"|string;
  export type AutoMLMode = "AUTO"|"ENSEMBLING"|"HYPERPARAMETER_TUNING"|string;
  export type AutoMLNameContains = string;
  export interface AutoMLOutputDataConfig {
@@ -3350,8 +3352,12 @@ declare namespace SageMaker {
  * Settings used to configure an AutoML job V2 for a tabular problem type (regression, classification).
  */
  TabularJobConfig?: TabularJobConfig;
+ /**
+ * Settings used to configure an AutoML job V2 for a time-series forecasting problem type.
+ */
+ TimeSeriesForecastingJobConfig?: TimeSeriesForecastingJobConfig;
  }
- export type AutoMLProblemTypeConfigName = "ImageClassification"|"TextClassification"|"Tabular"|string;
+ export type AutoMLProblemTypeConfigName = "ImageClassification"|"TextClassification"|"Tabular"|"TimeSeriesForecasting"|string;
  export interface AutoMLProblemTypeResolvedAttributes {
  /**
  * Defines the resolved attributes for the TABULAR problem type.
@@ -3419,6 +3425,7 @@ declare namespace SageMaker {
  }
  export type AutotuneMode = "Enabled"|string;
  export type AwsManagedHumanLoopRequestSource = "AWS/Rekognition/DetectModerationLabels/Image/V3"|"AWS/Textract/AnalyzeDocument/Forms/V1"|string;
+ export type BacktestResultsLocation = string;
  export interface BatchDataCaptureConfig {
  /**
  * The Amazon S3 location being used to capture the data.
@@ -3608,6 +3615,10 @@ declare namespace SageMaker {
  * The Amazon S3 prefix to the model insight artifacts generated for the AutoML candidate.
  */
  ModelInsights?: ModelInsightsLocation;
+ /**
+ * The Amazon S3 prefix to the accuracy metrics and the inference results observed over the testing window. Available only for the time-series forecasting problem type.
+ */
+ BacktestResults?: BacktestResultsLocation;
  }
  export type CandidateDefinitionNotebookLocation = string;
  export interface CandidateGenerationConfig {
@@ -4422,7 +4433,7 @@ declare namespace SageMaker {
  */
  AutoMLJobName: AutoMLJobName;
  /**
- * An array of channel objects describing the input data and their location. Each channel is a named input source. Similar to the InputDataConfig attribute in the CreateAutoMLJob input parameters. The supported formats depend on the problem type: For Tabular problem types: S3Prefix, ManifestFile. For ImageClassification: S3Prefix, ManifestFile, AugmentedManifestFile. For TextClassification: S3Prefix.
+ * An array of channel objects describing the input data and their location. Each channel is a named input source. Similar to the InputDataConfig attribute in the CreateAutoMLJob input parameters. The supported formats depend on the problem type: For tabular problem types: S3Prefix, ManifestFile. For image classification: S3Prefix, ManifestFile, AugmentedManifestFile. For text classification: S3Prefix. For time-series forecasting: S3Prefix.
  */
  AutoMLJobInputDataConfig: AutoMLJobInputDataConfig;
  /**
@@ -4454,7 +4465,7 @@ declare namespace SageMaker {
  */
  ModelDeployConfig?: ModelDeployConfig;
  /**
- * This structure specifies how to split the data into train and validation datasets. The validation and training datasets must contain the same headers. For jobs created by calling CreateAutoMLJob, the validation dataset must be less than 2 GB in size.
+ * This structure specifies how to split the data into train and validation datasets. The validation and training datasets must contain the same headers. For jobs created by calling CreateAutoMLJob, the validation dataset must be less than 2 GB in size. This attribute must not be set for the time-series forecasting problem type, as Autopilot automatically splits the input dataset into training and validation sets.
  */
  DataSplitConfig?: AutoMLDataSplitConfig;
  }
@@ -11295,6 +11306,10 @@ declare namespace SageMaker {
  }
  export type FileSystemId = string;
  export type FileSystemType = "EFS"|"FSxLustre"|string;
+ export type FillingTransformationMap = {[key: string]: FillingTransformationValue};
+ export type FillingTransformationValue = string;
+ export type FillingTransformations = {[key: string]: FillingTransformationMap};
+ export type FillingType = "frontfill"|"middlefill"|"backfill"|"futurefill"|"frontfill_value"|"middlefill_value"|"backfill_value"|"futurefill_value"|string;
  export interface Filter {
  /**
  * A resource property name. For example, TrainingJobName. For valid property names, see SearchRecord. You must specify a valid property for the resource.
@@ -11388,6 +11403,10 @@ declare namespace SageMaker {
  export type FlowDefinitionTaskKeywords = FlowDefinitionTaskKeyword[];
  export type FlowDefinitionTaskTimeLimitInSeconds = number;
  export type FlowDefinitionTaskTitle = string;
+ export type ForecastFrequency = string;
+ export type ForecastHorizon = number;
+ export type ForecastQuantile = string;
+ export type ForecastQuantiles = ForecastQuantile[];
  export type Framework = "TENSORFLOW"|"KERAS"|"MXNET"|"ONNX"|"PYTORCH"|"XGBOOST"|"TFLITE"|"DARKNET"|"SKLEARN"|string;
  export type FrameworkVersion = string;
  export type GenerateCandidateDefinitionsOnly = boolean;
@@ -11505,6 +11524,8 @@ declare namespace SageMaker {
  }
  export type GitConfigUrl = string;
  export type Group = string;
+ export type GroupingAttributeName = string;
+ export type GroupingAttributeNames = GroupingAttributeName[];
  export type Groups = Group[];
  export type HookParameters = {[key: string]: ConfigValue};
  export type Horovod = boolean;
@@ -12618,6 +12639,7 @@ declare namespace SageMaker {
  export type InvocationsMaxRetries = number;
  export type InvocationsTimeoutInSeconds = number;
  export type IotRoleAlias = string;
+ export type ItemIdentifierAttributeName = string;
  export type JobDurationInSeconds = number;
  export type JobReferenceCode = string;
  export type JobReferenceCodeContains = string;
@@ -20402,6 +20424,51 @@ declare namespace SageMaker {
  TargetLabelColumn?: TargetLabelColumn;
  }
  export type ThingName = string;
+ export interface TimeSeriesConfig {
+ /**
+ * The name of the column representing the target variable that you want to predict for each item in your dataset. The data type of the target variable must be numerical.
+ */
+ TargetAttributeName: TargetAttributeName;
+ /**
+ * The name of the column indicating a point in time at which the target value of a given item is recorded.
+ */
+ TimestampAttributeName: TimestampAttributeName;
+ /**
+ * The name of the column that represents the set of item identifiers for which you want to predict the target value.
+ */
+ ItemIdentifierAttributeName: ItemIdentifierAttributeName;
+ /**
+ * A set of columns names that can be grouped with the item identifier column to create a composite key for which a target value is predicted.
+ */
+ GroupingAttributeNames?: GroupingAttributeNames;
+ }
+ export interface TimeSeriesForecastingJobConfig {
+ /**
+ * A URL to the Amazon S3 data source containing additional selected features that complement the target, itemID, timestamp, and grouped columns set in TimeSeriesConfig. When not provided, the AutoML job V2 includes all the columns from the original dataset that are not already declared in TimeSeriesConfig. If provided, the AutoML job V2 only considers these additional columns as a complement to the ones declared in TimeSeriesConfig. You can input FeatureAttributeNames (optional) in JSON format as shown below: { "FeatureAttributeNames":["col1", "col2", ...] }. You can also specify the data type of the feature (optional) in the format shown below: { "FeatureDataTypes":{"col1":"numeric", "col2":"categorical" ... } } Autopilot supports the following data types: numeric, categorical, text, and datetime. These column keys must not include any column set in TimeSeriesConfig. When not provided, the AutoML job V2 includes all the columns from the original dataset that are not already declared in TimeSeriesConfig. If provided, the AutoML job V2 only considers these additional columns as a complement to the ones declared in TimeSeriesConfig. Autopilot supports the following data types: numeric, categorical, text, and datetime.
+ */
+ FeatureSpecificationS3Uri?: S3Uri;
+ CompletionCriteria?: AutoMLJobCompletionCriteria;
+ /**
+ * The frequency of predictions in a forecast. Valid intervals are an integer followed by Y (Year), M (Month), W (Week), D (Day), H (Hour), and min (Minute). For example, 1D indicates every day and 15min indicates every 15 minutes. The value of a frequency must not overlap with the next larger frequency. For example, you must use a frequency of 1H instead of 60min. The valid values for each frequency are the following: Minute - 1-59 Hour - 1-23 Day - 1-6 Week - 1-4 Month - 1-11 Year - 1
+ */
+ ForecastFrequency: ForecastFrequency;
+ /**
+ * The number of time-steps that the model predicts. The forecast horizon is also called the prediction length. The maximum forecast horizon is the lesser of 500 time-steps or 1/4 of the time-steps in the dataset.
+ */
+ ForecastHorizon: ForecastHorizon;
+ /**
+ * The quantiles used to train the model for forecasts at a specified quantile. You can specify quantiles from 0.01 (p1) to 0.99 (p99), by increments of 0.01 or higher. Up to five forecast quantiles can be specified. When ForecastQuantiles is not provided, the AutoML job uses the quantiles p10, p50, and p90 as default.
+ */
+ ForecastQuantiles?: ForecastQuantiles;
+ /**
+ * The transformations modifying specific attributes of the time-series, such as filling strategies for missing values.
+ */
+ Transformations?: TimeSeriesTransformations;
+ /**
+ * The collection of components that defines the time-series.
+ */
+ TimeSeriesConfig: TimeSeriesConfig;
+ }
  export interface TimeSeriesForecastingSettings {
  /**
  * Describes whether time series forecasting is enabled or disabled in the Canvas application.
@@ -20412,7 +20479,18 @@ declare namespace SageMaker {
  */
  AmazonForecastRoleArn?: RoleArn;
  }
+ export interface TimeSeriesTransformations {
+ /**
+ * A key value pair defining the filling method for a column, where the key is the column name and the value is an object which defines the filling logic. You can specify multiple filling methods for a single column. The supported filling methods and their corresponding options are: frontfill: none (Supported only for target column) middlefill: zero, value, median, mean, min, max backfill: zero, value, median, mean, min, max futurefill: zero, value, median, mean, min, max To set a filling method to a specific value, set the fill parameter to the chosen filling method value (for example "backfill" : "value"), and define the filling value in an additional parameter prefixed with "_value". For example, to set backfill to a value of 2, you must include two parameters: "backfill": "value" and "backfill_value":"2".
+ */
+ Filling?: FillingTransformations;
+ /**
+ * A key value pair defining the aggregation method for a column, where the key is the column name and the value is the aggregation method. The supported aggregation methods are sum (default), avg, first, min, max. Aggregation is only supported for the target column.
+ */
+ Aggregation?: AggregationTransformations;
+ }
  export type Timestamp = Date;
+ export type TimestampAttributeName = string;
  export type TrafficDurationInSeconds = number;
  export interface TrafficPattern {
  /**
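Taken together, the new time-series types added above might be assembled like the sketch below before being passed as AutoMLProblemTypeConfig.TimeSeriesForecastingJobConfig in a CreateAutoMLJobV2 request; all column names and values are illustrative only:

    import SageMaker = require('aws-sdk/clients/sagemaker');

    const tsJobConfig: SageMaker.TimeSeriesForecastingJobConfig = {
      ForecastFrequency: '1D',                  // one prediction per day
      ForecastHorizon: 30,                      // predict 30 time-steps ahead
      ForecastQuantiles: ['p10', 'p50', 'p90'],
      TimeSeriesConfig: {
        TargetAttributeName: 'demand',
        TimestampAttributeName: 'ts',
        ItemIdentifierAttributeName: 'sku',
        GroupingAttributeNames: ['store_id']
      },
      Transformations: {
        Filling: { demand: { middlefill: 'zero', backfill: 'value', backfill_value: '0' } },
        Aggregation: { demand: 'sum' }
      }
    };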
@@ -20944,6 +21022,7 @@ declare namespace SageMaker {
  */
  S3Uri: S3Uri;
  }
+ export type TransformationAttributeName = string;
  export interface Trial {
  /**
  * The name of the trial.
@@ -83,7 +83,7 @@ return /******/ (function(modules) { // webpackBootstrap
  /**
  * @constant
  */
- VERSION: '2.1407.0',
+ VERSION: '2.1408.0',

  /**
  * @api private