aws-sdk 2.1400.0 → 2.1402.0

@@ -148,6 +148,14 @@ declare class Redshift extends Service {
  * Creates a new Amazon Redshift subnet group. You must provide a list of one or more subnets in your existing Amazon Virtual Private Cloud (Amazon VPC) when creating an Amazon Redshift subnet group. For information about subnet groups, go to Amazon Redshift Cluster Subnet Groups in the Amazon Redshift Cluster Management Guide.
  */
  createClusterSubnetGroup(callback?: (err: AWSError, data: Redshift.Types.CreateClusterSubnetGroupResult) => void): Request<Redshift.Types.CreateClusterSubnetGroupResult, AWSError>;
+ /**
+ * Used to create a custom domain name for a cluster. Properties include the custom domain name, the cluster the custom domain is associated with, and the certificate Amazon Resource Name (ARN).
+ */
+ createCustomDomainAssociation(params: Redshift.Types.CreateCustomDomainAssociationMessage, callback?: (err: AWSError, data: Redshift.Types.CreateCustomDomainAssociationResult) => void): Request<Redshift.Types.CreateCustomDomainAssociationResult, AWSError>;
+ /**
+ * Used to create a custom domain name for a cluster. Properties include the custom domain name, the cluster the custom domain is associated with, and the certificate Amazon Resource Name (ARN).
+ */
+ createCustomDomainAssociation(callback?: (err: AWSError, data: Redshift.Types.CreateCustomDomainAssociationResult) => void): Request<Redshift.Types.CreateCustomDomainAssociationResult, AWSError>;
  /**
  * Creates a Redshift-managed VPC endpoint.
  */
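
The two overloads follow the SDK's usual params/callback pattern, and the operation also supports promises. A minimal sketch of attaching a custom domain, assuming an existing cluster and an ACM certificate covering the domain; every identifier below is a hypothetical placeholder:

```typescript
import AWS from 'aws-sdk';

const redshift = new AWS.Redshift({ region: 'us-east-1' });

// Associate a custom domain (and its ACM certificate) with a cluster.
// All identifiers below are hypothetical placeholders.
redshift.createCustomDomainAssociation({
  ClusterIdentifier: 'my-cluster',
  CustomDomainName: 'redshift.example.com',
  CustomDomainCertificateArn:
    'arn:aws:acm:us-east-1:111122223333:certificate/abcd1234-example',
}).promise()
  .then((result) => {
    // The result echoes the association and the certificate expiry time.
    console.log(result.CustomDomainName, result.CustomDomainCertExpiryTime);
  })
  .catch((err) => console.error(err));
```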
@@ -276,6 +284,14 @@ declare class Redshift extends Service {
  * Deletes the specified cluster subnet group.
  */
  deleteClusterSubnetGroup(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
+ /**
+ * Deletes a custom domain association for a cluster.
+ */
+ deleteCustomDomainAssociation(params: Redshift.Types.DeleteCustomDomainAssociationMessage, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
+ /**
+ * Deletes a custom domain association for a cluster.
+ */
+ deleteCustomDomainAssociation(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
  * Deletes a Redshift-managed VPC endpoint.
  */
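
Deleting the association needs only the cluster identifier and returns an empty body. A hedged sketch (cluster name hypothetical):

```typescript
import AWS from 'aws-sdk';

const redshift = new AWS.Redshift({ region: 'us-east-1' });

// Remove the custom domain association from a cluster; only the cluster
// identifier (hypothetical here) is required, and the response is empty.
redshift.deleteCustomDomainAssociation({ ClusterIdentifier: 'my-cluster' })
  .promise()
  .then(() => console.log('custom domain association deleted'))
  .catch((err) => console.error(err));
```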
@@ -444,6 +460,14 @@ declare class Redshift extends Service {
  * Returns properties of provisioned clusters including general cluster properties, cluster database properties, maintenance and backup properties, and security and access properties. This operation supports pagination. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide. If you specify both tag keys and tag values in the same request, Amazon Redshift returns all clusters that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all clusters that have any combination of those values are returned. If both tag keys and values are omitted from the request, clusters are returned regardless of whether they have tag keys or values associated with them.
  */
  describeClusters(callback?: (err: AWSError, data: Redshift.Types.ClustersMessage) => void): Request<Redshift.Types.ClustersMessage, AWSError>;
+ /**
+ * Returns information about custom domain associations for a cluster.
+ */
+ describeCustomDomainAssociations(params: Redshift.Types.DescribeCustomDomainAssociationsMessage, callback?: (err: AWSError, data: Redshift.Types.CustomDomainAssociationsMessage) => void): Request<Redshift.Types.CustomDomainAssociationsMessage, AWSError>;
+ /**
+ * Returns information about custom domain associations for a cluster.
+ */
+ describeCustomDomainAssociations(callback?: (err: AWSError, data: Redshift.Types.CustomDomainAssociationsMessage) => void): Request<Redshift.Types.CustomDomainAssociationsMessage, AWSError>;
  /**
  * Shows the status of any inbound or outbound datashares available in the specified account.
  */
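
The describe call pages through results with Marker and MaxRecords, like the other Redshift Describe* operations in this file. A sketch that drains all pages, using the message types shown further down in this diff:

```typescript
import AWS from 'aws-sdk';

const redshift = new AWS.Redshift({ region: 'us-east-1' });

// Drain all pages of custom domain associations by following the Marker
// field, the same pagination convention as other Redshift Describe* calls.
async function listAllAssociations(): Promise<AWS.Redshift.Types.Association[]> {
  const associations: AWS.Redshift.Types.Association[] = [];
  let marker: string | undefined;
  do {
    const page = await redshift.describeCustomDomainAssociations({
      Marker: marker,
      MaxRecords: 100,
    }).promise();
    associations.push(...(page.Associations ?? []));
    marker = page.Marker;
  } while (marker);
  return associations;
}
```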
@@ -800,6 +824,14 @@ declare class Redshift extends Service {
  * Modifies a cluster subnet group to include the specified list of VPC subnets. The operation replaces the existing list of subnets with the new list of subnets.
  */
  modifyClusterSubnetGroup(callback?: (err: AWSError, data: Redshift.Types.ModifyClusterSubnetGroupResult) => void): Request<Redshift.Types.ModifyClusterSubnetGroupResult, AWSError>;
+ /**
+ * Modifies a custom domain association for a cluster.
+ */
+ modifyCustomDomainAssociation(params: Redshift.Types.ModifyCustomDomainAssociationMessage, callback?: (err: AWSError, data: Redshift.Types.ModifyCustomDomainAssociationResult) => void): Request<Redshift.Types.ModifyCustomDomainAssociationResult, AWSError>;
+ /**
+ * Modifies a custom domain association for a cluster.
+ */
+ modifyCustomDomainAssociation(callback?: (err: AWSError, data: Redshift.Types.ModifyCustomDomainAssociationResult) => void): Request<Redshift.Types.ModifyCustomDomainAssociationResult, AWSError>;
  /**
  * Modifies a Redshift-managed VPC endpoint.
  */
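
Certificate rotation is the typical use of the modify call: keep the domain, swap the ARN. A sketch with hypothetical identifiers:

```typescript
import AWS from 'aws-sdk';

const redshift = new AWS.Redshift({ region: 'us-east-1' });

// Rotate the certificate behind an existing association; the new ARN
// below is a hypothetical replacement certificate.
redshift.modifyCustomDomainAssociation({
  ClusterIdentifier: 'my-cluster',
  CustomDomainName: 'redshift.example.com',
  CustomDomainCertificateArn:
    'arn:aws:acm:us-east-1:111122223333:certificate/efgh5678-example',
}).promise()
  .then((res) => console.log(res.CustomDomainCertExpiryTime))
  .catch((err) => console.error(err));
```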
@@ -1069,6 +1101,21 @@ declare namespace Redshift {
  ConsumerRegion?: String;
  }
  export type AssociatedClusterList = ClusterAssociatedToSchedule[];
+ export interface Association {
+ /**
+ * The Amazon Resource Name (ARN) for the certificate associated with the custom domain.
+ */
+ CustomDomainCertificateArn?: String;
+ /**
+ * The expiration date for the certificate.
+ */
+ CustomDomainCertificateExpiryDate?: TStamp;
+ /**
+ * A list of all associated clusters and domain names tied to a specific certificate.
+ */
+ CertificateAssociations?: CertificateAssociationList;
+ }
+ export type AssociationList = Association[];
  export type AttributeList = AccountAttribute[];
  export type AttributeNameList = String[];
  export type AttributeValueList = AttributeValueTarget[];
@@ -1146,7 +1193,7 @@ declare namespace Redshift {
  */
  SnapshotArn?: String;
  /**
- * The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user or role has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.
+ * The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.
  */
  SnapshotClusterIdentifier?: String;
  /**
@@ -1218,6 +1265,17 @@ declare namespace Redshift {
  */
  ClusterIdentifier: String;
  }
+ export interface CertificateAssociation {
+ /**
+ * The custom domain name for the certificate association.
+ */
+ CustomDomainName?: String;
+ /**
+ * The cluster identifier for the certificate association.
+ */
+ ClusterIdentifier?: String;
+ }
+ export type CertificateAssociationList = CertificateAssociation[];
  export interface Cluster {
  /**
  * The unique identifier of the cluster.
@@ -1427,6 +1485,18 @@ declare namespace Redshift {
  * The status of the reserved-node exchange request. Statuses include in-progress and requested.
  */
  ReservedNodeExchangeStatus?: ReservedNodeExchangeStatus;
+ /**
+ * The custom domain name associated with the cluster.
+ */
+ CustomDomainName?: String;
+ /**
+ * The certificate Amazon Resource Name (ARN) for the custom domain name.
+ */
+ CustomDomainCertificateArn?: String;
+ /**
+ * The expiration date for the certificate associated with the custom domain name.
+ */
+ CustomDomainCertificateExpiryDate?: TStamp;
  }
  export interface ClusterAssociatedToSchedule {
  /**
@@ -1745,7 +1815,7 @@ declare namespace Redshift {
  */
  SourceSnapshotIdentifier: String;
  /**
- * The identifier of the cluster the source snapshot was created from. This parameter is required if your IAM user or role has a policy containing a snapshot resource element that specifies anything other than * for the cluster name. Constraints: Must be the identifier for a valid cluster.
+ * The identifier of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name. Constraints: Must be the identifier for a valid cluster.
  */
  SourceSnapshotClusterIdentifier?: String;
  /**
@@ -1798,11 +1868,11 @@ declare namespace Redshift {
  */
  NodeType: String;
  /**
- * The user name associated with the admin user for the cluster that is being created. Constraints: Must be 1 - 128 alphanumeric characters or hyphens. The user name can't be PUBLIC. Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen. The first character must be a letter. Must not contain a colon (:) or a slash (/). Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.
+ * The user name associated with the admin user account for the cluster that is being created. Constraints: Must be 1 - 128 alphanumeric characters or hyphens. The user name can't be PUBLIC. Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen. The first character must be a letter. Must not contain a colon (:) or a slash (/). Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.
  */
  MasterUsername: String;
  /**
- * The password associated with the admin user for the cluster that is being created. Constraints: Must be between 8 and 64 characters in length. Must contain at least one uppercase letter. Must contain at least one lowercase letter. Must contain one number. Can be any printable ASCII character (ASCII code 33-126) except ' (single quote), " (double quote), \, /, or @.
+ * The password associated with the admin user account for the cluster that is being created. Constraints: Must be between 8 and 64 characters in length. Must contain at least one uppercase letter. Must contain at least one lowercase letter. Must contain one number. Can be any printable ASCII character (ASCII code 33-126) except ' (single quote), " (double quote), \, /, or @.
  */
  MasterUserPassword: String;
  /**
@@ -2001,6 +2071,38 @@ declare namespace Redshift {
  export interface CreateClusterSubnetGroupResult {
  ClusterSubnetGroup?: ClusterSubnetGroup;
  }
+ export interface CreateCustomDomainAssociationMessage {
+ /**
+ * The custom domain name for a custom domain association.
+ */
+ CustomDomainName: CustomDomainNameString;
+ /**
+ * The certificate Amazon Resource Name (ARN) for the custom domain name association.
+ */
+ CustomDomainCertificateArn: CustomDomainCertificateArnString;
+ /**
+ * The cluster identifier that the custom domain is associated with.
+ */
+ ClusterIdentifier: String;
+ }
+ export interface CreateCustomDomainAssociationResult {
+ /**
+ * The custom domain name for the association result.
+ */
+ CustomDomainName?: CustomDomainNameString;
+ /**
+ * The Amazon Resource Name (ARN) for the certificate associated with the custom domain name.
+ */
+ CustomDomainCertificateArn?: CustomDomainCertificateArnString;
+ /**
+ * The identifier of the cluster that the custom domain is associated with.
+ */
+ ClusterIdentifier?: String;
+ /**
+ * The expiration time for the certificate for the custom domain.
+ */
+ CustomDomainCertExpiryTime?: String;
+ }
  export interface CreateEndpointAccessMessage {
  /**
  * The cluster identifier of the cluster to access.
@@ -2223,6 +2325,18 @@ declare namespace Redshift {
  */
  Tags?: TagList;
  }
+ export interface CustomDomainAssociationsMessage {
+ /**
+ * The marker for the custom domain association.
+ */
+ Marker?: String;
+ /**
+ * The associations for the custom domain.
+ */
+ Associations?: AssociationList;
+ }
+ export type CustomDomainCertificateArnString = string;
+ export type CustomDomainNameString = string;
  export interface CustomerStorageMessage {
  /**
  * The total amount of storage currently used for snapshots.
@@ -2399,7 +2513,7 @@ declare namespace Redshift {
  */
  SnapshotIdentifier: String;
  /**
- * The unique identifier of the cluster the snapshot was created from. This parameter is required if your IAM user or role has a policy containing a snapshot resource element that specifies anything other than * for the cluster name. Constraints: Must be the name of valid cluster.
+ * The unique identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name. Constraints: Must be the name of a valid cluster.
  */
  SnapshotClusterIdentifier?: String;
  }
@@ -2413,6 +2527,12 @@ declare namespace Redshift {
  */
  ClusterSubnetGroupName: String;
  }
+ export interface DeleteCustomDomainAssociationMessage {
+ /**
+ * The identifier of the cluster to delete a custom domain association for.
+ */
+ ClusterIdentifier: String;
+ }
  export interface DeleteEndpointAccessMessage {
  /**
  * The Redshift-managed VPC endpoint to delete.
@@ -2591,7 +2711,7 @@ declare namespace Redshift {
  */
  EndTime?: TStamp;
  /**
- * The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100 Constraints: minimum 20, maximum 500.
+ * The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: 100 Constraints: minimum 20, maximum 100.
  */
  MaxRecords?: IntegerOptional;
  /**
@@ -2695,6 +2815,24 @@ declare namespace Redshift {
  */
  TagValues?: TagValueList;
  }
+ export interface DescribeCustomDomainAssociationsMessage {
+ /**
+ * The custom domain name for the custom domain association.
+ */
+ CustomDomainName?: CustomDomainNameString;
+ /**
+ * The certificate Amazon Resource Name (ARN) for the custom domain association.
+ */
+ CustomDomainCertificateArn?: CustomDomainCertificateArnString;
+ /**
+ * The maximum number of response records to return in each call.
+ */
+ MaxRecords?: IntegerOptional;
+ /**
+ * The marker for the custom domain association.
+ */
+ Marker?: String;
+ }
  export interface DescribeDataSharesForConsumerMessage {
  /**
  * The Amazon Resource Name (ARN) of the consumer that returns in the list of datashares.
@@ -3624,7 +3762,7 @@ declare namespace Redshift {
  /**
  * The unique identifier of the cluster that contains the database for which you are requesting credentials. This parameter is case sensitive.
  */
- ClusterIdentifier: String;
+ ClusterIdentifier?: String;
  /**
  * The number of seconds until the returned temporary password expires. Constraint: minimum 900, maximum 3600. Default: 900
  */
@@ -3637,6 +3775,10 @@ declare namespace Redshift {
  * A list of the names of existing database groups that the user named in DbUser will join for the current session, in addition to any group memberships for an existing user. If not specified, a new user is added only to PUBLIC. Database group name constraints Must be 1 to 64 alphanumeric characters or hyphens Must contain only lowercase letters, numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen. First character must be a letter. Must not contain a colon ( : ) or slash ( / ). Cannot be a reserved word. A list of reserved words can be found in Reserved Words in the Amazon Redshift Database Developer Guide.
  */
  DbGroups?: DbGroupList;
+ /**
+ * The custom domain name for the cluster credentials.
+ */
+ CustomDomainName?: String;
  }
  export interface GetClusterCredentialsWithIAMMessage {
  /**
@@ -3646,11 +3788,15 @@ declare namespace Redshift {
  /**
  * The unique identifier of the cluster that contains the database for which you are requesting credentials.
  */
- ClusterIdentifier: String;
+ ClusterIdentifier?: String;
  /**
  * The number of seconds until the returned temporary password expires. Range: 900-3600. Default: 900.
  */
  DurationSeconds?: IntegerOptional;
+ /**
+ * The custom domain name for the cluster credentials.
+ */
+ CustomDomainName?: String;
  }
  export interface GetReservedNodeExchangeConfigurationOptionsInputMessage {
  /**
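
With ClusterIdentifier now optional in both credential messages, a caller can resolve the cluster from the custom domain alone. A hedged sketch; DbName and the response fields come from the service API rather than this diff, so treat them as assumptions:

```typescript
import AWS from 'aws-sdk';

const redshift = new AWS.Redshift({ region: 'us-east-1' });

// Request temporary IAM credentials by custom domain name instead of
// cluster identifier; one of the two should identify the cluster.
redshift.getClusterCredentialsWithIAM({
  DbName: 'dev',                             // hypothetical database name
  CustomDomainName: 'redshift.example.com',  // instead of ClusterIdentifier
  DurationSeconds: 900,
}).promise()
  .then((creds) => console.log(creds.DbUser, creds.Expiration))
  .catch((err) => console.error(err));
```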
@@ -3979,7 +4125,7 @@ declare namespace Redshift {
  */
  VpcSecurityGroupIds?: VpcSecurityGroupIdList;
  /**
- * The new password for the cluster admin user. This change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response. Operations never return the password, so this operation provides a way to regain access to the admin user for a cluster if the password is lost. Default: Uses existing setting. Constraints: Must be between 8 and 64 characters in length. Must contain at least one uppercase letter. Must contain at least one lowercase letter. Must contain one number. Can be any printable ASCII character (ASCII code 33-126) except ' (single quote), " (double quote), \, /, or @.
+ * The new password for the cluster admin user. This change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword element exists in the PendingModifiedValues element of the operation response. Operations never return the password, so this operation provides a way to regain access to the admin user account for a cluster if the password is lost. Default: Uses existing setting. Constraints: Must be between 8 and 64 characters in length. Must contain at least one uppercase letter. Must contain at least one lowercase letter. Must contain one number. Can be any printable ASCII character (ASCII code 33-126) except ' (single quote), " (double quote), \, /, or @.
  */
  MasterUserPassword?: String;
  /**
@@ -4116,6 +4262,38 @@ declare namespace Redshift {
  export interface ModifyClusterSubnetGroupResult {
  ClusterSubnetGroup?: ClusterSubnetGroup;
  }
+ export interface ModifyCustomDomainAssociationMessage {
+ /**
+ * The custom domain name for a changed custom domain association.
+ */
+ CustomDomainName?: CustomDomainNameString;
+ /**
+ * The certificate Amazon Resource Name (ARN) for the changed custom domain association.
+ */
+ CustomDomainCertificateArn?: CustomDomainCertificateArnString;
+ /**
+ * The identifier of the cluster to change a custom domain association for.
+ */
+ ClusterIdentifier: String;
+ }
+ export interface ModifyCustomDomainAssociationResult {
+ /**
+ * The custom domain name associated with the result for the changed custom domain association.
+ */
+ CustomDomainName?: CustomDomainNameString;
+ /**
+ * The certificate Amazon Resource Name (ARN) associated with the result for the changed custom domain association.
+ */
+ CustomDomainCertificateArn?: CustomDomainCertificateArnString;
+ /**
+ * The identifier of the cluster associated with the result for the changed custom domain association.
+ */
+ ClusterIdentifier?: String;
+ /**
+ * The certificate expiration time associated with the result for the changed custom domain association.
+ */
+ CustomDomainCertExpiryTime?: String;
+ }
  export interface ModifyEndpointAccessMessage {
  /**
  * The endpoint to be modified.
@@ -4828,7 +5006,7 @@ declare namespace Redshift {
  */
  SnapshotArn?: String;
  /**
- * The name of the cluster the source snapshot was created from. This parameter is required if your IAM user or role has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.
+ * The name of the cluster the source snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.
  */
  SnapshotClusterIdentifier?: String;
  /**
@@ -5091,7 +5269,7 @@ declare namespace Redshift {
  */
  SnapshotArn?: String;
  /**
- * The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user or role has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.
+ * The identifier of the cluster the snapshot was created from. This parameter is required if your IAM user has a policy containing a snapshot resource element that specifies anything other than * for the cluster name.
  */
  SnapshotClusterIdentifier?: String;
  /**
@@ -85,19 +85,19 @@ declare class SageMaker extends Service {
  */
  createArtifact(callback?: (err: AWSError, data: SageMaker.Types.CreateArtifactResponse) => void): Request<SageMaker.Types.CreateArtifactResponse, AWSError>;
  /**
- * Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. Find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob. CreateAutoMLJob only accepts tabular input data. We recommend using CreateAutoMLJobV2 for all problem types. CreateAutoMLJobV2 can process the same tabular data as its previous version CreateAutoMLJob, as well as non-tabular data for problem types such as image or text classification. Find guidelines about how to migrate CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.
+ * Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as non-tabular problem types such as image or text classification. Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.
  */
  createAutoMLJob(params: SageMaker.Types.CreateAutoMLJobRequest, callback?: (err: AWSError, data: SageMaker.Types.CreateAutoMLJobResponse) => void): Request<SageMaker.Types.CreateAutoMLJobResponse, AWSError>;
  /**
- * Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. Find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob. CreateAutoMLJob only accepts tabular input data. We recommend using CreateAutoMLJobV2 for all problem types. CreateAutoMLJobV2 can process the same tabular data as its previous version CreateAutoMLJob, as well as non-tabular data for problem types such as image or text classification. Find guidelines about how to migrate CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2.
+ * Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. We recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as non-tabular problem types such as image or text classification. Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. You can find the best-performing model after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob.
  */
  createAutoMLJob(callback?: (err: AWSError, data: SageMaker.Types.CreateAutoMLJobResponse) => void): Request<SageMaker.Types.CreateAutoMLJobResponse, AWSError>;
  /**
- * Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. We recommend using CreateAutoMLJobV2 for all problem types. CreateAutoMLJobV2 can process the same tabular data as its previous version CreateAutoMLJob, as well as non-tabular data for problem types such as image or text classification. Find guidelines about how to migrate CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig. Find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2. Calling DescribeAutoMLJob on a AutoML job V2 results in an error.
+ * Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as non-tabular problem types such as image or text classification. Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.
  */
  createAutoMLJobV2(params: SageMaker.Types.CreateAutoMLJobV2Request, callback?: (err: AWSError, data: SageMaker.Types.CreateAutoMLJobV2Response) => void): Request<SageMaker.Types.CreateAutoMLJobV2Response, AWSError>;
  /**
- * Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. We recommend using CreateAutoMLJobV2 for all problem types. CreateAutoMLJobV2 can process the same tabular data as its previous version CreateAutoMLJob, as well as non-tabular data for problem types such as image or text classification. Find guidelines about how to migrate CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig. Find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2. Calling DescribeAutoMLJob on a AutoML job V2 results in an error.
+ * Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous version CreateAutoMLJob, as well as non-tabular problem types such as image or text classification. Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. For the list of available problem types supported by CreateAutoMLJobV2, see AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML job V2 by calling DescribeAutoMLJobV2.
  */
  createAutoMLJobV2(callback?: (err: AWSError, data: SageMaker.Types.CreateAutoMLJobV2Response) => void): Request<SageMaker.Types.CreateAutoMLJobV2Response, AWSError>;
  /**
@@ -885,19 +885,19 @@ declare class SageMaker extends Service {
  */
  describeArtifact(callback?: (err: AWSError, data: SageMaker.Types.DescribeArtifactResponse) => void): Request<SageMaker.Types.DescribeArtifactResponse, AWSError>;
  /**
- * Returns information about an AutoML job created by calling CreateAutoMLJob.
+ * Returns information about an AutoML job created by calling CreateAutoMLJob. AutoML jobs created by calling CreateAutoMLJobV2 cannot be described by DescribeAutoMLJob.
  */
  describeAutoMLJob(params: SageMaker.Types.DescribeAutoMLJobRequest, callback?: (err: AWSError, data: SageMaker.Types.DescribeAutoMLJobResponse) => void): Request<SageMaker.Types.DescribeAutoMLJobResponse, AWSError>;
  /**
- * Returns information about an AutoML job created by calling CreateAutoMLJob.
+ * Returns information about an AutoML job created by calling CreateAutoMLJob. AutoML jobs created by calling CreateAutoMLJobV2 cannot be described by DescribeAutoMLJob.
  */
  describeAutoMLJob(callback?: (err: AWSError, data: SageMaker.Types.DescribeAutoMLJobResponse) => void): Request<SageMaker.Types.DescribeAutoMLJobResponse, AWSError>;
  /**
- * Returns information about an AutoML job V2 created by calling CreateAutoMLJobV2.
+ * Returns information about an AutoML job created by calling CreateAutoMLJobV2 or CreateAutoMLJob.
  */
  describeAutoMLJobV2(params: SageMaker.Types.DescribeAutoMLJobV2Request, callback?: (err: AWSError, data: SageMaker.Types.DescribeAutoMLJobV2Response) => void): Request<SageMaker.Types.DescribeAutoMLJobV2Response, AWSError>;
  /**
- * Returns information about an AutoML job V2 created by calling CreateAutoMLJobV2.
+ * Returns information about an AutoML job created by calling CreateAutoMLJobV2 or CreateAutoMLJob.
  */
  describeAutoMLJobV2(callback?: (err: AWSError, data: SageMaker.Types.DescribeAutoMLJobV2Response) => void): Request<SageMaker.Types.DescribeAutoMLJobV2Response, AWSError>;
  /**
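
Since DescribeAutoMLJobV2 now covers jobs created by either operation, a single describe path suffices. A sketch with a hypothetical job name; the response fields used here are assumptions from the service API, not this diff:

```typescript
import AWS from 'aws-sdk';

const sagemaker = new AWS.SageMaker({ region: 'us-east-1' });

// DescribeAutoMLJobV2 works for jobs created by CreateAutoMLJob or
// CreateAutoMLJobV2; the job name is a hypothetical placeholder.
sagemaker.describeAutoMLJobV2({ AutoMLJobName: 'my-automl-job' }).promise()
  .then((job) => console.log(job.AutoMLJobStatus, job.BestCandidate?.CandidateName))
  .catch((err) => console.error(err));
```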
@@ -4116,6 +4116,10 @@ declare namespace SageMaker {
  * Specifies additional configuration for multi-model endpoints.
  */
  MultiModelConfig?: MultiModelConfig;
+ /**
+ * Specifies the location of ML model data to deploy. Currently you cannot use ModelDataSource in conjunction with SageMaker batch transform, SageMaker serverless endpoints, SageMaker multi-model endpoints, and SageMaker Marketplace.
+ */
+ ModelDataSource?: ModelDataSource;
  }
  export type ContainerDefinitionList = ContainerDefinition[];
  export type ContainerEntrypoint = ContainerEntrypointString[];
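
ModelDataSource (defined later in this diff, together with S3ModelDataSource) lets createModel point at raw S3 model data instead of a ModelDataUrl tarball. A hedged sketch; the bucket, image URI, and role ARN are hypothetical placeholders:

```typescript
import AWS from 'aws-sdk';

const sagemaker = new AWS.SageMaker({ region: 'us-east-1' });

// Create a model whose weights live uncompressed under an S3 prefix.
// Bucket, image, and role ARN below are hypothetical placeholders.
sagemaker.createModel({
  ModelName: 'my-uncompressed-model',
  ExecutionRoleArn: 'arn:aws:iam::111122223333:role/SageMakerExecutionRole',
  PrimaryContainer: {
    Image: '111122223333.dkr.ecr.us-east-1.amazonaws.com/my-inference:latest',
    ModelDataSource: {
      S3DataSource: {
        S3Uri: 's3://mybucket/model/',  // key name prefix, must end with '/'
        S3DataType: 'S3Prefix',
        CompressionType: 'None',
      },
    },
  },
}).promise()
  .then((res) => console.log(res.ModelArn))
  .catch((err) => console.error(err));
```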
@@ -4418,7 +4422,7 @@ declare namespace SageMaker {
  */
  AutoMLJobName: AutoMLJobName;
  /**
- * An array of channel objects describing the input data and their location. Each channel is a named input source. Similar to InputDataConfig supported by CreateAutoMLJob. The supported formats depend on the problem type: For Tabular problem types: S3Prefix, ManifestFile. For ImageClassification: S3Prefix, ManifestFile, AugmentedManifestFile. For TextClassification: S3Prefix.
+ * An array of channel objects describing the input data and their location. Each channel is a named input source. Similar to the InputDataConfig attribute in the CreateAutoMLJob input parameters. The supported formats depend on the problem type: For Tabular problem types: S3Prefix, ManifestFile. For ImageClassification: S3Prefix, ManifestFile, AugmentedManifestFile. For TextClassification: S3Prefix.
  */
  AutoMLJobInputDataConfig: AutoMLJobInputDataConfig;
  /**
@@ -4426,7 +4430,7 @@ declare namespace SageMaker {
  */
  OutputDataConfig: AutoMLOutputDataConfig;
  /**
- * Defines the configuration settings of one of the supported problem types. For tabular problem types, you must either specify the type of supervised learning problem in AutoMLProblemTypeConfig (TabularJobConfig.ProblemType) and provide the AutoMLJobObjective, or none at all.
+ * Defines the configuration settings of one of the supported problem types.
  */
  AutoMLProblemTypeConfig: AutoMLProblemTypeConfig;
  /**
@@ -4442,7 +4446,7 @@ declare namespace SageMaker {
  */
  SecurityConfig?: AutoMLSecurityConfig;
  /**
- * Specifies a metric to minimize or maximize as the objective of a job. If not specified, the default objective metric depends on the problem type. For the list of default values per problem type, see AutoMLJobObjective. For tabular problem types, you must either provide the AutoMLJobObjective and indicate the type of supervised learning problem in AutoMLProblemTypeConfig (TabularJobConfig.ProblemType), or none.
+ * Specifies a metric to minimize or maximize as the objective of a job. If not specified, the default objective metric depends on the problem type. For the list of default values per problem type, see AutoMLJobObjective. For tabular problem types, you must either provide both the AutoMLJobObjective and indicate the type of supervised learning problem in AutoMLProblemTypeConfig (TabularJobConfig.ProblemType), or none at all.
  */
  AutoMLJobObjective?: AutoMLJobObjective;
  /**
@@ -16406,6 +16410,7 @@ declare namespace SageMaker {
  */
  InvocationsMaxRetries?: InvocationsMaxRetries;
  }
+ export type ModelCompressionType = "None"|"Gzip"|string;
  export interface ModelConfiguration {
  /**
  * The inference specification name in the model package version.
@@ -16563,6 +16568,12 @@ declare namespace SageMaker {
  */
  Constraints?: MetricsSource;
  }
+ export interface ModelDataSource {
+ /**
+ * Specifies the S3 location of ML model data to deploy.
+ */
+ S3DataSource: S3ModelDataSource;
+ }
  export interface ModelDeployConfig {
  /**
  * Set to True to automatically generate an endpoint name for a one-click Autopilot model deployment; set to False otherwise. The default value is False. If you set AutoGenerateEndpointName to True, do not specify the EndpointName; otherwise a 400 error is thrown.
@@ -17834,6 +17845,7 @@ declare namespace SageMaker {
  export type OptionalInteger = number;
  export type OptionalVolumeSizeInGB = number;
  export type OrderKey = "Ascending"|"Descending"|string;
+ export type OutputCompressionType = "GZIP"|"NONE"|string;
  export interface OutputConfig {
  /**
  * Identifies the S3 bucket where you want Amazon SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix.
@@ -17865,6 +17877,10 @@ declare namespace SageMaker {
  * Identifies the S3 path where you want SageMaker to store the model artifacts. For example, s3://bucket-name/key-name-prefix.
  */
  S3OutputPath: S3Uri;
+ /**
+ * The model output compression type. Select None to output an uncompressed model, recommended for large model outputs. Defaults to gzip.
+ */
+ CompressionType?: OutputCompressionType;
  }
  export interface OutputParameter {
  /**
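
The new flag rides on a training job's OutputDataConfig. A minimal typed fragment (bucket hypothetical); omitting CompressionType keeps the default gzip behavior:

```typescript
import AWS from 'aws-sdk';

// Emit uncompressed model artifacts, which the doc text above recommends
// for large model outputs; the bucket name is a hypothetical placeholder.
const outputDataConfig: AWS.SageMaker.Types.OutputDataConfig = {
  S3OutputPath: 's3://mybucket/training-output/',
  CompressionType: 'NONE',
};
```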
@@ -19589,6 +19605,22 @@ declare namespace SageMaker {
  InstanceGroupNames?: InstanceGroupNames;
  }
  export type S3DataType = "ManifestFile"|"S3Prefix"|"AugmentedManifestFile"|string;
+ export interface S3ModelDataSource {
+ /**
+ * Specifies the S3 path of ML model data to deploy.
+ */
+ S3Uri: S3ModelUri;
+ /**
+ * Specifies the type of ML model data to deploy. If you choose S3Prefix, S3Uri identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix as part of the ML model data to deploy. A valid key name prefix identified by S3Uri always ends with a forward slash (/). If you choose S3Object, S3Uri identifies an object that is the ML model data to deploy.
+ */
+ S3DataType: S3ModelDataType;
+ /**
+ * Specifies how the ML model data is prepared. If you choose Gzip and choose S3Object as the value of S3DataType, S3Uri identifies an object that is a gzip-compressed TAR archive. SageMaker will attempt to decompress and untar the object during model deployment. If you choose None and choose S3Object as the value of S3DataType, S3Uri identifies an object that represents an uncompressed ML model to deploy. If you choose None and choose S3Prefix as the value of S3DataType, S3Uri identifies a key name prefix, under which all objects represent the uncompressed ML model to deploy. If you choose None, SageMaker will follow the rules below when creating model data files under the /opt/ml/model directory for use by your inference code: If you choose S3Object as the value of S3DataType, then SageMaker will split the key of the S3 object referenced by S3Uri by slash (/), and use the last part as the filename of the file holding the content of the S3 object. If you choose S3Prefix as the value of S3DataType, then for each S3 object under the key name prefix referenced by S3Uri, SageMaker will trim its key by the prefix, and use the remainder as the path (relative to /opt/ml/model) of the file holding the content of the S3 object. SageMaker will split the remainder by slash (/), using intermediate parts as directory names and the last part as the filename of the file holding the content of the S3 object. Do not use any of the following as file names or directory names: an empty or blank string, a string which contains null bytes, a string longer than 255 bytes, a single dot (.), or a double dot (..). Ambiguous file names will result in model deployment failure. For example, if your uncompressed ML model consists of two S3 objects s3://mybucket/model/weights and s3://mybucket/model/weights/part1 and you specify s3://mybucket/model/ as the value of S3Uri and S3Prefix as the value of S3DataType, then it will result in a name clash between /opt/ml/model/weights (a regular file) and /opt/ml/model/weights/ (a directory). Do not organize the model artifacts in the S3 console using folders. When you create a folder in the S3 console, S3 creates a 0-byte object with a key set to the folder name you provide. The key of the 0-byte object ends with a slash (/), which violates SageMaker restrictions on model artifact file names, leading to model deployment failure.
+ */
+ CompressionType: ModelCompressionType;
+ }
+ export type S3ModelDataType = "S3Prefix"|"S3Object"|string;
+ export type S3ModelUri = string;
  export type S3OutputPath = string;
  export interface S3StorageConfig {
  /**
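
The mapping rules above reduce to: object keys under the prefix become paths under /opt/ml/model. A commented sketch of an uncompressed-prefix source, with hypothetical keys:

```typescript
import AWS from 'aws-sdk';

// How uncompressed S3Prefix model data maps onto the container filesystem,
// per the CompressionType rules above (all keys shown are hypothetical):
//
//   s3://mybucket/model/config.json         -> /opt/ml/model/config.json
//   s3://mybucket/model/weights/part-00000  -> /opt/ml/model/weights/part-00000
//
// Avoid colliding keys: both 'model/weights' (an object) and
// 'model/weights/part1' would map a file and a directory to the same path.
const source: AWS.SageMaker.Types.S3ModelDataSource = {
  S3Uri: 's3://mybucket/model/',  // prefix must end with '/'
  S3DataType: 'S3Prefix',
  CompressionType: 'None',
};
```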
@@ -20259,7 +20291,7 @@ declare namespace SageMaker {
  */
  GenerateCandidateDefinitionsOnly?: GenerateCandidateDefinitionsOnly;
  /**
- * The type of supervised learning problem available for the model candidates of the AutoML job V2. For more information, see Amazon SageMaker Autopilot problem types.
+ * The type of supervised learning problem available for the model candidates of the AutoML job V2. For more information, see Amazon SageMaker Autopilot problem types. You must either specify the type of supervised learning problem in ProblemType and provide the AutoMLJobObjective metric, or none at all.
  */
  ProblemType?: ProblemType;
  /**
@@ -20417,7 +20449,7 @@ declare namespace SageMaker {
  }
  export type TrainingInputMode = "Pipe"|"File"|"FastFile"|string;
  export type TrainingInstanceCount = number;
- export type TrainingInstanceType = "ml.m4.xlarge"|"ml.m4.2xlarge"|"ml.m4.4xlarge"|"ml.m4.10xlarge"|"ml.m4.16xlarge"|"ml.g4dn.xlarge"|"ml.g4dn.2xlarge"|"ml.g4dn.4xlarge"|"ml.g4dn.8xlarge"|"ml.g4dn.12xlarge"|"ml.g4dn.16xlarge"|"ml.m5.large"|"ml.m5.xlarge"|"ml.m5.2xlarge"|"ml.m5.4xlarge"|"ml.m5.12xlarge"|"ml.m5.24xlarge"|"ml.c4.xlarge"|"ml.c4.2xlarge"|"ml.c4.4xlarge"|"ml.c4.8xlarge"|"ml.p2.xlarge"|"ml.p2.8xlarge"|"ml.p2.16xlarge"|"ml.p3.2xlarge"|"ml.p3.8xlarge"|"ml.p3.16xlarge"|"ml.p3dn.24xlarge"|"ml.p4d.24xlarge"|"ml.c5.xlarge"|"ml.c5.2xlarge"|"ml.c5.4xlarge"|"ml.c5.9xlarge"|"ml.c5.18xlarge"|"ml.c5n.xlarge"|"ml.c5n.2xlarge"|"ml.c5n.4xlarge"|"ml.c5n.9xlarge"|"ml.c5n.18xlarge"|"ml.g5.xlarge"|"ml.g5.2xlarge"|"ml.g5.4xlarge"|"ml.g5.8xlarge"|"ml.g5.16xlarge"|"ml.g5.12xlarge"|"ml.g5.24xlarge"|"ml.g5.48xlarge"|"ml.trn1.2xlarge"|"ml.trn1.32xlarge"|string;
+ export type TrainingInstanceType = "ml.m4.xlarge"|"ml.m4.2xlarge"|"ml.m4.4xlarge"|"ml.m4.10xlarge"|"ml.m4.16xlarge"|"ml.g4dn.xlarge"|"ml.g4dn.2xlarge"|"ml.g4dn.4xlarge"|"ml.g4dn.8xlarge"|"ml.g4dn.12xlarge"|"ml.g4dn.16xlarge"|"ml.m5.large"|"ml.m5.xlarge"|"ml.m5.2xlarge"|"ml.m5.4xlarge"|"ml.m5.12xlarge"|"ml.m5.24xlarge"|"ml.c4.xlarge"|"ml.c4.2xlarge"|"ml.c4.4xlarge"|"ml.c4.8xlarge"|"ml.p2.xlarge"|"ml.p2.8xlarge"|"ml.p2.16xlarge"|"ml.p3.2xlarge"|"ml.p3.8xlarge"|"ml.p3.16xlarge"|"ml.p3dn.24xlarge"|"ml.p4d.24xlarge"|"ml.c5.xlarge"|"ml.c5.2xlarge"|"ml.c5.4xlarge"|"ml.c5.9xlarge"|"ml.c5.18xlarge"|"ml.c5n.xlarge"|"ml.c5n.2xlarge"|"ml.c5n.4xlarge"|"ml.c5n.9xlarge"|"ml.c5n.18xlarge"|"ml.g5.xlarge"|"ml.g5.2xlarge"|"ml.g5.4xlarge"|"ml.g5.8xlarge"|"ml.g5.16xlarge"|"ml.g5.12xlarge"|"ml.g5.24xlarge"|"ml.g5.48xlarge"|"ml.trn1.2xlarge"|"ml.trn1.32xlarge"|"ml.trn1n.32xlarge"|string;
  export type TrainingInstanceTypes = TrainingInstanceType[];
  export interface TrainingJob {
  /**
@@ -761,6 +761,10 @@ declare namespace Transfer {
  * Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow. In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects while the file is still being uploaded.
  */
  WorkflowDetails?: WorkflowDetails;
+ /**
+ * Specifies the log groups to which your server logs are sent. To specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows: arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:* For example, arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:* If you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an update-server call. For example: update-server --server-id s-1234567890abcdef0 --structured-log-destinations
+ */
+ StructuredLogDestinations?: StructuredLogDestinations;
  }
  export interface CreateServerResponse {
  /**
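
StructuredLogDestinations is a list of CloudWatch log group ARNs (the type resolves to Arn[] later in this diff). A hedged createServer sketch; the account ID and log group are hypothetical:

```typescript
import AWS from 'aws-sdk';

const transfer = new AWS.Transfer({ region: 'us-east-1' });

// Send structured server logs to an existing CloudWatch log group.
// The log group ARN (including the trailing ':*') is a hypothetical example.
transfer.createServer({
  Protocols: ['SFTP'],
  IdentityProviderType: 'SERVICE_MANAGED',
  StructuredLogDestinations: [
    'arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*',
  ],
}).promise()
  .then((res) => console.log(res.ServerId))
  .catch((err) => console.error(err));
```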
@@ -1493,6 +1497,10 @@ declare namespace Transfer {
  * Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow. In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects while the file is still being uploaded.
  */
  WorkflowDetails?: WorkflowDetails;
+ /**
+ * Specifies the log groups to which your server logs are sent. To specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows: arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:* For example, arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:* If you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an update-server call. For example: update-server --server-id s-1234567890abcdef0 --structured-log-destinations
+ */
+ StructuredLogDestinations?: StructuredLogDestinations;
  }
  export interface DescribedUser {
  /**
@@ -2534,6 +2542,7 @@ declare namespace Transfer {
  */
  ServerId: ServerId;
  }
+ export type StructuredLogDestinations = Arn[];
  export type SubnetId = string;
  export type SubnetIds = SubnetId[];
  export interface Tag {
@@ -2852,6 +2861,10 @@ declare namespace Transfer {
  * Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow. In addition to a workflow to execute when a file is uploaded completely, WorkflowDetails can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when the server session disconnects while the file is still being uploaded. To remove an associated workflow from a server, you can provide an empty OnUpload object, as in the following example. aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{"OnUpload":[]}'
  */
  WorkflowDetails?: WorkflowDetails;
+ /**
+ * Specifies the log groups to which your server logs are sent. To specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows: arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:* For example, arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:* If you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an update-server call. For example: update-server --server-id s-1234567890abcdef0 --structured-log-destinations
+ */
+ StructuredLogDestinations?: StructuredLogDestinations;
  }
  export interface UpdateServerResponse {
  /**
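
Clearing the setting again mirrors the CLI's empty --structured-log-destinations value: send an empty list on updateServer. A sketch with a hypothetical server ID:

```typescript
import AWS from 'aws-sdk';

const transfer = new AWS.Transfer({ region: 'us-east-1' });

// Turn structured logging off by sending an empty list, the SDK analogue
// of the CLI's bare --structured-log-destinations flag described above.
transfer.updateServer({
  ServerId: 's-1234567890abcdef0',  // hypothetical server ID
  StructuredLogDestinations: [],
}).promise()
  .then(() => console.log('structured logging disabled'))
  .catch((err) => console.error(err));
```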
@@ -83,7 +83,7 @@ return /******/ (function(modules) { // webpackBootstrap
  /**
  * @constant
  */
- VERSION: '2.1400.0',
+ VERSION: '2.1402.0',
 
  /**
  * @api private