cdk-comprehend-s3olap 2.0.15 → 2.0.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/.jsii +3 -3
  2. package/lib/cdk-comprehend-s3olap.js +2 -2
  3. package/lib/comprehend-lambdas.js +2 -2
  4. package/lib/iam-roles.js +4 -4
  5. package/node_modules/aws-sdk/CHANGELOG.md +15 -1
  6. package/node_modules/aws-sdk/README.md +1 -1
  7. package/node_modules/aws-sdk/apis/glue-2017-03-31.min.json +69 -13
  8. package/node_modules/aws-sdk/apis/lookoutequipment-2020-12-15.min.json +54 -5
  9. package/node_modules/aws-sdk/apis/lookoutequipment-2020-12-15.paginators.json +5 -0
  10. package/node_modules/aws-sdk/apis/metadata.json +0 -3
  11. package/node_modules/aws-sdk/apis/mgn-2020-02-26.min.json +233 -44
  12. package/node_modules/aws-sdk/apis/mgn-2020-02-26.paginators.json +6 -0
  13. package/node_modules/aws-sdk/apis/migration-hub-refactor-spaces-2021-10-26.min.json +75 -25
  14. package/node_modules/aws-sdk/apis/pricing-2017-10-15.min.json +6 -0
  15. package/node_modules/aws-sdk/apis/sagemaker-2017-07-24.min.json +231 -182
  16. package/node_modules/aws-sdk/clients/all.d.ts +0 -1
  17. package/node_modules/aws-sdk/clients/all.js +0 -1
  18. package/node_modules/aws-sdk/clients/apigateway.d.ts +1 -1
  19. package/node_modules/aws-sdk/clients/glue.d.ts +106 -2
  20. package/node_modules/aws-sdk/clients/lookoutequipment.d.ts +69 -1
  21. package/node_modules/aws-sdk/clients/mediaconvert.d.ts +1 -1
  22. package/node_modules/aws-sdk/clients/mgn.d.ts +218 -3
  23. package/node_modules/aws-sdk/clients/migrationhubrefactorspaces.d.ts +70 -8
  24. package/node_modules/aws-sdk/clients/pricing.d.ts +4 -4
  25. package/node_modules/aws-sdk/clients/rdsdataservice.d.ts +8 -8
  26. package/node_modules/aws-sdk/clients/sagemaker.d.ts +69 -8
  27. package/node_modules/aws-sdk/clients/transfer.d.ts +9 -9
  28. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +2 -2
  29. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +20 -55
  30. package/node_modules/aws-sdk/dist/aws-sdk.js +13 -8
  31. package/node_modules/aws-sdk/dist/aws-sdk.min.js +6 -6
  32. package/node_modules/aws-sdk/lib/config_service_placeholders.d.ts +0 -2
  33. package/node_modules/aws-sdk/lib/core.js +1 -1
  34. package/node_modules/aws-sdk/lib/dynamodb/document_client.d.ts +5 -1
  35. package/node_modules/aws-sdk/lib/dynamodb/document_client.js +4 -2
  36. package/node_modules/aws-sdk/package.json +1 -1
  37. package/package.json +5 -5
  38. package/node_modules/aws-sdk/apis/redshiftserverless-2021-04-21.examples.json +0 -5
  39. package/node_modules/aws-sdk/apis/redshiftserverless-2021-04-21.min.json +0 -1206
  40. package/node_modules/aws-sdk/apis/redshiftserverless-2021-04-21.paginators.json +0 -40
  41. package/node_modules/aws-sdk/clients/redshiftserverless.d.ts +0 -1525
  42. package/node_modules/aws-sdk/clients/redshiftserverless.js +0 -18
@@ -28,11 +28,11 @@ declare class MigrationHubRefactorSpaces extends Service {
  */
  createEnvironment(callback?: (err: AWSError, data: MigrationHubRefactorSpaces.Types.CreateEnvironmentResponse) => void): Request<MigrationHubRefactorSpaces.Types.CreateEnvironmentResponse, AWSError>;
  /**
- * Creates an Amazon Web Services Migration Hub Refactor Spaces route. The account owner of the service resource is always the environment owner, regardless of which account creates the route. Routes target a service in the application. If an application does not have any routes, then the first route must be created as a DEFAULT RouteType. When you create a route, Refactor Spaces configures the Amazon API Gateway to send traffic to the target service as follows: If the service has a URL endpoint, and the endpoint resolves to a private IP address, Refactor Spaces routes traffic using the API Gateway VPC link. If the service has a URL endpoint, and the endpoint resolves to a public IP address, Refactor Spaces routes traffic over the public internet. If the service has an Lambda function endpoint, then Refactor Spaces configures the Lambda function's resource policy to allow the application's API Gateway to invoke the function. A one-time health check is performed on the service when the route is created. If the health check fails, the route transitions to FAILED, and no traffic is sent to the service. For Lambda functions, the Lambda function state is checked. If the function is not active, the function configuration is updated so that Lambda resources are provisioned. If the Lambda state is Failed, then the route creation fails. For more information, see the GetFunctionConfiguration's State response parameter in the Lambda Developer Guide. For public URLs, a connection is opened to the public endpoint. If the URL is not reachable, the health check fails. For private URLs, a target group is created and the target group health check is run. The HealthCheckProtocol, HealthCheckPort, and HealthCheckPath are the same protocol, port, and path specified in the URL or health URL, if used. All other settings use the default values, as described in Health checks for your target groups. The health check is considered successful if at least one target within the target group transitions to a healthy state. Services can have HTTP or HTTPS URL endpoints. For HTTPS URLs, publicly-signed certificates are supported. Private Certificate Authorities (CAs) are permitted only if the CA's domain is publicly resolvable.
+ * Creates an Amazon Web Services Migration Hub Refactor Spaces route. The account owner of the service resource is always the environment owner, regardless of which account creates the route. Routes target a service in the application. If an application does not have any routes, then the first route must be created as a DEFAULT RouteType. When created, the default route defaults to an active state so state is not a required input. However, like all other state values the state of the default route can be updated after creation, but only when all other routes are also inactive. Conversely, no route can be active without the default route also being active. When you create a route, Refactor Spaces configures the Amazon API Gateway to send traffic to the target service as follows: If the service has a URL endpoint, and the endpoint resolves to a private IP address, Refactor Spaces routes traffic using the API Gateway VPC link. If the service has a URL endpoint, and the endpoint resolves to a public IP address, Refactor Spaces routes traffic over the public internet. If the service has an Lambda function endpoint, then Refactor Spaces configures the Lambda function's resource policy to allow the application's API Gateway to invoke the function. A one-time health check is performed on the service when either the route is updated from inactive to active, or when it is created with an active state. If the health check fails, the route transitions the route state to FAILED, an error code of SERVICE_ENDPOINT_HEALTH_CHECK_FAILURE is provided, and no traffic is sent to the service. For Lambda functions, the Lambda function state is checked. If the function is not active, the function configuration is updated so that Lambda resources are provisioned. If the Lambda state is Failed, then the route creation fails. For more information, see the GetFunctionConfiguration's State response parameter in the Lambda Developer Guide. For Lambda endpoints, a check is performed to determine that a Lambda function with the specified ARN exists. If it does not exist, the health check fails. For public URLs, a connection is opened to the public endpoint. If the URL is not reachable, the health check fails. For private URLS, a target group is created on the Elastic Load Balancing and the target group health check is run. The HealthCheckProtocol, HealthCheckPort, and HealthCheckPath are the same protocol, port, and path specified in the URL or health URL, if used. All other settings use the default values, as described in Health checks for your target groups. The health check is considered successful if at least one target within the target group transitions to a healthy state. Services can have HTTP or HTTPS URL endpoints. For HTTPS URLs, publicly-signed certificates are supported. Private Certificate Authorities (CAs) are permitted only if the CA's domain is also publicly resolvable.
  */
  createRoute(params: MigrationHubRefactorSpaces.Types.CreateRouteRequest, callback?: (err: AWSError, data: MigrationHubRefactorSpaces.Types.CreateRouteResponse) => void): Request<MigrationHubRefactorSpaces.Types.CreateRouteResponse, AWSError>;
  /**
- * Creates an Amazon Web Services Migration Hub Refactor Spaces route. The account owner of the service resource is always the environment owner, regardless of which account creates the route. Routes target a service in the application. If an application does not have any routes, then the first route must be created as a DEFAULT RouteType. When you create a route, Refactor Spaces configures the Amazon API Gateway to send traffic to the target service as follows: If the service has a URL endpoint, and the endpoint resolves to a private IP address, Refactor Spaces routes traffic using the API Gateway VPC link. If the service has a URL endpoint, and the endpoint resolves to a public IP address, Refactor Spaces routes traffic over the public internet. If the service has an Lambda function endpoint, then Refactor Spaces configures the Lambda function's resource policy to allow the application's API Gateway to invoke the function. A one-time health check is performed on the service when the route is created. If the health check fails, the route transitions to FAILED, and no traffic is sent to the service. For Lambda functions, the Lambda function state is checked. If the function is not active, the function configuration is updated so that Lambda resources are provisioned. If the Lambda state is Failed, then the route creation fails. For more information, see the GetFunctionConfiguration's State response parameter in the Lambda Developer Guide. For public URLs, a connection is opened to the public endpoint. If the URL is not reachable, the health check fails. For private URLs, a target group is created and the target group health check is run. The HealthCheckProtocol, HealthCheckPort, and HealthCheckPath are the same protocol, port, and path specified in the URL or health URL, if used. All other settings use the default values, as described in Health checks for your target groups. The health check is considered successful if at least one target within the target group transitions to a healthy state. Services can have HTTP or HTTPS URL endpoints. For HTTPS URLs, publicly-signed certificates are supported. Private Certificate Authorities (CAs) are permitted only if the CA's domain is publicly resolvable.
+ * Creates an Amazon Web Services Migration Hub Refactor Spaces route. The account owner of the service resource is always the environment owner, regardless of which account creates the route. Routes target a service in the application. If an application does not have any routes, then the first route must be created as a DEFAULT RouteType. When created, the default route defaults to an active state so state is not a required input. However, like all other state values the state of the default route can be updated after creation, but only when all other routes are also inactive. Conversely, no route can be active without the default route also being active. When you create a route, Refactor Spaces configures the Amazon API Gateway to send traffic to the target service as follows: If the service has a URL endpoint, and the endpoint resolves to a private IP address, Refactor Spaces routes traffic using the API Gateway VPC link. If the service has a URL endpoint, and the endpoint resolves to a public IP address, Refactor Spaces routes traffic over the public internet. If the service has an Lambda function endpoint, then Refactor Spaces configures the Lambda function's resource policy to allow the application's API Gateway to invoke the function. A one-time health check is performed on the service when either the route is updated from inactive to active, or when it is created with an active state. If the health check fails, the route transitions the route state to FAILED, an error code of SERVICE_ENDPOINT_HEALTH_CHECK_FAILURE is provided, and no traffic is sent to the service. For Lambda functions, the Lambda function state is checked. If the function is not active, the function configuration is updated so that Lambda resources are provisioned. If the Lambda state is Failed, then the route creation fails. For more information, see the GetFunctionConfiguration's State response parameter in the Lambda Developer Guide. For Lambda endpoints, a check is performed to determine that a Lambda function with the specified ARN exists. If it does not exist, the health check fails. For public URLs, a connection is opened to the public endpoint. If the URL is not reachable, the health check fails. For private URLS, a target group is created on the Elastic Load Balancing and the target group health check is run. The HealthCheckProtocol, HealthCheckPort, and HealthCheckPath are the same protocol, port, and path specified in the URL or health URL, if used. All other settings use the default values, as described in Health checks for your target groups. The health check is considered successful if at least one target within the target group transitions to a healthy state. Services can have HTTP or HTTPS URL endpoints. For HTTPS URLs, publicly-signed certificates are supported. Private Certificate Authorities (CAs) are permitted only if the CA's domain is also publicly resolvable.
  */
  createRoute(callback?: (err: AWSError, data: MigrationHubRefactorSpaces.Types.CreateRouteResponse) => void): Request<MigrationHubRefactorSpaces.Types.CreateRouteResponse, AWSError>;
  /**
@@ -195,6 +195,14 @@ declare class MigrationHubRefactorSpaces extends Service {
  * Adds to or modifies the tags of the given resource. Tags are metadata which can be used to manage a resource. To untag a resource, the caller account must be the same as the resource’s OwnerAccountId. Untagging resources across accounts is not supported.
  */
  untagResource(callback?: (err: AWSError, data: MigrationHubRefactorSpaces.Types.UntagResourceResponse) => void): Request<MigrationHubRefactorSpaces.Types.UntagResourceResponse, AWSError>;
+ /**
+ * Updates an Amazon Web Services Migration Hub Refactor Spaces route.
+ */
+ updateRoute(params: MigrationHubRefactorSpaces.Types.UpdateRouteRequest, callback?: (err: AWSError, data: MigrationHubRefactorSpaces.Types.UpdateRouteResponse) => void): Request<MigrationHubRefactorSpaces.Types.UpdateRouteResponse, AWSError>;
+ /**
+ * Updates an Amazon Web Services Migration Hub Refactor Spaces route.
+ */
+ updateRoute(callback?: (err: AWSError, data: MigrationHubRefactorSpaces.Types.UpdateRouteResponse) => void): Request<MigrationHubRefactorSpaces.Types.UpdateRouteResponse, AWSError>;
  }
  declare namespace MigrationHubRefactorSpaces {
  export type AccountId = string;
@@ -496,6 +504,10 @@ declare namespace MigrationHubRefactorSpaces {
  * A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
  */
  ClientToken?: ClientToken;
+ /**
+ * Configuration for the default route type.
+ */
+ DefaultRoute?: DefaultRouteInput;
  /**
  * The ID of the environment in which the route is created.
  */
@@ -555,7 +567,7 @@ declare namespace MigrationHubRefactorSpaces {
  */
  ServiceId?: ServiceId;
  /**
- * The current state of the route.
+ * The current state of the route. Activation state only allows ACTIVE or INACTIVE as user inputs. FAILED is a route state that is system generated.
  */
  State?: RouteState;
  /**
@@ -563,7 +575,7 @@ declare namespace MigrationHubRefactorSpaces {
  */
  Tags?: TagMap;
  /**
- * onfiguration for the URI path route type.
+ * Configuration for the URI path route type.
  */
  UriPathRoute?: UriPathRouteInput;
  }
@@ -675,6 +687,12 @@ declare namespace MigrationHubRefactorSpaces {
  */
  VpcId?: VpcId;
  }
+ export interface DefaultRouteInput {
+ /**
+ * If set to ACTIVE, traffic is forwarded to this route’s service after the route is created.
+ */
+ ActivationState?: RouteActivationState;
+ }
  export interface DeleteApplicationRequest {
  /**
  * The ID of the application.
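The new DefaultRouteInput shape means a default route can now be created in an explicit activation state rather than always starting active. A minimal sketch against the aws-sdk v2 client diffed above; the identifiers are placeholders, and the required ServiceIdentifier/RouteType fields follow the published CreateRoute request shape rather than this hunk:

```ts
import MigrationHubRefactorSpaces = require('aws-sdk/clients/migrationhubrefactorspaces');

const refactorSpaces = new MigrationHubRefactorSpaces({ region: 'us-east-1' });

// Hypothetical identifiers for illustration only.
refactorSpaces.createRoute({
  EnvironmentIdentifier: 'env-0123456789abcdef0',
  ApplicationIdentifier: 'app-0123456789abcdef0',
  ServiceIdentifier: 'svc-0123456789abcdef0',
  RouteType: 'DEFAULT',
  // New in this release: the default route can start INACTIVE
  // and be flipped later with updateRoute.
  DefaultRoute: { ActivationState: 'INACTIVE' },
}, (err, data) => {
  if (err) console.error(err);
  else console.log('Route state:', data.State);
});
```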
@@ -1433,7 +1451,7 @@ declare namespace MigrationHubRefactorSpaces {
  export type ResourceArn = string;
  export type ResourceIdentifier = string;
  export type ResourcePolicyIdentifier = string;
- export type RouteActivationState = "ACTIVE"|string;
+ export type RouteActivationState = "ACTIVE"|"INACTIVE"|string;
  export type RouteId = string;
  export type RouteState = "CREATING"|"ACTIVE"|"DELETING"|"FAILED"|"UPDATING"|"INACTIVE"|string;
  export type RouteSummaries = RouteSummary[];
@@ -1591,11 +1609,11 @@ declare namespace MigrationHubRefactorSpaces {
  export type TagMapValueString = string;
  export interface TagResourceRequest {
  /**
- * The Amazon Resource Name (ARN) of the resource
+ * The Amazon Resource Name (ARN) of the resource.
  */
  ResourceArn: String;
  /**
- * The new or modified tags for the resource.
+ * The new or modified tags for the resource.
  */
  Tags: TagMap;
  }
@@ -1615,11 +1633,55 @@ declare namespace MigrationHubRefactorSpaces {
  }
  export interface UntagResourceResponse {
  }
+ export interface UpdateRouteRequest {
+ /**
+ * If set to ACTIVE, traffic is forwarded to this route’s service after the route is updated.
+ */
+ ActivationState: RouteActivationState;
+ /**
+ * The ID of the application within which the route is being updated.
+ */
+ ApplicationIdentifier: ApplicationId;
+ /**
+ * The ID of the environment in which the route is being updated.
+ */
+ EnvironmentIdentifier: EnvironmentId;
+ /**
+ * The unique identifier of the route to update.
+ */
+ RouteIdentifier: RouteId;
+ }
+ export interface UpdateRouteResponse {
+ /**
+ * The ID of the application in which the route is being updated.
+ */
+ ApplicationId?: ApplicationId;
+ /**
+ * The Amazon Resource Name (ARN) of the route. The format for this ARN is arn:aws:refactor-spaces:region:account-id:resource-type/resource-id . For more information about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference.
+ */
+ Arn?: ResourceArn;
+ /**
+ * A timestamp that indicates when the route was last updated.
+ */
+ LastUpdatedTime?: Timestamp;
+ /**
+ * The unique identifier of the route.
+ */
+ RouteId?: RouteId;
+ /**
+ * The ID of service in which the route was created. Traffic that matches this route is forwarded to this service.
+ */
+ ServiceId?: ServiceId;
+ /**
+ * The current state of the route.
+ */
+ State?: RouteState;
+ }
  export type Uri = string;
  export type UriPath = string;
  export interface UriPathRouteInput {
  /**
- * Indicates whether traffic is forwarded to this route’s service after the route is created.
+ * If set to ACTIVE, traffic is forwarded to this route’s service after the route is created.
  */
  ActivationState: RouteActivationState;
  /**
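All four UpdateRouteRequest fields above are required, so flipping a route's activation state is a single call. A sketch, with placeholder identifiers, of activating a previously inactive route (which triggers the one-time health check described in the createRoute docs):

```ts
import MigrationHubRefactorSpaces = require('aws-sdk/clients/migrationhubrefactorspaces');

const refactorSpaces = new MigrationHubRefactorSpaces({ region: 'us-east-1' });

refactorSpaces.updateRoute({
  EnvironmentIdentifier: 'env-0123456789abcdef0', // placeholder
  ApplicationIdentifier: 'app-0123456789abcdef0', // placeholder
  RouteIdentifier: 'rte-0123456789abcdef0',       // placeholder
  ActivationState: 'ACTIVE', // "INACTIVE" is the other accepted input
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.State, data.LastUpdatedTime);
});
```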
@@ -20,11 +20,11 @@ declare class Pricing extends Service {
  */
  describeServices(callback?: (err: AWSError, data: Pricing.Types.DescribeServicesResponse) => void): Request<Pricing.Types.DescribeServicesResponse, AWSError>;
  /**
- * Returns a list of attribute values. Attributes are similar to the details in a Price List API offer file. For a list of available attributes, see Offer File Definitions in the Amazon Web Services Billing and Cost Management User Guide.
+ * Returns a list of attribute values. Attributes are similar to the details in a Price List API offer file. For a list of available attributes, see Offer File Definitions in the Billing and Cost Management User Guide.
  */
  getAttributeValues(params: Pricing.Types.GetAttributeValuesRequest, callback?: (err: AWSError, data: Pricing.Types.GetAttributeValuesResponse) => void): Request<Pricing.Types.GetAttributeValuesResponse, AWSError>;
  /**
- * Returns a list of attribute values. Attributes are similar to the details in a Price List API offer file. For a list of available attributes, see Offer File Definitions in the Amazon Web Services Billing and Cost Management User Guide.
+ * Returns a list of attribute values. Attributes are similar to the details in a Price List API offer file. For a list of available attributes, see Offer File Definitions in the Billing and Cost Management User Guide.
  */
  getAttributeValues(callback?: (err: AWSError, data: Pricing.Types.GetAttributeValuesResponse) => void): Request<Pricing.Types.GetAttributeValuesResponse, AWSError>;
  /**
@@ -126,7 +126,7 @@ declare namespace Pricing {
  /**
  * The code for the service whose products you want to retrieve.
  */
- ServiceCode?: String;
+ ServiceCode: String;
  /**
  * The list of filters that limit the returned products. only products that match all filters are returned.
  */
@@ -164,7 +164,7 @@ declare namespace Pricing {
  /**
  * The code for the Amazon Web Services service.
  */
- ServiceCode?: String;
+ ServiceCode: String;
  /**
  * The attributes that are available for this service.
  */
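ServiceCode flipping from optional to required in GetProductsRequest is a compile-time breaking change for TypeScript callers that previously omitted it. A sketch of a now-conforming call; the filter field and value are illustrative, not prescribed by this diff:

```ts
import Pricing = require('aws-sdk/clients/pricing');

// The Price List API is served from select regions, e.g. us-east-1.
const pricing = new Pricing({ region: 'us-east-1' });

pricing.getProducts({
  ServiceCode: 'AmazonEC2', // now required
  Filters: [
    { Type: 'TERM_MATCH', Field: 'instanceType', Value: 'm5.large' },
  ],
  FormatVersion: 'aws_v1',
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.PriceList?.length, 'products');
});
```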
@@ -12,11 +12,11 @@ declare class RDSDataService extends Service {
  constructor(options?: RDSDataService.Types.ClientConfiguration)
  config: Config & RDSDataService.Types.ClientConfiguration;
  /**
- * Runs a batch SQL statement over an array of data. You can run bulk update and insert operations for multiple records using a DML statement with different parameter sets. Bulk operations can provide a significant performance improvement over individual insert and update operations. If a call isn't part of a transaction because it doesn't include the transactionID parameter, changes that result from the call are committed automatically.
+ * Runs a batch SQL statement over an array of data. You can run bulk update and insert operations for multiple records using a DML statement with different parameter sets. Bulk operations can provide a significant performance improvement over individual insert and update operations. If a call isn't part of a transaction because it doesn't include the transactionID parameter, changes that result from the call are committed automatically. There isn't a fixed upper limit on the number of parameter sets. However, the maximum size of the HTTP request submitted through the Data API is 4 MiB. If the request exceeds this limit, the Data API returns an error and doesn't process the request. This 4-MiB limit includes the size of the HTTP headers and the JSON notation in the request. Thus, the number of parameter sets that you can include depends on a combination of factors, such as the size of the SQL statement and the size of each parameter set. The response size limit is 1 MiB. If the call returns more than 1 MiB of response data, the call is terminated.
  */
  batchExecuteStatement(params: RDSDataService.Types.BatchExecuteStatementRequest, callback?: (err: AWSError, data: RDSDataService.Types.BatchExecuteStatementResponse) => void): Request<RDSDataService.Types.BatchExecuteStatementResponse, AWSError>;
  /**
- * Runs a batch SQL statement over an array of data. You can run bulk update and insert operations for multiple records using a DML statement with different parameter sets. Bulk operations can provide a significant performance improvement over individual insert and update operations. If a call isn't part of a transaction because it doesn't include the transactionID parameter, changes that result from the call are committed automatically.
+ * Runs a batch SQL statement over an array of data. You can run bulk update and insert operations for multiple records using a DML statement with different parameter sets. Bulk operations can provide a significant performance improvement over individual insert and update operations. If a call isn't part of a transaction because it doesn't include the transactionID parameter, changes that result from the call are committed automatically. There isn't a fixed upper limit on the number of parameter sets. However, the maximum size of the HTTP request submitted through the Data API is 4 MiB. If the request exceeds this limit, the Data API returns an error and doesn't process the request. This 4-MiB limit includes the size of the HTTP headers and the JSON notation in the request. Thus, the number of parameter sets that you can include depends on a combination of factors, such as the size of the SQL statement and the size of each parameter set. The response size limit is 1 MiB. If the call returns more than 1 MiB of response data, the call is terminated.
  */
  batchExecuteStatement(callback?: (err: AWSError, data: RDSDataService.Types.BatchExecuteStatementResponse) => void): Request<RDSDataService.Types.BatchExecuteStatementResponse, AWSError>;
  /**
@@ -44,11 +44,11 @@ declare class RDSDataService extends Service {
  */
  executeSql(callback?: (err: AWSError, data: RDSDataService.Types.ExecuteSqlResponse) => void): Request<RDSDataService.Types.ExecuteSqlResponse, AWSError>;
  /**
- * Runs a SQL statement against a database. If a call isn't part of a transaction because it doesn't include the transactionID parameter, changes that result from the call are committed automatically. If the binary response data from the database is more than 1 MB, the call is terminated.
+ * Runs a SQL statement against a database. If a call isn't part of a transaction because it doesn't include the transactionID parameter, changes that result from the call are committed automatically. If the binary response data from the database is more than 1 MB, the call is terminated.
  */
  executeStatement(params: RDSDataService.Types.ExecuteStatementRequest, callback?: (err: AWSError, data: RDSDataService.Types.ExecuteStatementResponse) => void): Request<RDSDataService.Types.ExecuteStatementResponse, AWSError>;
  /**
- * Runs a SQL statement against a database. If a call isn't part of a transaction because it doesn't include the transactionID parameter, changes that result from the call are committed automatically. If the binary response data from the database is more than 1 MB, the call is terminated.
+ * Runs a SQL statement against a database. If a call isn't part of a transaction because it doesn't include the transactionID parameter, changes that result from the call are committed automatically. If the binary response data from the database is more than 1 MB, the call is terminated.
  */
  executeStatement(callback?: (err: AWSError, data: RDSDataService.Types.ExecuteStatementResponse) => void): Request<RDSDataService.Types.ExecuteStatementResponse, AWSError>;
  /**
@@ -104,11 +104,11 @@ declare namespace RDSDataService {
  */
  schema?: DbName;
  /**
- * The name or ARN of the secret that enables access to the DB cluster.
+ * The ARN of the secret that enables access to the DB cluster. Enter the database user name and password for the credentials in the secret. For information about creating the secret, see Create a database secret.
  */
  secretArn: Arn;
  /**
- * The SQL statement to run.
+ * The SQL statement to run. Don't include a semicolon (;) at the end of the SQL statement.
  */
  sql: SqlStatement;
  /**
@@ -237,7 +237,7 @@ declare namespace RDSDataService {
  export type DoubleArray = BoxedDouble[];
  export interface ExecuteSqlRequest {
  /**
- * The Amazon Resource Name (ARN) of the secret that enables access to the DB cluster.
+ * The Amazon Resource Name (ARN) of the secret that enables access to the DB cluster. Enter the database user name and password for the credentials in the secret. For information about creating the secret, see Create a database secret.
  */
  awsSecretStoreArn: Arn;
  /**
@@ -297,7 +297,7 @@ declare namespace RDSDataService {
  */
  schema?: DbName;
  /**
- * The name or ARN of the secret that enables access to the DB cluster.
+ * The ARN of the secret that enables access to the DB cluster. Enter the database user name and password for the credentials in the secret. For information about creating the secret, see Create a database secret.
  */
  secretArn: Arn;
  /**
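The expanded batchExecuteStatement docs spell out the 4 MiB request and 1 MiB response limits, and the sql doc now forbids a trailing semicolon. A hedged sketch of a bulk insert through the Data API; the ARNs and table are placeholders:

```ts
import RDSDataService = require('aws-sdk/clients/rdsdataservice');

const dataApi = new RDSDataService({ region: 'us-east-1' });

dataApi.batchExecuteStatement({
  resourceArn: 'arn:aws:rds:us-east-1:123456789012:cluster:example-cluster',
  secretArn: 'arn:aws:secretsmanager:us-east-1:123456789012:secret:example-secret',
  // Note: no trailing semicolon, per the updated sql docs.
  sql: 'INSERT INTO pets (name) VALUES (:name)',
  // Each inner array is one parameter set; the whole request,
  // headers and JSON included, must stay under 4 MiB.
  parameterSets: [
    [{ name: 'name', value: { stringValue: 'Fluffy' } }],
    [{ name: 'name', value: { stringValue: 'Rex' } }],
  ],
}, (err, data) => {
  if (err) console.error(err);
  else console.log('updateResults:', data.updateResults?.length);
});
```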
@@ -237,11 +237,11 @@ declare class SageMaker extends Service {
  */
  createLabelingJob(callback?: (err: AWSError, data: SageMaker.Types.CreateLabelingJobResponse) => void): Request<SageMaker.Types.CreateLabelingJobResponse, AWSError>;
  /**
- * Creates a model in SageMaker. In the request, you name the model and describe a primary container. For the primary container, you specify the Docker image that contains inference code, artifacts (from prior training), and a custom environment map that the inference code uses when you deploy the model for predictions. Use this API to create a model if you want to use SageMaker hosting services or run a batch transform job. To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API. SageMaker then deploys all of the containers that you defined for the model in the hosting environment. For an example that calls this method when deploying a model to SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (Amazon Web Services SDK for Python (Boto 3)). To run a batch transform using your model, you start a job with the CreateTransformJob API. SageMaker uses your model and your dataset to get inferences which are then saved to a specified S3 location. In the request, you also provide an IAM role that SageMaker can assume to access model artifacts and docker image for deployment on ML compute hosting instances or for batch transform jobs. In addition, you also use the IAM role to manage permissions the inference code needs. For example, if the inference code access any other Amazon Web Services resources, you grant necessary permissions via this role.
+ * Creates a model in SageMaker. In the request, you name the model and describe a primary container. For the primary container, you specify the Docker image that contains inference code, artifacts (from prior training), and a custom environment map that the inference code uses when you deploy the model for predictions. Use this API to create a model if you want to use SageMaker hosting services or run a batch transform job. To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API. SageMaker then deploys all of the containers that you defined for the model in the hosting environment. For an example that calls this method when deploying a model to SageMaker hosting services, see Create a Model (Amazon Web Services SDK for Python (Boto 3)). To run a batch transform using your model, you start a job with the CreateTransformJob API. SageMaker uses your model and your dataset to get inferences which are then saved to a specified S3 location. In the request, you also provide an IAM role that SageMaker can assume to access model artifacts and docker image for deployment on ML compute hosting instances or for batch transform jobs. In addition, you also use the IAM role to manage permissions the inference code needs. For example, if the inference code access any other Amazon Web Services resources, you grant necessary permissions via this role.
  */
  createModel(params: SageMaker.Types.CreateModelInput, callback?: (err: AWSError, data: SageMaker.Types.CreateModelOutput) => void): Request<SageMaker.Types.CreateModelOutput, AWSError>;
  /**
- * Creates a model in SageMaker. In the request, you name the model and describe a primary container. For the primary container, you specify the Docker image that contains inference code, artifacts (from prior training), and a custom environment map that the inference code uses when you deploy the model for predictions. Use this API to create a model if you want to use SageMaker hosting services or run a batch transform job. To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API. SageMaker then deploys all of the containers that you defined for the model in the hosting environment. For an example that calls this method when deploying a model to SageMaker hosting services, see Deploy the Model to Amazon SageMaker Hosting Services (Amazon Web Services SDK for Python (Boto 3)). To run a batch transform using your model, you start a job with the CreateTransformJob API. SageMaker uses your model and your dataset to get inferences which are then saved to a specified S3 location. In the request, you also provide an IAM role that SageMaker can assume to access model artifacts and docker image for deployment on ML compute hosting instances or for batch transform jobs. In addition, you also use the IAM role to manage permissions the inference code needs. For example, if the inference code access any other Amazon Web Services resources, you grant necessary permissions via this role.
+ * Creates a model in SageMaker. In the request, you name the model and describe a primary container. For the primary container, you specify the Docker image that contains inference code, artifacts (from prior training), and a custom environment map that the inference code uses when you deploy the model for predictions. Use this API to create a model if you want to use SageMaker hosting services or run a batch transform job. To host your model, you create an endpoint configuration with the CreateEndpointConfig API, and then create an endpoint with the CreateEndpoint API. SageMaker then deploys all of the containers that you defined for the model in the hosting environment. For an example that calls this method when deploying a model to SageMaker hosting services, see Create a Model (Amazon Web Services SDK for Python (Boto 3)). To run a batch transform using your model, you start a job with the CreateTransformJob API. SageMaker uses your model and your dataset to get inferences which are then saved to a specified S3 location. In the request, you also provide an IAM role that SageMaker can assume to access model artifacts and docker image for deployment on ML compute hosting instances or for batch transform jobs. In addition, you also use the IAM role to manage permissions the inference code needs. For example, if the inference code access any other Amazon Web Services resources, you grant necessary permissions via this role.
  */
  createModel(callback?: (err: AWSError, data: SageMaker.Types.CreateModelOutput) => void): Request<SageMaker.Types.CreateModelOutput, AWSError>;
  /**
@@ -2005,11 +2005,11 @@ declare class SageMaker extends Service {
  */
  updateUserProfile(callback?: (err: AWSError, data: SageMaker.Types.UpdateUserProfileResponse) => void): Request<SageMaker.Types.UpdateUserProfileResponse, AWSError>;
  /**
- * Use this operation to update your workforce. You can use this operation to require that workers use specific IP addresses to work on tasks and to update your OpenID Connect (OIDC) Identity Provider (IdP) workforce configuration. Use SourceIpConfig to restrict worker access to tasks to a specific range of IP addresses. You specify allowed IP addresses by creating a list of up to ten CIDRs. By default, a workforce isn't restricted to specific IP addresses. If you specify a range of IP addresses, workers who attempt to access tasks using any IP address outside the specified range are denied and get a Not Found error message on the worker portal. Use OidcConfig to update the configuration of a workforce created using your own OIDC IdP. You can only update your OIDC IdP configuration when there are no work teams associated with your workforce. You can delete work teams using the operation. After restricting access to a range of IP addresses or updating your OIDC IdP configuration with this operation, you can view details about your update workforce using the operation. This operation only applies to private workforces.
+ * Use this operation to update your workforce. You can use this operation to require that workers use specific IP addresses to work on tasks and to update your OpenID Connect (OIDC) Identity Provider (IdP) workforce configuration. The worker portal is now supported in VPC and public internet. Use SourceIpConfig to restrict worker access to tasks to a specific range of IP addresses. You specify allowed IP addresses by creating a list of up to ten CIDRs. By default, a workforce isn't restricted to specific IP addresses. If you specify a range of IP addresses, workers who attempt to access tasks using any IP address outside the specified range are denied and get a Not Found error message on the worker portal. To restrict access to all the workers in public internet, add the SourceIpConfig CIDR value as "0.0.0.0/0". Amazon SageMaker does not support Source Ip restriction for worker portals in VPC. Use OidcConfig to update the configuration of a workforce created using your own OIDC IdP. You can only update your OIDC IdP configuration when there are no work teams associated with your workforce. You can delete work teams using the operation. After restricting access to a range of IP addresses or updating your OIDC IdP configuration with this operation, you can view details about your update workforce using the operation. This operation only applies to private workforces.
  */
  updateWorkforce(params: SageMaker.Types.UpdateWorkforceRequest, callback?: (err: AWSError, data: SageMaker.Types.UpdateWorkforceResponse) => void): Request<SageMaker.Types.UpdateWorkforceResponse, AWSError>;
  /**
- * Use this operation to update your workforce. You can use this operation to require that workers use specific IP addresses to work on tasks and to update your OpenID Connect (OIDC) Identity Provider (IdP) workforce configuration. Use SourceIpConfig to restrict worker access to tasks to a specific range of IP addresses. You specify allowed IP addresses by creating a list of up to ten CIDRs. By default, a workforce isn't restricted to specific IP addresses. If you specify a range of IP addresses, workers who attempt to access tasks using any IP address outside the specified range are denied and get a Not Found error message on the worker portal. Use OidcConfig to update the configuration of a workforce created using your own OIDC IdP. You can only update your OIDC IdP configuration when there are no work teams associated with your workforce. You can delete work teams using the operation. After restricting access to a range of IP addresses or updating your OIDC IdP configuration with this operation, you can view details about your update workforce using the operation. This operation only applies to private workforces.
+ * Use this operation to update your workforce. You can use this operation to require that workers use specific IP addresses to work on tasks and to update your OpenID Connect (OIDC) Identity Provider (IdP) workforce configuration. The worker portal is now supported in VPC and public internet. Use SourceIpConfig to restrict worker access to tasks to a specific range of IP addresses. You specify allowed IP addresses by creating a list of up to ten CIDRs. By default, a workforce isn't restricted to specific IP addresses. If you specify a range of IP addresses, workers who attempt to access tasks using any IP address outside the specified range are denied and get a Not Found error message on the worker portal. To restrict access to all the workers in public internet, add the SourceIpConfig CIDR value as "0.0.0.0/0". Amazon SageMaker does not support Source Ip restriction for worker portals in VPC. Use OidcConfig to update the configuration of a workforce created using your own OIDC IdP. You can only update your OIDC IdP configuration when there are no work teams associated with your workforce. You can delete work teams using the operation. After restricting access to a range of IP addresses or updating your OIDC IdP configuration with this operation, you can view details about your update workforce using the operation. This operation only applies to private workforces.
  */
  updateWorkforce(callback?: (err: AWSError, data: SageMaker.Types.UpdateWorkforceResponse) => void): Request<SageMaker.Types.UpdateWorkforceResponse, AWSError>;
  /**
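The updated updateWorkforce text calls out "0.0.0.0/0" as the unrestricted CIDR and notes that source-IP restriction is not supported for worker portals in VPC. A sketch of restricting a public-internet workforce to a single range; the workforce name and CIDR are placeholders:

```ts
import SageMaker = require('aws-sdk/clients/sagemaker');

const sagemaker = new SageMaker({ region: 'us-east-1' });

sagemaker.updateWorkforce({
  WorkforceName: 'example-workforce',
  // Up to ten CIDRs; "0.0.0.0/0" would leave the portal unrestricted.
  SourceIpConfig: { Cidrs: ['203.0.113.0/24'] },
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.Workforce?.SourceIpConfig);
});
```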
@@ -2726,7 +2726,7 @@ declare namespace SageMaker {
  export type AutoMLContainerDefinitions = AutoMLContainerDefinition[];
  export interface AutoMLDataSource {
  /**
- * The Amazon S3 location of the input data. The input data must be in CSV format and contain at least 500 rows.
+ * The Amazon S3 location of the input data.
  */
  S3DataSource: AutoMLS3DataSource;
  }
@@ -2853,7 +2853,7 @@ declare namespace SageMaker {
  export type AutoMLPartialFailureReasons = AutoMLPartialFailureReason[];
  export interface AutoMLS3DataSource {
  /**
- * The data type.
+ * The data type. A ManifestFile should have the format shown below: [ {"prefix": "s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/DOC-EXAMPLE-PREFIX/"}, "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-1", "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-2", ... "DOC-EXAMPLE-RELATIVE-PATH/DOC-EXAMPLE-FOLDER/DATA-N" ] An S3Prefix should have the following format: s3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE
  */
  S3DataType: AutoMLS3DataType;
  /**
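For reference, the two S3DataType variants the expanded doc distinguishes look roughly like this when expressed as an AutoMLDataSource in TypeScript; the bucket and paths reuse the doc's own placeholder names:

```ts
import SageMaker = require('aws-sdk/clients/sagemaker');

// S3Prefix variant: point at a folder (or a single file) of input data.
const prefixSource: SageMaker.AutoMLDataSource = {
  S3DataSource: {
    S3DataType: 'S3Prefix',
    S3Uri: 's3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER-OR-FILE',
  },
};

// ManifestFile variant: point at a JSON manifest listing relative paths.
const manifestSource: SageMaker.AutoMLDataSource = {
  S3DataSource: {
    S3DataType: 'ManifestFile',
    S3Uri: 's3://DOC-EXAMPLE-BUCKET/DOC-EXAMPLE-FOLDER/manifest.json',
  },
};
```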
@@ -3557,7 +3557,7 @@ declare namespace SageMaker {
  */
  Tags?: TagList;
  /**
- * The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance.
+ * The instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. The value of InstanceType passed as part of the ResourceSpec in the CreateApp call overrides the value passed as part of the ResourceSpec configured for the user profile or the domain. If InstanceType is not specified in any of those three ResourceSpec values for a KernelGateway app, the CreateApp call fails with a request validation error.
  */
  ResourceSpec?: ResourceSpec;
  }
@@ -5042,6 +5042,10 @@ declare namespace SageMaker {
  * An array of key-value pairs that contain metadata to help you categorize and organize our workforce. Each tag consists of a key and a value, both of which you define.
  */
  Tags?: TagList;
+ /**
+ * Use this parameter to configure a workforce using VPC.
+ */
+ WorkforceVpcConfig?: WorkforceVpcConfigRequest;
  }
  export interface CreateWorkforceResponse {
  /**
@@ -10162,6 +10166,7 @@ declare namespace SageMaker {
  * The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training and inference jobs used for automated data labeling. You can only specify a VolumeKmsKeyId when you create a labeling job with automated data labeling enabled using the API operation CreateLabelingJob. You cannot specify an Amazon Web Services KMS key to encrypt the storage volume used for automated data labeling model training and inference when you create a labeling job using the console. To learn more, see Output Data and Storage Volume Encryption. The VolumeKmsKeyId can be any of the following formats: KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab" Amazon Resource Name (ARN) of a KMS Key "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
  */
  VolumeKmsKeyId?: KmsKeyId;
+ VpcConfig?: VpcConfig;
  }
  export interface LabelingJobS3DataSource {
  /**
@@ -15479,7 +15484,7 @@ declare namespace SageMaker {
  */
  SageMakerImageVersionArn?: ImageVersionArn;
  /**
- * The instance type that the image version runs on. JupyterServer Apps only support the system value. KernelGateway Apps do not support the system value, but support all other values for available instance types.
+ * The instance type that the image version runs on. JupyterServer apps only support the system value. For KernelGateway apps, the system value is translated to ml.t3.medium. KernelGateway apps also support all other values for available instance types.
  */
  InstanceType?: AppInstanceType;
  /**
@@ -17563,6 +17568,10 @@ declare namespace SageMaker {
  * Use this parameter to update your OIDC Identity Provider (IdP) configuration for a workforce made using your own IdP.
  */
  OidcConfig?: OidcConfig;
+ /**
+ * Use this parameter to update your VPC configuration for a workforce.
+ */
+ WorkforceVpcConfig?: WorkforceVpcConfigRequest;
  }
  export interface UpdateWorkforceResponse {
  /**
@@ -17747,9 +17756,61 @@ declare namespace SageMaker {
  * The date that the workforce is created.
  */
  CreateDate?: Timestamp;
+ /**
+ * The configuration of a VPC workforce.
+ */
+ WorkforceVpcConfig?: WorkforceVpcConfigResponse;
+ /**
+ * The status of your workforce.
+ */
+ Status?: WorkforceStatus;
+ /**
+ * The reason your workforce failed.
+ */
+ FailureReason?: WorkforceFailureReason;
  }
  export type WorkforceArn = string;
+ export type WorkforceFailureReason = string;
  export type WorkforceName = string;
+ export type WorkforceSecurityGroupId = string;
+ export type WorkforceSecurityGroupIds = WorkforceSecurityGroupId[];
+ export type WorkforceStatus = "Initializing"|"Updating"|"Deleting"|"Failed"|"Active"|string;
+ export type WorkforceSubnetId = string;
+ export type WorkforceSubnets = WorkforceSubnetId[];
+ export interface WorkforceVpcConfigRequest {
+ /**
+ * The ID of the VPC that the workforce uses for communication.
+ */
+ VpcId?: WorkforceVpcId;
+ /**
+ * The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet.
+ */
+ SecurityGroupIds?: WorkforceSecurityGroupIds;
+ /**
+ * The ID of the subnets in the VPC that you want to connect.
+ */
+ Subnets?: WorkforceSubnets;
+ }
+ export interface WorkforceVpcConfigResponse {
+ /**
+ * The ID of the VPC that the workforce uses for communication.
+ */
+ VpcId: WorkforceVpcId;
+ /**
+ * The VPC security group IDs, in the form sg-xxxxxxxx. The security groups must be for the same VPC as specified in the subnet.
+ */
+ SecurityGroupIds: WorkforceSecurityGroupIds;
+ /**
+ * The ID of the subnets in the VPC that you want to connect.
+ */
+ Subnets: WorkforceSubnets;
+ /**
+ * The IDs for the VPC service endpoints of your VPC workforce when it is created and updated.
+ */
+ VpcEndpointId?: WorkforceVpcEndpointId;
+ }
+ export type WorkforceVpcEndpointId = string;
+ export type WorkforceVpcId = string;
  export type Workforces = Workforce[];
  export interface Workteam {
  /**
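Putting the new workforce VPC types together: a hedged sketch of creating a private workforce whose portal lives in a VPC, using the WorkforceVpcConfigRequest fields above. The Cognito pool, client, and network IDs are placeholders:

```ts
import SageMaker = require('aws-sdk/clients/sagemaker');

const sagemaker = new SageMaker({ region: 'us-east-1' });

sagemaker.createWorkforce({
  WorkforceName: 'example-vpc-workforce',
  CognitoConfig: { UserPool: 'us-east-1_EXAMPLE', ClientId: 'exampleclientid' },
  // New in this release: VPC, security groups, and subnets for the portal.
  WorkforceVpcConfig: {
    VpcId: 'vpc-0123456789abcdef0',
    SecurityGroupIds: ['sg-0123456789abcdef0'],
    Subnets: ['subnet-0123456789abcdef0'],
  },
}, (err, data) => {
  if (err) console.error(err);
  else console.log('Workforce ARN:', data.WorkforceArn);
});
```

The matching WorkforceVpcConfigResponse and WorkforceStatus values surface on describeWorkforce while the workforce moves through Initializing toward Active.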
@@ -368,7 +368,7 @@ declare namespace Transfer {
  */
  EndpointType?: EndpointType;
  /**
- * The RSA private key as generated by the ssh-keygen -N "" -m PEM -f my-new-server-key command. If you aren't planning to migrate existing users from an existing SFTP-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive. For more information, see Change the host key for your SFTP-enabled server in the Amazon Web Services Transfer Family User Guide.
+ * The RSA, ECDSA, or ED25519 private key to use for your server. Use the following command to generate an RSA 2048 bit key with no passphrase: ssh-keygen -t rsa -b 2048 -N "" -m PEM -f my-new-server-key. Use a minimum value of 2048 for the -b option: you can create a stronger key using 3072 or 4096. Use the following command to generate an ECDSA 256 bit key with no passphrase: ssh-keygen -t ecdsa -b 256 -N "" -m PEM -f my-new-server-key. Valid values for the -b option for ECDSA are 256, 384, and 521. Use the following command to generate an ED25519 key with no passphrase: ssh-keygen -t ed25519 -N "" -f my-new-server-key. For all of these commands, you can replace my-new-server-key with a string of your choice. If you aren't planning to migrate existing users from an existing SFTP-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive. For more information, see Change the host key for your SFTP-enabled server in the Amazon Web Services Transfer Family User Guide.
  */
  HostKey?: HostKey;
  /**
@@ -448,7 +448,7 @@ declare namespace Transfer {
  */
  ServerId: ServerId;
  /**
- * The public portion of the Secure Shell (SSH) key used to authenticate the user to the server. Currently, Transfer Family does not accept elliptical curve keys (keys beginning with ecdsa).
+ * The public portion of the Secure Shell (SSH) key used to authenticate the user to the server. Transfer Family accepts RSA, ECDSA, and ED25519 keys.
  */
  SshPublicKeyBody?: SshPublicKeyBody;
  /**
@@ -762,7 +762,7 @@ declare namespace Transfer {
  */
  Certificate?: Certificate;
  /**
- * The protocol settings that are configured for your server. Use the PassiveIp parameter to indicate passive mode. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.
+ * The protocol settings that are configured for your server. Use the PassiveIp parameter to indicate passive mode. Enter a single IPv4 address, such as the public IP address of a firewall, router, or load balancer.
  */
  ProtocolDetails?: ProtocolDetails;
  /**
@@ -1027,7 +1027,7 @@ declare namespace Transfer {
  */
  ServerId: ServerId;
  /**
- * The public key portion of an SSH key pair.
+ * The public key portion of an SSH key pair. Transfer Family accepts RSA, ECDSA, and ED25519 keys.
  */
  SshPublicKeyBody: SshPublicKeyBody;
  /**
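With ECDSA and ED25519 keys now accepted alongside RSA, importing a modern user key is straightforward; server ID, user name, and key body below are placeholders:

```ts
import Transfer = require('aws-sdk/clients/transfer');

const transfer = new Transfer({ region: 'us-east-1' });

transfer.importSshPublicKey({
  ServerId: 's-0123456789abcdef0',
  UserName: 'example-user',
  // An ED25519 public key is now accepted, not just RSA.
  SshPublicKeyBody: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIExample example-user',
}, (err, data) => {
  if (err) console.error(err);
  else console.log('Imported key:', data.SshPublicKeyId);
});
```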
@@ -1387,15 +1387,15 @@ declare namespace Transfer {
  export type Protocol = "SFTP"|"FTP"|"FTPS"|string;
  export interface ProtocolDetails {
  /**
- * Indicates passive mode, for FTP and FTPS protocols. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer. For example: aws transfer update-server --protocol-details PassiveIp=0.0.0.0 Replace 0.0.0.0 in the example above with the actual IP address you want to use. If you change the PassiveIp value, you must stop and then restart your Transfer server for the change to take effect. For details on using Passive IP (PASV) in a NAT environment, see Configuring your FTPS server behind a firewall or NAT with Amazon Web Services Transfer Family.
+ * Indicates passive mode, for FTP and FTPS protocols. Enter a single IPv4 address, such as the public IP address of a firewall, router, or load balancer. For example: aws transfer update-server --protocol-details PassiveIp=0.0.0.0 Replace 0.0.0.0 in the example above with the actual IP address you want to use. If you change the PassiveIp value, you must stop and then restart your Transfer Family server for the change to take effect. For details on using passive mode (PASV) in a NAT environment, see Configuring your FTPS server behind a firewall or NAT with Transfer Family.
  */
  PassiveIp?: PassiveIp;
  /**
- * A property used with Transfer servers that use the FTPS protocol. TLS Session Resumption provides a mechanism to resume or share a negotiated secret key between the control and data connection for an FTPS session. TlsSessionResumptionMode determines whether or not the server resumes recent, negotiated sessions through a unique session ID. This property is available during CreateServer and UpdateServer calls. If a TlsSessionResumptionMode value is not specified during CreateServer, it is set to ENFORCED by default. DISABLED: the server does not process TLS session resumption client requests and creates a new TLS session for each request. ENABLED: the server processes and accepts clients that are performing TLS session resumption. The server doesn't reject client data connections that do not perform the TLS session resumption client processing. ENFORCED: the server processes and accepts clients that are performing TLS session resumption. The server rejects client data connections that do not perform the TLS session resumption client processing. Before you set the value to ENFORCED, test your clients. Not all FTPS clients perform TLS session resumption. So, if you choose to enforce TLS session resumption, you prevent any connections from FTPS clients that don't perform the protocol negotiation. To determine whether or not you can use the ENFORCED value, you need to test your clients.
+ * A property used with Transfer Family servers that use the FTPS protocol. TLS Session Resumption provides a mechanism to resume or share a negotiated secret key between the control and data connection for an FTPS session. TlsSessionResumptionMode determines whether or not the server resumes recent, negotiated sessions through a unique session ID. This property is available during CreateServer and UpdateServer calls. If a TlsSessionResumptionMode value is not specified during CreateServer, it is set to ENFORCED by default. DISABLED: the server does not process TLS session resumption client requests and creates a new TLS session for each request. ENABLED: the server processes and accepts clients that are performing TLS session resumption. The server doesn't reject client data connections that do not perform the TLS session resumption client processing. ENFORCED: the server processes and accepts clients that are performing TLS session resumption. The server rejects client data connections that do not perform the TLS session resumption client processing. Before you set the value to ENFORCED, test your clients. Not all FTPS clients perform TLS session resumption. So, if you choose to enforce TLS session resumption, you prevent any connections from FTPS clients that don't perform the protocol negotiation. To determine whether or not you can use the ENFORCED value, you need to test your clients.
  */
  TlsSessionResumptionMode?: TlsSessionResumptionMode;
  /**
- * Use the SetStatOption to ignore the error that is generated when the client attempts to use SETSTAT on a file you are uploading to an S3 bucket. Some SFTP file transfer clients can attempt to change the attributes of remote files, including timestamp and permissions, using commands, such as SETSTAT when uploading the file. However, these commands are not compatible with object storage systems, such as Amazon S3. Due to this incompatibility, file uploads from these clients can result in errors even when the file is otherwise successfully uploaded. Set the value to ENABLE_NO_OP to have the Transfer Family server ignore the SETSTAT command, and upload files without needing to make any changes to your SFTP client. While the SetStatOption ENABLE_NO_OP setting ignores the error, it does generate a log entry in CloudWatch Logs, so you can determine when the client is making a SETSTAT call. If you want to preserve the original timestamp for your file, and modify other file attributes using SETSTAT, you can use Amazon EFS as backend storage with Transfer Family.
+ * Use the SetStatOption to ignore the error that is generated when the client attempts to use SETSTAT on a file you are uploading to an S3 bucket. Some SFTP file transfer clients can attempt to change the attributes of remote files, including timestamp and permissions, using commands, such as SETSTAT when uploading the file. However, these commands are not compatible with object storage systems, such as Amazon S3. Due to this incompatibility, file uploads from these clients can result in errors even when the file is otherwise successfully uploaded. Set the value to ENABLE_NO_OP to have the Transfer Family server ignore the SETSTAT command, and upload files without needing to make any changes to your SFTP client. While the SetStatOption ENABLE_NO_OP setting ignores the error, it does generate a log entry in Amazon CloudWatch Logs, so you can determine when the client is making a SETSTAT call. If you want to preserve the original timestamp for your file, and modify other file attributes using SETSTAT, you can use Amazon EFS as backend storage with Transfer Family.
  */
  SetStatOption?: SetStatOption;
  }
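A sketch of applying the ProtocolDetails settings described above through updateServer, the SDK equivalent of the CLI example in the PassiveIp doc; the server ID and IP are placeholders, and note the doc's caveat that a changed PassiveIp requires a stop/restart of the server to take effect:

```ts
import Transfer = require('aws-sdk/clients/transfer');

const transfer = new Transfer({ region: 'us-east-1' });

transfer.updateServer({
  ServerId: 's-0123456789abcdef0',
  ProtocolDetails: {
    PassiveIp: '203.0.113.10',            // public IP of the firewall/NAT in front of the server
    TlsSessionResumptionMode: 'ENFORCED', // test your FTPS clients first, per the docs
    SetStatOption: 'ENABLE_NO_OP',        // ignore SETSTAT errors on S3-backed servers
  },
}, (err, data) => {
  if (err) console.error(err);
  else console.log('Updated server:', data.ServerId);
});
```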
@@ -1491,7 +1491,7 @@ declare namespace Transfer {
  */
  DateImported: DateImported;
  /**
- * Specifies the content of the SSH public key as specified by the PublicKeyId.
+ * Specifies the content of the SSH public key as specified by the PublicKeyId. Transfer Family accepts RSA, ECDSA, and ED25519 keys.
  */
  SshPublicKeyBody: SshPublicKeyBody;
  /**
@@ -1668,7 +1668,7 @@ declare namespace Transfer {
  */
  EndpointType?: EndpointType;
  /**
- * The RSA private key as generated by ssh-keygen -N "" -m PEM -f my-new-server-key. If you aren't planning to migrate existing users from an existing server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive. For more information, see Change the host key for your SFTP-enabled server in the Amazon Web ServicesTransfer Family User Guide.
+ * The RSA, ECDSA, or ED25519 private key to use for your server. Use the following command to generate an RSA 2048 bit key with no passphrase: ssh-keygen -t rsa -b 2048 -N "" -m PEM -f my-new-server-key. Use a minimum value of 2048 for the -b option: you can create a stronger key using 3072 or 4096. Use the following command to generate an ECDSA 256 bit key with no passphrase: ssh-keygen -t ecdsa -b 256 -N "" -m PEM -f my-new-server-key. Valid values for the -b option for ECDSA are 256, 384, and 521. Use the following command to generate an ED25519 key with no passphrase: ssh-keygen -t ed25519 -N "" -f my-new-server-key. For all of these commands, you can replace my-new-server-key with a string of your choice. If you aren't planning to migrate existing users from an existing SFTP-enabled server to a new server, don't update the host key. Accidentally changing a server's host key can be disruptive. For more information, see Change the host key for your SFTP-enabled server in the Amazon Web Services Transfer Family User Guide.
  */
  HostKey?: HostKey;
  /**
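The rewritten HostKey docs give ssh-keygen commands for each accepted key type. A hedged sketch of rotating a host key with updateServer under the assumption that the key was generated beforehand; the server ID is a placeholder, and the disruption caveat in the docs above applies:

```ts
import { readFileSync } from 'fs';
import Transfer = require('aws-sdk/clients/transfer');

const transfer = new Transfer({ region: 'us-east-1' });

// Key generated beforehand with: ssh-keygen -t ed25519 -N "" -f my-new-server-key
const hostKey = readFileSync('my-new-server-key', 'utf8');

transfer.updateServer({
  ServerId: 's-0123456789abcdef0', // placeholder
  HostKey: hostKey,
}, (err, data) => {
  if (err) console.error(err);
  else console.log('Host key updated on', data.ServerId);
});
```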