cdk-docker-image-deployment 0.0.51 → 0.0.52

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,19 +12,19 @@ declare class DataSync extends Service {
12
12
  constructor(options?: DataSync.Types.ClientConfiguration)
13
13
  config: Config & DataSync.Types.ClientConfiguration;
14
14
  /**
15
- * Cancels execution of a task. When you cancel a task execution, the transfer of some files is abruptly interrupted. The contents of files that are transferred to the destination might be incomplete or inconsistent with the source files. However, if you start a new task execution on the same task and you allow the task execution to complete, file content on the destination is complete and consistent. This applies to other unexpected failures that interrupt a task execution. In all of these cases, DataSync successfully complete the transfer when you start the next task execution.
15
+ * Stops a DataSync task execution that's in progress. The transfer of some files is abruptly interrupted. File contents that are transferred to the destination might be incomplete or inconsistent with the source files. However, if you start a new task execution using the same task and allow it to finish, file content on the destination will be complete and consistent. This applies to other unexpected failures that interrupt a task execution. In all of these cases, DataSync successfully completes the transfer when you start the next task execution.
16
16
  */
17
17
  cancelTaskExecution(params: DataSync.Types.CancelTaskExecutionRequest, callback?: (err: AWSError, data: DataSync.Types.CancelTaskExecutionResponse) => void): Request<DataSync.Types.CancelTaskExecutionResponse, AWSError>;
18
18
  /**
19
- * Cancels execution of a task. When you cancel a task execution, the transfer of some files is abruptly interrupted. The contents of files that are transferred to the destination might be incomplete or inconsistent with the source files. However, if you start a new task execution on the same task and you allow the task execution to complete, file content on the destination is complete and consistent. This applies to other unexpected failures that interrupt a task execution. In all of these cases, DataSync successfully complete the transfer when you start the next task execution.
19
+ * Stops a DataSync task execution that's in progress. The transfer of some files is abruptly interrupted. File contents that are transferred to the destination might be incomplete or inconsistent with the source files. However, if you start a new task execution using the same task and allow it to finish, file content on the destination will be complete and consistent. This applies to other unexpected failures that interrupt a task execution. In all of these cases, DataSync successfully completes the transfer when you start the next task execution.
20
20
  */
21
21
  cancelTaskExecution(callback?: (err: AWSError, data: DataSync.Types.CancelTaskExecutionResponse) => void): Request<DataSync.Types.CancelTaskExecutionResponse, AWSError>;
22
22
  /**
23
- * Activates an DataSync agent that you have deployed on your host. The activation process associates your agent with your account. In the activation process, you specify information such as the Amazon Web Services Region that you want to activate the agent in. You activate the agent in the Amazon Web Services Region where your target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in this Amazon Web Services Region. You can activate the agent in a VPC (virtual private cloud) or provide the agent access to a VPC endpoint so you can run tasks without going over the public internet. You can use an agent for more than one location. If a task uses multiple agents, all of them need to have status AVAILABLE for the task to run. If you use multiple agents for a source location, the status of all the agents must be AVAILABLE for the task to run. Agents are automatically updated by Amazon Web Services on a regular basis, using a mechanism that ensures minimal interruption to your tasks.
23
+ * Activates a DataSync agent that you have deployed in your storage environment. The activation process associates your agent with your account. In the activation process, you specify information such as the Amazon Web Services Region that you want to activate the agent in. You activate the agent in the Amazon Web Services Region where your target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in this Amazon Web Services Region. You can activate the agent in a VPC (virtual private cloud) or provide the agent access to a VPC endpoint so you can run tasks without going over the public internet. You can use an agent for more than one location. If a task uses multiple agents, all of them need to have status AVAILABLE for the task to run. If you use multiple agents for a source location, the status of all the agents must be AVAILABLE for the task to run. Agents are automatically updated by Amazon Web Services on a regular basis, using a mechanism that ensures minimal interruption to your tasks.
24
24
  */
25
25
  createAgent(params: DataSync.Types.CreateAgentRequest, callback?: (err: AWSError, data: DataSync.Types.CreateAgentResponse) => void): Request<DataSync.Types.CreateAgentResponse, AWSError>;
26
26
  /**
27
- * Activates an DataSync agent that you have deployed on your host. The activation process associates your agent with your account. In the activation process, you specify information such as the Amazon Web Services Region that you want to activate the agent in. You activate the agent in the Amazon Web Services Region where your target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in this Amazon Web Services Region. You can activate the agent in a VPC (virtual private cloud) or provide the agent access to a VPC endpoint so you can run tasks without going over the public internet. You can use an agent for more than one location. If a task uses multiple agents, all of them need to have status AVAILABLE for the task to run. If you use multiple agents for a source location, the status of all the agents must be AVAILABLE for the task to run. Agents are automatically updated by Amazon Web Services on a regular basis, using a mechanism that ensures minimal interruption to your tasks.
27
+ * Activates a DataSync agent that you have deployed in your storage environment. The activation process associates your agent with your account. In the activation process, you specify information such as the Amazon Web Services Region that you want to activate the agent in. You activate the agent in the Amazon Web Services Region where your target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in this Amazon Web Services Region. You can activate the agent in a VPC (virtual private cloud) or provide the agent access to a VPC endpoint so you can run tasks without going over the public internet. You can use an agent for more than one location. If a task uses multiple agents, all of them need to have status AVAILABLE for the task to run. If you use multiple agents for a source location, the status of all the agents must be AVAILABLE for the task to run. Agents are automatically updated by Amazon Web Services on a regular basis, using a mechanism that ensures minimal interruption to your tasks.
28
28
  */
29
29
  createAgent(callback?: (err: AWSError, data: DataSync.Types.CreateAgentResponse) => void): Request<DataSync.Types.CreateAgentResponse, AWSError>;
30
30
  /**
@@ -52,11 +52,11 @@ declare class DataSync extends Service {
52
52
  */
53
53
  createLocationFsxOntap(callback?: (err: AWSError, data: DataSync.Types.CreateLocationFsxOntapResponse) => void): Request<DataSync.Types.CreateLocationFsxOntapResponse, AWSError>;
54
54
  /**
55
- * Creates an endpoint for an Amazon FSx for OpenZFS file system.
55
+ * Creates an endpoint for an Amazon FSx for OpenZFS file system that DataSync can access for a transfer. For more information, see Creating a location for FSx for OpenZFS. Request parameters related to SMB aren't supported with the CreateLocationFsxOpenZfs operation.
56
56
  */
57
57
  createLocationFsxOpenZfs(params: DataSync.Types.CreateLocationFsxOpenZfsRequest, callback?: (err: AWSError, data: DataSync.Types.CreateLocationFsxOpenZfsResponse) => void): Request<DataSync.Types.CreateLocationFsxOpenZfsResponse, AWSError>;
58
58
  /**
59
- * Creates an endpoint for an Amazon FSx for OpenZFS file system.
59
+ * Creates an endpoint for an Amazon FSx for OpenZFS file system that DataSync can access for a transfer. For more information, see Creating a location for FSx for OpenZFS. Request parameters related to SMB aren't supported with the CreateLocationFsxOpenZfs operation.
60
60
  */
61
61
  createLocationFsxOpenZfs(callback?: (err: AWSError, data: DataSync.Types.CreateLocationFsxOpenZfsResponse) => void): Request<DataSync.Types.CreateLocationFsxOpenZfsResponse, AWSError>;
62
62
  /**
@@ -92,11 +92,11 @@ declare class DataSync extends Service {
92
92
  */
93
93
  createLocationObjectStorage(callback?: (err: AWSError, data: DataSync.Types.CreateLocationObjectStorageResponse) => void): Request<DataSync.Types.CreateLocationObjectStorageResponse, AWSError>;
94
94
  /**
95
- * Creates an endpoint for an Amazon S3 bucket. For more information, see Create an Amazon S3 location in the DataSync User Guide.
95
+ * Creates an endpoint for an Amazon S3 bucket that DataSync can access for a transfer. For more information, see Create an Amazon S3 location in the DataSync User Guide.
96
96
  */
97
97
  createLocationS3(params: DataSync.Types.CreateLocationS3Request, callback?: (err: AWSError, data: DataSync.Types.CreateLocationS3Response) => void): Request<DataSync.Types.CreateLocationS3Response, AWSError>;
98
98
  /**
99
- * Creates an endpoint for an Amazon S3 bucket. For more information, see Create an Amazon S3 location in the DataSync User Guide.
99
+ * Creates an endpoint for an Amazon S3 bucket that DataSync can access for a transfer. For more information, see Create an Amazon S3 location in the DataSync User Guide.
100
100
  */
101
101
  createLocationS3(callback?: (err: AWSError, data: DataSync.Types.CreateLocationS3Response) => void): Request<DataSync.Types.CreateLocationS3Response, AWSError>;
102
102
  /**
@@ -156,27 +156,27 @@ declare class DataSync extends Service {
156
156
  */
157
157
  describeLocationEfs(callback?: (err: AWSError, data: DataSync.Types.DescribeLocationEfsResponse) => void): Request<DataSync.Types.DescribeLocationEfsResponse, AWSError>;
158
158
  /**
159
- * Returns metadata about an Amazon FSx for Lustre location, such as information about its path.
159
+ * Provides details about how a DataSync location for an Amazon FSx for Lustre file system is configured.
160
160
  */
161
161
  describeLocationFsxLustre(params: DataSync.Types.DescribeLocationFsxLustreRequest, callback?: (err: AWSError, data: DataSync.Types.DescribeLocationFsxLustreResponse) => void): Request<DataSync.Types.DescribeLocationFsxLustreResponse, AWSError>;
162
162
  /**
163
- * Returns metadata about an Amazon FSx for Lustre location, such as information about its path.
163
+ * Provides details about how a DataSync location for an Amazon FSx for Lustre file system is configured.
164
164
  */
165
165
  describeLocationFsxLustre(callback?: (err: AWSError, data: DataSync.Types.DescribeLocationFsxLustreResponse) => void): Request<DataSync.Types.DescribeLocationFsxLustreResponse, AWSError>;
166
166
  /**
167
- * Provides details about how an DataSync location for an Amazon FSx for NetApp ONTAP file system is configured.
167
+ * Provides details about how a DataSync location for an Amazon FSx for NetApp ONTAP file system is configured. If your location uses SMB, the DescribeLocationFsxOntap operation doesn't actually return a Password.
168
168
  */
169
169
  describeLocationFsxOntap(params: DataSync.Types.DescribeLocationFsxOntapRequest, callback?: (err: AWSError, data: DataSync.Types.DescribeLocationFsxOntapResponse) => void): Request<DataSync.Types.DescribeLocationFsxOntapResponse, AWSError>;
170
170
  /**
171
- * Provides details about how an DataSync location for an Amazon FSx for NetApp ONTAP file system is configured.
171
+ * Provides details about how a DataSync location for an Amazon FSx for NetApp ONTAP file system is configured. If your location uses SMB, the DescribeLocationFsxOntap operation doesn't actually return a Password.
172
172
  */
173
173
  describeLocationFsxOntap(callback?: (err: AWSError, data: DataSync.Types.DescribeLocationFsxOntapResponse) => void): Request<DataSync.Types.DescribeLocationFsxOntapResponse, AWSError>;
174
174
  /**
175
- * Returns metadata about an Amazon FSx for OpenZFS location, such as information about its path.
175
+ * Provides details about how a DataSync location for an Amazon FSx for OpenZFS file system is configured. Response elements related to SMB aren't supported with the DescribeLocationFsxOpenZfs operation.
176
176
  */
177
177
  describeLocationFsxOpenZfs(params: DataSync.Types.DescribeLocationFsxOpenZfsRequest, callback?: (err: AWSError, data: DataSync.Types.DescribeLocationFsxOpenZfsResponse) => void): Request<DataSync.Types.DescribeLocationFsxOpenZfsResponse, AWSError>;
178
178
  /**
179
- * Returns metadata about an Amazon FSx for OpenZFS location, such as information about its path.
179
+ * Provides details about how a DataSync location for an Amazon FSx for OpenZFS file system is configured. Response elements related to SMB aren't supported with the DescribeLocationFsxOpenZfs operation.
180
180
  */
181
181
  describeLocationFsxOpenZfs(callback?: (err: AWSError, data: DataSync.Types.DescribeLocationFsxOpenZfsResponse) => void): Request<DataSync.Types.DescribeLocationFsxOpenZfsResponse, AWSError>;
182
182
  /**
@@ -332,11 +332,11 @@ declare class DataSync extends Service {
332
332
  */
333
333
  updateLocationNfs(callback?: (err: AWSError, data: DataSync.Types.UpdateLocationNfsResponse) => void): Request<DataSync.Types.UpdateLocationNfsResponse, AWSError>;
334
334
  /**
335
- * Updates some of the parameters of a previously created location for self-managed object storage server access. For information about creating a self-managed object storage location, see Creating a location for object storage.
335
+ * Updates some parameters of an existing object storage location that DataSync accesses for a transfer. For information about creating a self-managed object storage location, see Creating a location for object storage.
336
336
  */
337
337
  updateLocationObjectStorage(params: DataSync.Types.UpdateLocationObjectStorageRequest, callback?: (err: AWSError, data: DataSync.Types.UpdateLocationObjectStorageResponse) => void): Request<DataSync.Types.UpdateLocationObjectStorageResponse, AWSError>;
338
338
  /**
339
- * Updates some of the parameters of a previously created location for self-managed object storage server access. For information about creating a self-managed object storage location, see Creating a location for object storage.
339
+ * Updates some parameters of an existing object storage location that DataSync accesses for a transfer. For information about creating a self-managed object storage location, see Creating a location for object storage.
340
340
  */
341
341
  updateLocationObjectStorage(callback?: (err: AWSError, data: DataSync.Types.UpdateLocationObjectStorageResponse) => void): Request<DataSync.Types.UpdateLocationObjectStorageResponse, AWSError>;
342
342
  /**
@@ -388,7 +388,7 @@ declare namespace DataSync {
388
388
  export type BytesPerSecond = number;
389
389
  export interface CancelTaskExecutionRequest {
390
390
  /**
391
- * The Amazon Resource Name (ARN) of the task execution to cancel.
391
+ * The Amazon Resource Name (ARN) of the task execution to stop.
392
392
  */
393
393
  TaskExecutionArn: TaskExecutionArn;
394
394
  }
@@ -700,6 +700,10 @@ declare namespace DataSync {
700
700
  * Specifies the key-value pair that represents a tag that you want to add to the resource. Tags can help you manage, filter, and search for your resources. We recommend creating a name tag for your location.
701
701
  */
702
702
  Tags?: InputTagList;
703
+ /**
704
+ * Specifies a certificate to authenticate with an object storage system that uses a private or self-signed certificate authority (CA). You must specify a Base64-encoded .pem file (for example, file:///home/user/.ssh/storage_sys_certificate.pem). The certificate can be up to 32768 bytes (before Base64 encoding). To use this parameter, configure ServerProtocol to HTTPS.
705
+ */
706
+ ServerCertificate?: ObjectStorageCertificate;
703
707
  }
704
708
  export interface CreateLocationObjectStorageResponse {
705
709
  /**
@@ -1127,7 +1131,7 @@ declare namespace DataSync {
1127
1131
  */
1128
1132
  LocationUri?: LocationUri;
1129
1133
  /**
1130
- * The access key (for example, a user name) required to authenticate with the object storage server.
1134
+ * The access key (for example, a user name) required to authenticate with the object storage system.
1131
1135
  */
1132
1136
  AccessKey?: ObjectStorageAccessKey;
1133
1137
  /**
@@ -1135,7 +1139,7 @@ declare namespace DataSync {
1135
1139
  */
1136
1140
  ServerPort?: ObjectStorageServerPort;
1137
1141
  /**
1138
- * The protocol that your object storage server uses to communicate.
1142
+ * The protocol that your object storage system uses to communicate.
1139
1143
  */
1140
1144
  ServerProtocol?: ObjectStorageServerProtocol;
1141
1145
  /**
@@ -1146,6 +1150,10 @@ declare namespace DataSync {
1146
1150
  * The time that the location was created.
1147
1151
  */
1148
1152
  CreationTime?: Time;
1153
+ /**
1154
+ * The self-signed certificate that DataSync uses to securely authenticate with your object storage system.
1155
+ */
1156
+ ServerCertificate?: ObjectStorageCertificate;
1149
1157
  }
1150
1158
  export interface DescribeLocationS3Request {
1151
1159
  /**
@@ -1229,11 +1237,11 @@ declare namespace DataSync {
1229
1237
  Status?: TaskExecutionStatus;
1230
1238
  Options?: Options;
1231
1239
  /**
1232
- * A list of filter rules that determines which files to exclude from a task. The list should contain a single filter string that consists of the patterns to exclude. The patterns are delimited by "|" (that is, a pipe), for example: "/folder1|/folder2"
1240
+ * A list of filter rules that determines which files to exclude from a task. The list should contain a single filter string that consists of the patterns to exclude. The patterns are delimited by "|" (that is, a pipe), for example: "/folder1|/folder2"
1233
1241
  */
1234
1242
  Excludes?: FilterList;
1235
1243
  /**
1236
- * A list of filter rules that determines which files to include when running a task. The list should contain a single filter string that consists of the patterns to include. The patterns are delimited by "|" (that is, a pipe), for example: "/folder1|/folder2"
1244
+ * A list of filter rules that determines which files to include when running a task. The list should contain a single filter string that consists of the patterns to include. The patterns are delimited by "|" (that is, a pipe), for example: "/folder1|/folder2"
1237
1245
  */
1238
1246
  Includes?: FilterList;
1239
1247
  /**
@@ -1264,6 +1272,10 @@ declare namespace DataSync {
1264
1272
  * The result of the task execution.
1265
1273
  */
1266
1274
  Result?: TaskExecutionResultDetail;
1275
+ /**
1276
+ * The physical number of bytes transferred over the network after compression was applied. In most cases, this number is less than BytesTransferred.
1277
+ */
1278
+ BytesCompressed?: long;
1267
1279
  }
1268
1280
  export interface DescribeTaskRequest {
1269
1281
  /**
@@ -1594,6 +1606,7 @@ declare namespace DataSync {
1594
1606
  export type NfsVersion = "AUTOMATIC"|"NFS3"|"NFS4_0"|"NFS4_1"|string;
1595
1607
  export type ObjectStorageAccessKey = string;
1596
1608
  export type ObjectStorageBucketName = string;
1609
+ export type ObjectStorageCertificate = Buffer|Uint8Array|Blob|string;
1597
1610
  export type ObjectStorageSecretKey = string;
1598
1611
  export type ObjectStorageServerPort = number;
1599
1612
  export type ObjectStorageServerProtocol = "HTTPS"|"HTTP"|string;
@@ -1965,33 +1978,37 @@ declare namespace DataSync {
1965
1978
  }
1966
1979
  export interface UpdateLocationObjectStorageRequest {
1967
1980
  /**
1968
- * The Amazon Resource Name (ARN) of the self-managed object storage server location to be updated.
1981
+ * Specifies the ARN of the object storage system location that you're updating.
1969
1982
  */
1970
1983
  LocationArn: LocationArn;
1971
1984
  /**
1972
- * The port that your self-managed object storage server accepts inbound network traffic on. The server port is set by default to TCP 80 (HTTP) or TCP 443 (HTTPS). You can specify a custom port if your self-managed object storage server requires one.
1985
+ * Specifies the port that your object storage server accepts inbound network traffic on (for example, port 443).
1973
1986
  */
1974
1987
  ServerPort?: ObjectStorageServerPort;
1975
1988
  /**
1976
- * The protocol that the object storage server uses to communicate. Valid values are HTTP or HTTPS.
1989
+ * Specifies the protocol that your object storage server uses to communicate.
1977
1990
  */
1978
1991
  ServerProtocol?: ObjectStorageServerProtocol;
1979
1992
  /**
1980
- * The subdirectory in the self-managed object storage server that is used to read data from.
1993
+ * Specifies the object prefix for your object storage server. If this is a source location, DataSync only copies objects with this prefix. If this is a destination location, DataSync writes all objects with this prefix.
1981
1994
  */
1982
1995
  Subdirectory?: S3Subdirectory;
1983
1996
  /**
1984
- * Optional. The access key is used if credentials are required to access the self-managed object storage server. If your object storage requires a user name and password to authenticate, use AccessKey and SecretKey to provide the user name and password, respectively.
1997
+ * Specifies the access key (for example, a user name) if credentials are required to authenticate with the object storage server.
1985
1998
  */
1986
1999
  AccessKey?: ObjectStorageAccessKey;
1987
2000
  /**
1988
- * Optional. The secret key is used if credentials are required to access the self-managed object storage server. If your object storage requires a user name and password to authenticate, use AccessKey and SecretKey to provide the user name and password, respectively.
2001
+ * Specifies the secret key (for example, a password) if credentials are required to authenticate with the object storage server.
1989
2002
  */
1990
2003
  SecretKey?: ObjectStorageSecretKey;
1991
2004
  /**
1992
- * The Amazon Resource Name (ARN) of the agents associated with the self-managed object storage server location.
2005
+ * Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can securely connect with your location.
1993
2006
  */
1994
2007
  AgentArns?: AgentArnList;
2008
+ /**
2009
+ * Specifies a certificate to authenticate with an object storage system that uses a private or self-signed certificate authority (CA). You must specify a Base64-encoded .pem file (for example, file:///home/user/.ssh/storage_sys_certificate.pem). The certificate can be up to 32768 bytes (before Base64 encoding). To use this parameter, configure ServerProtocol to HTTPS. Updating the certificate doesn't interfere with tasks that you have in progress.
2010
+ */
2011
+ ServerCertificate?: ObjectStorageCertificate;
1995
2012
  }
1996
2013
  export interface UpdateLocationObjectStorageResponse {
1997
2014
  }
@@ -1436,6 +1436,14 @@ declare class SageMaker extends Service {
1436
1436
  * Lists the images in your account and their properties. The list can be filtered by creation time or modified time, and whether the image name contains a specified string.
1437
1437
  */
1438
1438
  listImages(callback?: (err: AWSError, data: SageMaker.Types.ListImagesResponse) => void): Request<SageMaker.Types.ListImagesResponse, AWSError>;
1439
+ /**
1440
+ * Returns a list of the subtasks for an Inference Recommender job. The supported subtasks are benchmarks, which evaluate the performance of your model on different instance types.
1441
+ */
1442
+ listInferenceRecommendationsJobSteps(params: SageMaker.Types.ListInferenceRecommendationsJobStepsRequest, callback?: (err: AWSError, data: SageMaker.Types.ListInferenceRecommendationsJobStepsResponse) => void): Request<SageMaker.Types.ListInferenceRecommendationsJobStepsResponse, AWSError>;
1443
+ /**
1444
+ * Returns a list of the subtasks for an Inference Recommender job. The supported subtasks are benchmarks, which evaluate the performance of your model on different instance types.
1445
+ */
1446
+ listInferenceRecommendationsJobSteps(callback?: (err: AWSError, data: SageMaker.Types.ListInferenceRecommendationsJobStepsResponse) => void): Request<SageMaker.Types.ListInferenceRecommendationsJobStepsResponse, AWSError>;
1439
1447
  /**
1440
1448
  * Lists recommendation jobs that satisfy various filters.
1441
1449
  */
@@ -2932,7 +2940,7 @@ declare namespace SageMaker {
2932
2940
  }
2933
2941
  export type AutoMLMaxResults = number;
2934
2942
  export type AutoMLMetricEnum = "Accuracy"|"MSE"|"F1"|"F1macro"|"AUC"|string;
2935
- export type AutoMLMetricExtendedEnum = "Accuracy"|"MSE"|"F1"|"F1macro"|"AUC"|"RMSE"|"MAE"|"R2"|"BalancedAccuracy"|"Precision"|"PrecisionMacro"|"Recall"|"RecallMacro"|"LogLoss"|string;
2943
+ export type AutoMLMetricExtendedEnum = "Accuracy"|"MSE"|"F1"|"F1macro"|"AUC"|"RMSE"|"MAE"|"R2"|"BalancedAccuracy"|"Precision"|"PrecisionMacro"|"Recall"|"RecallMacro"|"LogLoss"|"InferenceLatency"|string;
2936
2944
  export type AutoMLMode = "AUTO"|"ENSEMBLING"|"HYPERPARAMETER_TUNING"|string;
2937
2945
  export type AutoMLNameContains = string;
2938
2946
  export interface AutoMLOutputDataConfig {
@@ -10803,6 +10811,25 @@ declare namespace SageMaker {
10803
10811
  */
10804
10812
  FailureReason?: FailureReason;
10805
10813
  }
10814
+ export interface InferenceRecommendationsJobStep {
10815
+ /**
10816
+ * The type of the subtask. BENCHMARK: Evaluate the performance of your model on different instance types.
10817
+ */
10818
+ StepType: RecommendationStepType;
10819
+ /**
10820
+ * The name of the Inference Recommender job.
10821
+ */
10822
+ JobName: RecommendationJobName;
10823
+ /**
10824
+ * The current status of the benchmark.
10825
+ */
10826
+ Status: RecommendationJobStatus;
10827
+ /**
10828
+ * The details for a specific benchmark.
10829
+ */
10830
+ InferenceBenchmark?: RecommendationJobInferenceBenchmark;
10831
+ }
10832
+ export type InferenceRecommendationsJobSteps = InferenceRecommendationsJobStep[];
10806
10833
  export type InferenceRecommendationsJobs = InferenceRecommendationsJob[];
10807
10834
  export interface InferenceSpecification {
10808
10835
  /**
@@ -12377,6 +12404,38 @@ declare namespace SageMaker {
12377
12404
  */
12378
12405
  NextToken?: NextToken;
12379
12406
  }
12407
+ export interface ListInferenceRecommendationsJobStepsRequest {
12408
+ /**
12409
+ * The name for the Inference Recommender job.
12410
+ */
12411
+ JobName: RecommendationJobName;
12412
+ /**
12413
+ * A filter to return benchmarks of a specified status. If this field is left empty, then all benchmarks are returned.
12414
+ */
12415
+ Status?: RecommendationJobStatus;
12416
+ /**
12417
+ * A filter to return details about the specified type of subtask. BENCHMARK: Evaluate the performance of your model on different instance types.
12418
+ */
12419
+ StepType?: RecommendationStepType;
12420
+ /**
12421
+ * The maximum number of results to return.
12422
+ */
12423
+ MaxResults?: MaxResults;
12424
+ /**
12425
+ * A token that you can specify to return more results from the list. Specify this field if you have a token that was returned from a previous request.
12426
+ */
12427
+ NextToken?: NextToken;
12428
+ }
12429
+ export interface ListInferenceRecommendationsJobStepsResponse {
12430
+ /**
12431
+ * A list of all subtask details in Inference Recommender.
12432
+ */
12433
+ Steps?: InferenceRecommendationsJobSteps;
12434
+ /**
12435
+ * A token that you can specify in your next request to return more results from the list.
12436
+ */
12437
+ NextToken?: NextToken;
12438
+ }
12380
12439
  export interface ListInferenceRecommendationsJobsRequest {
12381
12440
  /**
12382
12441
  * A filter that returns only jobs created after the specified time (timestamp).
@@ -16367,6 +16426,7 @@ declare namespace SageMaker {
16367
16426
  }
16368
16427
  export type RStudioServerProUserGroup = "R_STUDIO_ADMIN"|"R_STUDIO_USER"|string;
16369
16428
  export type RealtimeInferenceInstanceTypes = ProductionVariantInstanceType[];
16429
+ export type RecommendationFailureReason = string;
16370
16430
  export type RecommendationJobArn = string;
16371
16431
  export interface RecommendationJobCompiledOutputConfig {
16372
16432
  /**
@@ -16405,6 +16465,15 @@ declare namespace SageMaker {
16405
16465
  SupportedInstanceTypes?: RecommendationJobSupportedInstanceTypes;
16406
16466
  }
16407
16467
  export type RecommendationJobDescription = string;
16468
+ export interface RecommendationJobInferenceBenchmark {
16469
+ Metrics?: RecommendationMetrics;
16470
+ EndpointConfiguration?: EndpointOutputConfiguration;
16471
+ ModelConfiguration: ModelConfiguration;
16472
+ /**
16473
+ * The reason why a benchmark failed.
16474
+ */
16475
+ FailureReason?: RecommendationFailureReason;
16476
+ }
16408
16477
  export interface RecommendationJobInputConfig {
16409
16478
  /**
16410
16479
  * The Amazon Resource Name (ARN) of a versioned model package.
@@ -16502,6 +16571,7 @@ declare namespace SageMaker {
16502
16571
  */
16503
16572
  ModelLatency: Integer;
16504
16573
  }
16574
+ export type RecommendationStepType = "BENCHMARK"|string;
16505
16575
  export type RecordWrapper = "None"|"RecordIO"|string;
16506
16576
  export type RedshiftClusterId = string;
16507
16577
  export type RedshiftDatabase = string;
@@ -83,7 +83,7 @@ return /******/ (function(modules) { // webpackBootstrap
83
83
  /**
84
84
  * @constant
85
85
  */
86
- VERSION: '2.1238.0',
86
+ VERSION: '2.1239.0',
87
87
 
88
88
  /**
89
89
  * @api private