@aws-sdk/client-cleanroomsml 3.840.0 → 3.844.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +9 -6
- package/dist-cjs/index.js +239 -18
- package/dist-es/CleanRoomsML.js +2 -0
- package/dist-es/commands/ListTrainedModelVersionsCommand.js +22 -0
- package/dist-es/commands/index.js +1 -0
- package/dist-es/models/models_0.js +40 -0
- package/dist-es/pagination/ListTrainedModelVersionsPaginator.js +4 -0
- package/dist-es/pagination/index.js +1 -0
- package/dist-es/protocols/Aws_restJson1.js +134 -7
- package/dist-types/CleanRoomsML.d.ts +8 -6
- package/dist-types/CleanRoomsMLClient.d.ts +4 -8
- package/dist-types/commands/CancelTrainedModelCommand.d.ts +4 -0
- package/dist-types/commands/CancelTrainedModelInferenceJobCommand.d.ts +3 -0
- package/dist-types/commands/CreateConfiguredModelAlgorithmAssociationCommand.d.ts +7 -0
- package/dist-types/commands/CreateMLInputChannelCommand.d.ts +3 -0
- package/dist-types/commands/CreateTrainedModelCommand.d.ts +16 -0
- package/dist-types/commands/DeleteConfiguredModelAlgorithmAssociationCommand.d.ts +3 -0
- package/dist-types/commands/DeleteMLConfigurationCommand.d.ts +3 -0
- package/dist-types/commands/DeleteMLInputChannelDataCommand.d.ts +3 -0
- package/dist-types/commands/DeleteTrainedModelOutputCommand.d.ts +5 -1
- package/dist-types/commands/GetCollaborationConfiguredModelAlgorithmAssociationCommand.d.ts +7 -0
- package/dist-types/commands/GetCollaborationMLInputChannelCommand.d.ts +3 -0
- package/dist-types/commands/GetCollaborationTrainedModelCommand.d.ts +13 -0
- package/dist-types/commands/GetConfiguredModelAlgorithmAssociationCommand.d.ts +7 -0
- package/dist-types/commands/GetMLConfigurationCommand.d.ts +3 -0
- package/dist-types/commands/GetMLInputChannelCommand.d.ts +3 -0
- package/dist-types/commands/GetTrainedModelCommand.d.ts +14 -0
- package/dist-types/commands/GetTrainedModelInferenceJobCommand.d.ts +4 -0
- package/dist-types/commands/ListCollaborationConfiguredModelAlgorithmAssociationsCommand.d.ts +3 -0
- package/dist-types/commands/ListCollaborationMLInputChannelsCommand.d.ts +3 -0
- package/dist-types/commands/ListCollaborationTrainedModelExportJobsCommand.d.ts +5 -0
- package/dist-types/commands/ListCollaborationTrainedModelInferenceJobsCommand.d.ts +5 -0
- package/dist-types/commands/ListCollaborationTrainedModelsCommand.d.ts +11 -0
- package/dist-types/commands/ListConfiguredModelAlgorithmAssociationsCommand.d.ts +3 -0
- package/dist-types/commands/ListMLInputChannelsCommand.d.ts +3 -0
- package/dist-types/commands/ListTrainedModelInferenceJobsCommand.d.ts +5 -0
- package/dist-types/commands/ListTrainedModelVersionsCommand.d.ts +109 -0
- package/dist-types/commands/ListTrainedModelsCommand.d.ts +11 -0
- package/dist-types/commands/PutMLConfigurationCommand.d.ts +3 -0
- package/dist-types/commands/StartAudienceGenerationJobCommand.d.ts +3 -0
- package/dist-types/commands/StartTrainedModelExportJobCommand.d.ts +4 -0
- package/dist-types/commands/StartTrainedModelInferenceJobCommand.d.ts +4 -0
- package/dist-types/commands/index.d.ts +1 -0
- package/dist-types/index.d.ts +1 -6
- package/dist-types/models/models_0.d.ts +341 -411
- package/dist-types/pagination/ListTrainedModelVersionsPaginator.d.ts +7 -0
- package/dist-types/pagination/index.d.ts +1 -0
- package/dist-types/protocols/Aws_restJson1.d.ts +9 -0
- package/dist-types/ts3.4/CleanRoomsML.d.ts +17 -0
- package/dist-types/ts3.4/CleanRoomsMLClient.d.ts +6 -0
- package/dist-types/ts3.4/commands/ListTrainedModelVersionsCommand.d.ts +51 -0
- package/dist-types/ts3.4/commands/index.d.ts +1 -0
- package/dist-types/ts3.4/models/models_0.d.ts +97 -0
- package/dist-types/ts3.4/pagination/ListTrainedModelVersionsPaginator.d.ts +11 -0
- package/dist-types/ts3.4/pagination/index.d.ts +1 -0
- package/dist-types/ts3.4/protocols/Aws_restJson1.d.ts +12 -0
- package/package.json +14 -14
package/dist-types/commands/CancelTrainedModelInferenceJobCommand.d.ts CHANGED
@@ -59,6 +59,9 @@ declare const CancelTrainedModelInferenceJobCommand_base: {
  * @throws {@link ResourceNotFoundException} (client fault)
  * <p>The resource you are requesting does not exist.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/CreateConfiguredModelAlgorithmAssociationCommand.d.ts CHANGED
@@ -53,6 +53,10 @@ declare const CreateConfiguredModelAlgorithmAssociationCommand_base: {
  * containerMetrics: { // MetricsConfigurationPolicy
  * noiseLevel: "HIGH" || "MEDIUM" || "LOW" || "NONE", // required
  * },
+ * maxArtifactSize: { // TrainedModelArtifactMaxSize
+ * unit: "GB", // required
+ * value: Number("double"), // required
+ * },
  * },
  * trainedModelExports: { // TrainedModelExportsConfigurationPolicy
  * maxSize: { // TrainedModelExportsMaxSize
@@ -109,6 +113,9 @@ declare const CreateConfiguredModelAlgorithmAssociationCommand_base: {
  * @throws {@link ServiceQuotaExceededException} (client fault)
  * <p>You have exceeded your service quota.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/CreateMLInputChannelCommand.d.ts CHANGED
@@ -93,6 +93,9 @@ declare const CreateMLInputChannelCommand_base: {
  * @throws {@link ServiceQuotaExceededException} (client fault)
  * <p>You have exceeded your service quota.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/CreateTrainedModelCommand.d.ts CHANGED
@@ -52,12 +52,21 @@ declare const CreateTrainedModelCommand_base: {
  * stoppingCondition: { // StoppingCondition
  * maxRuntimeInSeconds: Number("int"),
  * },
+ * incrementalTrainingDataChannels: [ // IncrementalTrainingDataChannels
+ * { // IncrementalTrainingDataChannel
+ * trainedModelArn: "STRING_VALUE", // required
+ * versionIdentifier: "STRING_VALUE",
+ * channelName: "STRING_VALUE", // required
+ * },
+ * ],
  * dataChannels: [ // ModelTrainingDataChannels // required
  * { // ModelTrainingDataChannel
  * mlInputChannelArn: "STRING_VALUE", // required
  * channelName: "STRING_VALUE", // required
+ * s3DataDistributionType: "FullyReplicated" || "ShardedByS3Key",
  * },
  * ],
+ * trainingInputMode: "File" || "FastFile" || "Pipe",
  * description: "STRING_VALUE",
  * kmsKeyArn: "STRING_VALUE",
  * tags: { // TagMap
@@ -68,6 +77,7 @@ declare const CreateTrainedModelCommand_base: {
  * const response = await client.send(command);
  * // { // CreateTrainedModelResponse
  * // trainedModelArn: "STRING_VALUE", // required
+ * // versionIdentifier: "STRING_VALUE",
  * // };
  *
  * ```
@@ -84,12 +94,18 @@ declare const CreateTrainedModelCommand_base: {
  * @throws {@link ConflictException} (client fault)
  * <p>You can't complete this action because another resource depends on this resource.</p>
  *
+ * @throws {@link InternalServiceException} (server fault)
+ * <p>An internal service error occurred. Retry your request. If the problem persists, contact AWS Support.</p>
+ *
  * @throws {@link ResourceNotFoundException} (client fault)
  * <p>The resource you are requesting does not exist.</p>
  *
  * @throws {@link ServiceQuotaExceededException} (client fault)
  * <p>You have exceeded your service quota.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

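The CreateTrainedModel hunks above introduce incremental training inputs plus two new training options. A minimal sketch of only the new request members, typed loosely against the command input; the ARNs, version identifier, and channel names are placeholders, not values from this diff:

```ts
import type { CreateTrainedModelCommandInput } from "@aws-sdk/client-cleanroomsml";

// Only the members introduced in this release; the rest of the request
// (name, resourceConfig, algorithm association, etc.) is unchanged.
const newTrainingMembers: Partial<CreateTrainedModelCommandInput> = {
  incrementalTrainingDataChannels: [
    {
      trainedModelArn: "arn:aws:cleanrooms-ml:us-east-1:111122223333:trained-model/EXAMPLE", // placeholder: base model to continue training from
      versionIdentifier: "v1", // placeholder: specific base-model version
      channelName: "base-model",
    },
  ],
  dataChannels: [
    {
      mlInputChannelArn: "arn:aws:cleanrooms-ml:us-east-1:111122223333:ml-input-channel/EXAMPLE", // placeholder
      channelName: "training",
      s3DataDistributionType: "FullyReplicated", // new optional member
    },
  ],
  trainingInputMode: "File", // new optional member ("File" | "FastFile" | "Pipe")
};
```
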
package/dist-types/commands/DeleteConfiguredModelAlgorithmAssociationCommand.d.ts CHANGED
@@ -59,6 +59,9 @@ declare const DeleteConfiguredModelAlgorithmAssociationCommand_base: {
  * @throws {@link ResourceNotFoundException} (client fault)
  * <p>The resource you are requesting does not exist.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/DeleteMLConfigurationCommand.d.ts CHANGED
@@ -55,6 +55,9 @@ declare const DeleteMLConfigurationCommand_base: {
  * @throws {@link ResourceNotFoundException} (client fault)
  * <p>The resource you are requesting does not exist.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/DeleteMLInputChannelDataCommand.d.ts CHANGED
@@ -59,6 +59,9 @@ declare const DeleteMLInputChannelDataCommand_base: {
  * @throws {@link ResourceNotFoundException} (client fault)
  * <p>The resource you are requesting does not exist.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/DeleteTrainedModelOutputCommand.d.ts CHANGED
@@ -27,7 +27,7 @@ declare const DeleteTrainedModelOutputCommand_base: {
     getEndpointParameterInstructions(): import("@smithy/middleware-endpoint").EndpointParameterInstructions;
 };
 /**
- * <p>Deletes the
+ * <p>Deletes the model artifacts stored by the service.</p>
  * @example
  * Use a bare-bones client and the command you need to make an API call.
  * ```javascript
@@ -37,6 +37,7 @@ declare const DeleteTrainedModelOutputCommand_base: {
  * const input = { // DeleteTrainedModelOutputRequest
  * trainedModelArn: "STRING_VALUE", // required
  * membershipIdentifier: "STRING_VALUE", // required
+ * versionIdentifier: "STRING_VALUE",
  * };
  * const command = new DeleteTrainedModelOutputCommand(input);
  * const response = await client.send(command);
@@ -59,6 +60,9 @@ declare const DeleteTrainedModelOutputCommand_base: {
  * @throws {@link ResourceNotFoundException} (client fault)
  * <p>The resource you are requesting does not exist.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

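With the new optional versionIdentifier, artifact deletion can target a single trained-model version. A minimal sketch based on the request shape shown above; all identifiers are placeholders:

```ts
import { CleanRoomsMLClient, DeleteTrainedModelOutputCommand } from "@aws-sdk/client-cleanroomsml";

const client = new CleanRoomsMLClient({}); // region and credentials resolved from the environment

// Delete the stored model artifacts for one specific trained-model version.
await client.send(
  new DeleteTrainedModelOutputCommand({
    membershipIdentifier: "00000000-0000-0000-0000-000000000000", // placeholder
    trainedModelArn: "arn:aws:cleanrooms-ml:us-east-1:111122223333:trained-model/EXAMPLE", // placeholder
    versionIdentifier: "v2", // new optional member selecting a specific version
  })
);
```
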
package/dist-types/commands/GetCollaborationConfiguredModelAlgorithmAssociationCommand.d.ts CHANGED
@@ -64,6 +64,10 @@ declare const GetCollaborationConfiguredModelAlgorithmAssociationCommand_base: {
  * // containerMetrics: { // MetricsConfigurationPolicy
  * // noiseLevel: "HIGH" || "MEDIUM" || "LOW" || "NONE", // required
  * // },
+ * // maxArtifactSize: { // TrainedModelArtifactMaxSize
+ * // unit: "GB", // required
+ * // value: Number("double"), // required
+ * // },
  * // },
  * // trainedModelExports: { // TrainedModelExportsConfigurationPolicy
  * // maxSize: { // TrainedModelExportsMaxSize
@@ -106,6 +110,9 @@ declare const GetCollaborationConfiguredModelAlgorithmAssociationCommand_base: {
  * @throws {@link ResourceNotFoundException} (client fault)
  * <p>The resource you are requesting does not exist.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/GetCollaborationMLInputChannelCommand.d.ts CHANGED
@@ -75,6 +75,9 @@ declare const GetCollaborationMLInputChannelCommand_base: {
  * @throws {@link ResourceNotFoundException} (client fault)
  * <p>The resource you are requesting does not exist.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/GetCollaborationTrainedModelCommand.d.ts CHANGED
@@ -37,6 +37,7 @@ declare const GetCollaborationTrainedModelCommand_base: {
  * const input = { // GetCollaborationTrainedModelRequest
  * trainedModelArn: "STRING_VALUE", // required
  * collaborationIdentifier: "STRING_VALUE", // required
+ * versionIdentifier: "STRING_VALUE",
  * };
  * const command = new GetCollaborationTrainedModelCommand(input);
  * const response = await client.send(command);
@@ -44,6 +45,14 @@ declare const GetCollaborationTrainedModelCommand_base: {
  * // membershipIdentifier: "STRING_VALUE", // required
  * // collaborationIdentifier: "STRING_VALUE", // required
  * // trainedModelArn: "STRING_VALUE", // required
+ * // versionIdentifier: "STRING_VALUE",
+ * // incrementalTrainingDataChannels: [ // IncrementalTrainingDataChannelsOutput
+ * // { // IncrementalTrainingDataChannelOutput
+ * // channelName: "STRING_VALUE", // required
+ * // versionIdentifier: "STRING_VALUE",
+ * // modelName: "STRING_VALUE", // required
+ * // },
+ * // ],
  * // name: "STRING_VALUE", // required
  * // description: "STRING_VALUE",
  * // status: "CREATE_PENDING" || "CREATE_IN_PROGRESS" || "CREATE_FAILED" || "ACTIVE" || "DELETE_PENDING" || "DELETE_IN_PROGRESS" || "DELETE_FAILED" || "INACTIVE" || "CANCEL_PENDING" || "CANCEL_IN_PROGRESS" || "CANCEL_FAILED", // required
@@ -57,6 +66,7 @@ declare const GetCollaborationTrainedModelCommand_base: {
  * // instanceType: "ml.m4.xlarge" || "ml.m4.2xlarge" || "ml.m4.4xlarge" || "ml.m4.10xlarge" || "ml.m4.16xlarge" || "ml.g4dn.xlarge" || "ml.g4dn.2xlarge" || "ml.g4dn.4xlarge" || "ml.g4dn.8xlarge" || "ml.g4dn.12xlarge" || "ml.g4dn.16xlarge" || "ml.m5.large" || "ml.m5.xlarge" || "ml.m5.2xlarge" || "ml.m5.4xlarge" || "ml.m5.12xlarge" || "ml.m5.24xlarge" || "ml.c4.xlarge" || "ml.c4.2xlarge" || "ml.c4.4xlarge" || "ml.c4.8xlarge" || "ml.p2.xlarge" || "ml.p2.8xlarge" || "ml.p2.16xlarge" || "ml.p3.2xlarge" || "ml.p3.8xlarge" || "ml.p3.16xlarge" || "ml.p3dn.24xlarge" || "ml.p4d.24xlarge" || "ml.p4de.24xlarge" || "ml.p5.48xlarge" || "ml.c5.xlarge" || "ml.c5.2xlarge" || "ml.c5.4xlarge" || "ml.c5.9xlarge" || "ml.c5.18xlarge" || "ml.c5n.xlarge" || "ml.c5n.2xlarge" || "ml.c5n.4xlarge" || "ml.c5n.9xlarge" || "ml.c5n.18xlarge" || "ml.g5.xlarge" || "ml.g5.2xlarge" || "ml.g5.4xlarge" || "ml.g5.8xlarge" || "ml.g5.16xlarge" || "ml.g5.12xlarge" || "ml.g5.24xlarge" || "ml.g5.48xlarge" || "ml.trn1.2xlarge" || "ml.trn1.32xlarge" || "ml.trn1n.32xlarge" || "ml.m6i.large" || "ml.m6i.xlarge" || "ml.m6i.2xlarge" || "ml.m6i.4xlarge" || "ml.m6i.8xlarge" || "ml.m6i.12xlarge" || "ml.m6i.16xlarge" || "ml.m6i.24xlarge" || "ml.m6i.32xlarge" || "ml.c6i.xlarge" || "ml.c6i.2xlarge" || "ml.c6i.8xlarge" || "ml.c6i.4xlarge" || "ml.c6i.12xlarge" || "ml.c6i.16xlarge" || "ml.c6i.24xlarge" || "ml.c6i.32xlarge" || "ml.r5d.large" || "ml.r5d.xlarge" || "ml.r5d.2xlarge" || "ml.r5d.4xlarge" || "ml.r5d.8xlarge" || "ml.r5d.12xlarge" || "ml.r5d.16xlarge" || "ml.r5d.24xlarge" || "ml.t3.medium" || "ml.t3.large" || "ml.t3.xlarge" || "ml.t3.2xlarge" || "ml.r5.large" || "ml.r5.xlarge" || "ml.r5.2xlarge" || "ml.r5.4xlarge" || "ml.r5.8xlarge" || "ml.r5.12xlarge" || "ml.r5.16xlarge" || "ml.r5.24xlarge", // required
  * // volumeSizeInGB: Number("int"), // required
  * // },
+ * // trainingInputMode: "File" || "FastFile" || "Pipe",
  * // stoppingCondition: { // StoppingCondition
  * // maxRuntimeInSeconds: Number("int"),
  * // },
@@ -84,6 +94,9 @@ declare const GetCollaborationTrainedModelCommand_base: {
  * @throws {@link ResourceNotFoundException} (client fault)
  * <p>The resource you are requesting does not exist.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/GetConfiguredModelAlgorithmAssociationCommand.d.ts CHANGED
@@ -62,6 +62,10 @@ declare const GetConfiguredModelAlgorithmAssociationCommand_base: {
  * // containerMetrics: { // MetricsConfigurationPolicy
  * // noiseLevel: "HIGH" || "MEDIUM" || "LOW" || "NONE", // required
  * // },
+ * // maxArtifactSize: { // TrainedModelArtifactMaxSize
+ * // unit: "GB", // required
+ * // value: Number("double"), // required
+ * // },
  * // },
  * // trainedModelExports: { // TrainedModelExportsConfigurationPolicy
  * // maxSize: { // TrainedModelExportsMaxSize
@@ -108,6 +112,9 @@ declare const GetConfiguredModelAlgorithmAssociationCommand_base: {
  * @throws {@link ResourceNotFoundException} (client fault)
  * <p>The resource you are requesting does not exist.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/GetMLConfigurationCommand.d.ts CHANGED
@@ -67,6 +67,9 @@ declare const GetMLConfigurationCommand_base: {
  * @throws {@link ResourceNotFoundException} (client fault)
  * <p>The resource you are requesting does not exist.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/GetMLInputChannelCommand.d.ts CHANGED
@@ -101,6 +101,9 @@ declare const GetMLInputChannelCommand_base: {
  * @throws {@link ResourceNotFoundException} (client fault)
  * <p>The resource you are requesting does not exist.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/GetTrainedModelCommand.d.ts CHANGED
@@ -37,6 +37,7 @@ declare const GetTrainedModelCommand_base: {
  * const input = { // GetTrainedModelRequest
  * trainedModelArn: "STRING_VALUE", // required
  * membershipIdentifier: "STRING_VALUE", // required
+ * versionIdentifier: "STRING_VALUE",
  * };
  * const command = new GetTrainedModelCommand(input);
  * const response = await client.send(command);
@@ -44,6 +45,14 @@ declare const GetTrainedModelCommand_base: {
  * // membershipIdentifier: "STRING_VALUE", // required
  * // collaborationIdentifier: "STRING_VALUE", // required
  * // trainedModelArn: "STRING_VALUE", // required
+ * // versionIdentifier: "STRING_VALUE",
+ * // incrementalTrainingDataChannels: [ // IncrementalTrainingDataChannelsOutput
+ * // { // IncrementalTrainingDataChannelOutput
+ * // channelName: "STRING_VALUE", // required
+ * // versionIdentifier: "STRING_VALUE",
+ * // modelName: "STRING_VALUE", // required
+ * // },
+ * // ],
  * // name: "STRING_VALUE", // required
  * // description: "STRING_VALUE",
  * // status: "CREATE_PENDING" || "CREATE_IN_PROGRESS" || "CREATE_FAILED" || "ACTIVE" || "DELETE_PENDING" || "DELETE_IN_PROGRESS" || "DELETE_FAILED" || "INACTIVE" || "CANCEL_PENDING" || "CANCEL_IN_PROGRESS" || "CANCEL_FAILED", // required
@@ -57,6 +66,7 @@ declare const GetTrainedModelCommand_base: {
  * // instanceType: "ml.m4.xlarge" || "ml.m4.2xlarge" || "ml.m4.4xlarge" || "ml.m4.10xlarge" || "ml.m4.16xlarge" || "ml.g4dn.xlarge" || "ml.g4dn.2xlarge" || "ml.g4dn.4xlarge" || "ml.g4dn.8xlarge" || "ml.g4dn.12xlarge" || "ml.g4dn.16xlarge" || "ml.m5.large" || "ml.m5.xlarge" || "ml.m5.2xlarge" || "ml.m5.4xlarge" || "ml.m5.12xlarge" || "ml.m5.24xlarge" || "ml.c4.xlarge" || "ml.c4.2xlarge" || "ml.c4.4xlarge" || "ml.c4.8xlarge" || "ml.p2.xlarge" || "ml.p2.8xlarge" || "ml.p2.16xlarge" || "ml.p3.2xlarge" || "ml.p3.8xlarge" || "ml.p3.16xlarge" || "ml.p3dn.24xlarge" || "ml.p4d.24xlarge" || "ml.p4de.24xlarge" || "ml.p5.48xlarge" || "ml.c5.xlarge" || "ml.c5.2xlarge" || "ml.c5.4xlarge" || "ml.c5.9xlarge" || "ml.c5.18xlarge" || "ml.c5n.xlarge" || "ml.c5n.2xlarge" || "ml.c5n.4xlarge" || "ml.c5n.9xlarge" || "ml.c5n.18xlarge" || "ml.g5.xlarge" || "ml.g5.2xlarge" || "ml.g5.4xlarge" || "ml.g5.8xlarge" || "ml.g5.16xlarge" || "ml.g5.12xlarge" || "ml.g5.24xlarge" || "ml.g5.48xlarge" || "ml.trn1.2xlarge" || "ml.trn1.32xlarge" || "ml.trn1n.32xlarge" || "ml.m6i.large" || "ml.m6i.xlarge" || "ml.m6i.2xlarge" || "ml.m6i.4xlarge" || "ml.m6i.8xlarge" || "ml.m6i.12xlarge" || "ml.m6i.16xlarge" || "ml.m6i.24xlarge" || "ml.m6i.32xlarge" || "ml.c6i.xlarge" || "ml.c6i.2xlarge" || "ml.c6i.8xlarge" || "ml.c6i.4xlarge" || "ml.c6i.12xlarge" || "ml.c6i.16xlarge" || "ml.c6i.24xlarge" || "ml.c6i.32xlarge" || "ml.r5d.large" || "ml.r5d.xlarge" || "ml.r5d.2xlarge" || "ml.r5d.4xlarge" || "ml.r5d.8xlarge" || "ml.r5d.12xlarge" || "ml.r5d.16xlarge" || "ml.r5d.24xlarge" || "ml.t3.medium" || "ml.t3.large" || "ml.t3.xlarge" || "ml.t3.2xlarge" || "ml.r5.large" || "ml.r5.xlarge" || "ml.r5.2xlarge" || "ml.r5.4xlarge" || "ml.r5.8xlarge" || "ml.r5.12xlarge" || "ml.r5.16xlarge" || "ml.r5.24xlarge", // required
  * // volumeSizeInGB: Number("int"), // required
  * // },
+ * // trainingInputMode: "File" || "FastFile" || "Pipe",
  * // stoppingCondition: { // StoppingCondition
  * // maxRuntimeInSeconds: Number("int"),
  * // },
@@ -81,6 +91,7 @@ declare const GetTrainedModelCommand_base: {
  * // { // ModelTrainingDataChannel
  * // mlInputChannelArn: "STRING_VALUE", // required
  * // channelName: "STRING_VALUE", // required
+ * // s3DataDistributionType: "FullyReplicated" || "ShardedByS3Key",
  * // },
  * // ],
  * // };
@@ -99,6 +110,9 @@ declare const GetTrainedModelCommand_base: {
  * @throws {@link ResourceNotFoundException} (client fault)
  * <p>The resource you are requesting does not exist.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

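GetTrainedModel now accepts the same optional versionIdentifier and returns the incremental-training lineage. A minimal sketch against the request and response shapes shown above; all identifiers are placeholders:

```ts
import { CleanRoomsMLClient, GetTrainedModelCommand } from "@aws-sdk/client-cleanroomsml";

const client = new CleanRoomsMLClient({});

const model = await client.send(
  new GetTrainedModelCommand({
    membershipIdentifier: "00000000-0000-0000-0000-000000000000", // placeholder
    trainedModelArn: "arn:aws:cleanrooms-ml:us-east-1:111122223333:trained-model/EXAMPLE", // placeholder
    versionIdentifier: "v2", // new optional member selecting a specific version
  })
);

// New response members added in this release.
console.log(model.versionIdentifier, model.trainingInputMode);
for (const channel of model.incrementalTrainingDataChannels ?? []) {
  console.log(channel.channelName, channel.modelName, channel.versionIdentifier);
}
```
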
package/dist-types/commands/GetTrainedModelInferenceJobCommand.d.ts CHANGED
@@ -48,6 +48,7 @@ declare const GetTrainedModelInferenceJobCommand_base: {
  * // name: "STRING_VALUE", // required
  * // status: "CREATE_PENDING" || "CREATE_IN_PROGRESS" || "CREATE_FAILED" || "ACTIVE" || "CANCEL_PENDING" || "CANCEL_IN_PROGRESS" || "CANCEL_FAILED" || "INACTIVE", // required
  * // trainedModelArn: "STRING_VALUE", // required
+ * // trainedModelVersionIdentifier: "STRING_VALUE",
  * // resourceConfig: { // InferenceResourceConfig
  * // instanceType: "ml.r7i.48xlarge" || "ml.r6i.16xlarge" || "ml.m6i.xlarge" || "ml.m5.4xlarge" || "ml.p2.xlarge" || "ml.m4.16xlarge" || "ml.r7i.16xlarge" || "ml.m7i.xlarge" || "ml.m6i.12xlarge" || "ml.r7i.8xlarge" || "ml.r7i.large" || "ml.m7i.12xlarge" || "ml.m6i.24xlarge" || "ml.m7i.24xlarge" || "ml.r6i.8xlarge" || "ml.r6i.large" || "ml.g5.2xlarge" || "ml.m5.large" || "ml.p3.16xlarge" || "ml.m7i.48xlarge" || "ml.m6i.16xlarge" || "ml.p2.16xlarge" || "ml.g5.4xlarge" || "ml.m7i.16xlarge" || "ml.c4.2xlarge" || "ml.c5.2xlarge" || "ml.c6i.32xlarge" || "ml.c4.4xlarge" || "ml.g5.8xlarge" || "ml.c6i.xlarge" || "ml.c5.4xlarge" || "ml.g4dn.xlarge" || "ml.c7i.xlarge" || "ml.c6i.12xlarge" || "ml.g4dn.12xlarge" || "ml.c7i.12xlarge" || "ml.c6i.24xlarge" || "ml.g4dn.2xlarge" || "ml.c7i.24xlarge" || "ml.c7i.2xlarge" || "ml.c4.8xlarge" || "ml.c6i.2xlarge" || "ml.g4dn.4xlarge" || "ml.c7i.48xlarge" || "ml.c7i.4xlarge" || "ml.c6i.16xlarge" || "ml.c5.9xlarge" || "ml.g4dn.16xlarge" || "ml.c7i.16xlarge" || "ml.c6i.4xlarge" || "ml.c5.xlarge" || "ml.c4.xlarge" || "ml.g4dn.8xlarge" || "ml.c7i.8xlarge" || "ml.c7i.large" || "ml.g5.xlarge" || "ml.c6i.8xlarge" || "ml.c6i.large" || "ml.g5.12xlarge" || "ml.g5.24xlarge" || "ml.m7i.2xlarge" || "ml.c5.18xlarge" || "ml.g5.48xlarge" || "ml.m6i.2xlarge" || "ml.g5.16xlarge" || "ml.m7i.4xlarge" || "ml.p3.2xlarge" || "ml.r6i.32xlarge" || "ml.m6i.4xlarge" || "ml.m5.xlarge" || "ml.m4.10xlarge" || "ml.r6i.xlarge" || "ml.m5.12xlarge" || "ml.m4.xlarge" || "ml.r7i.2xlarge" || "ml.r7i.xlarge" || "ml.r6i.12xlarge" || "ml.m5.24xlarge" || "ml.r7i.12xlarge" || "ml.m7i.8xlarge" || "ml.m7i.large" || "ml.r6i.24xlarge" || "ml.r6i.2xlarge" || "ml.m4.2xlarge" || "ml.r7i.24xlarge" || "ml.r7i.4xlarge" || "ml.m6i.8xlarge" || "ml.m6i.large" || "ml.m5.2xlarge" || "ml.p2.8xlarge" || "ml.r6i.4xlarge" || "ml.m6i.32xlarge" || "ml.p3.8xlarge" || "ml.m4.4xlarge", // required
  * // instanceCount: Number("int"),
@@ -100,6 +101,9 @@ declare const GetTrainedModelInferenceJobCommand_base: {
  * @throws {@link ResourceNotFoundException} (client fault)
  * <p>The resource you are requesting does not exist.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/ListCollaborationConfiguredModelAlgorithmAssociationsCommand.d.ts CHANGED
@@ -69,6 +69,9 @@ declare const ListCollaborationConfiguredModelAlgorithmAssociationsCommand_base:
  * @throws {@link AccessDeniedException} (client fault)
  * <p>You do not have sufficient access to perform this action.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/ListCollaborationMLInputChannelsCommand.d.ts CHANGED
@@ -72,6 +72,9 @@ declare const ListCollaborationMLInputChannelsCommand_base: {
  * @throws {@link AccessDeniedException} (client fault)
  * <p>You do not have sufficient access to perform this action.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/ListCollaborationTrainedModelExportJobsCommand.d.ts CHANGED
@@ -39,6 +39,7 @@ declare const ListCollaborationTrainedModelExportJobsCommand_base: {
  * maxResults: Number("int"),
  * collaborationIdentifier: "STRING_VALUE", // required
  * trainedModelArn: "STRING_VALUE", // required
+ * trainedModelVersionIdentifier: "STRING_VALUE",
  * };
  * const command = new ListCollaborationTrainedModelExportJobsCommand(input);
  * const response = await client.send(command);
@@ -64,6 +65,7 @@ declare const ListCollaborationTrainedModelExportJobsCommand_base: {
  * // description: "STRING_VALUE",
  * // creatorAccountId: "STRING_VALUE", // required
  * // trainedModelArn: "STRING_VALUE", // required
+ * // trainedModelVersionIdentifier: "STRING_VALUE",
  * // membershipIdentifier: "STRING_VALUE", // required
  * // collaborationIdentifier: "STRING_VALUE", // required
  * // },
@@ -81,6 +83,9 @@ declare const ListCollaborationTrainedModelExportJobsCommand_base: {
  * @throws {@link AccessDeniedException} (client fault)
  * <p>You do not have sufficient access to perform this action.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

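The list operations in this release gain an optional trainedModelVersionIdentifier filter. A minimal sketch based on the request shape shown above, which presumably restricts results to that model version; all identifiers are placeholders:

```ts
import {
  CleanRoomsMLClient,
  ListCollaborationTrainedModelExportJobsCommand,
} from "@aws-sdk/client-cleanroomsml";

const client = new CleanRoomsMLClient({});

// List export jobs for one specific trained-model version.
const response = await client.send(
  new ListCollaborationTrainedModelExportJobsCommand({
    collaborationIdentifier: "00000000-0000-0000-0000-000000000000", // placeholder
    trainedModelArn: "arn:aws:cleanrooms-ml:us-east-1:111122223333:trained-model/EXAMPLE", // placeholder
    trainedModelVersionIdentifier: "v3", // new optional filter
  })
);
console.log(response);
```
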
package/dist-types/commands/ListCollaborationTrainedModelInferenceJobsCommand.d.ts CHANGED
@@ -39,6 +39,7 @@ declare const ListCollaborationTrainedModelInferenceJobsCommand_base: {
  * maxResults: Number("int"),
  * collaborationIdentifier: "STRING_VALUE", // required
  * trainedModelArn: "STRING_VALUE",
+ * trainedModelVersionIdentifier: "STRING_VALUE",
  * };
  * const command = new ListCollaborationTrainedModelInferenceJobsCommand(input);
  * const response = await client.send(command);
@@ -50,6 +51,7 @@ declare const ListCollaborationTrainedModelInferenceJobsCommand_base: {
  * // configuredModelAlgorithmAssociationArn: "STRING_VALUE",
  * // membershipIdentifier: "STRING_VALUE", // required
  * // trainedModelArn: "STRING_VALUE", // required
+ * // trainedModelVersionIdentifier: "STRING_VALUE",
  * // collaborationIdentifier: "STRING_VALUE", // required
  * // status: "CREATE_PENDING" || "CREATE_IN_PROGRESS" || "CREATE_FAILED" || "ACTIVE" || "CANCEL_PENDING" || "CANCEL_IN_PROGRESS" || "CANCEL_FAILED" || "INACTIVE", // required
  * // outputConfiguration: { // InferenceOutputConfiguration
@@ -84,6 +86,9 @@ declare const ListCollaborationTrainedModelInferenceJobsCommand_base: {
  * @throws {@link AccessDeniedException} (client fault)
  * <p>You do not have sufficient access to perform this action.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/ListCollaborationTrainedModelsCommand.d.ts CHANGED
@@ -49,6 +49,14 @@ declare const ListCollaborationTrainedModelsCommand_base: {
  * // updateTime: new Date("TIMESTAMP"), // required
  * // trainedModelArn: "STRING_VALUE", // required
  * // name: "STRING_VALUE", // required
+ * // versionIdentifier: "STRING_VALUE",
+ * // incrementalTrainingDataChannels: [ // IncrementalTrainingDataChannelsOutput
+ * // { // IncrementalTrainingDataChannelOutput
+ * // channelName: "STRING_VALUE", // required
+ * // versionIdentifier: "STRING_VALUE",
+ * // modelName: "STRING_VALUE", // required
+ * // },
+ * // ],
  * // description: "STRING_VALUE",
  * // membershipIdentifier: "STRING_VALUE", // required
  * // collaborationIdentifier: "STRING_VALUE", // required
@@ -70,6 +78,9 @@ declare const ListCollaborationTrainedModelsCommand_base: {
  * @throws {@link AccessDeniedException} (client fault)
  * <p>You do not have sufficient access to perform this action.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/ListConfiguredModelAlgorithmAssociationsCommand.d.ts CHANGED
@@ -68,6 +68,9 @@ declare const ListConfiguredModelAlgorithmAssociationsCommand_base: {
  * @throws {@link AccessDeniedException} (client fault)
  * <p>You do not have sufficient access to perform this action.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/ListMLInputChannelsCommand.d.ts CHANGED
@@ -72,6 +72,9 @@ declare const ListMLInputChannelsCommand_base: {
  * @throws {@link AccessDeniedException} (client fault)
  * <p>You do not have sufficient access to perform this action.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/ListTrainedModelInferenceJobsCommand.d.ts CHANGED
@@ -39,6 +39,7 @@ declare const ListTrainedModelInferenceJobsCommand_base: {
  * maxResults: Number("int"),
  * membershipIdentifier: "STRING_VALUE", // required
  * trainedModelArn: "STRING_VALUE",
+ * trainedModelVersionIdentifier: "STRING_VALUE",
  * };
  * const command = new ListTrainedModelInferenceJobsCommand(input);
  * const response = await client.send(command);
@@ -50,6 +51,7 @@ declare const ListTrainedModelInferenceJobsCommand_base: {
  * // configuredModelAlgorithmAssociationArn: "STRING_VALUE",
  * // membershipIdentifier: "STRING_VALUE", // required
  * // trainedModelArn: "STRING_VALUE", // required
+ * // trainedModelVersionIdentifier: "STRING_VALUE",
  * // collaborationIdentifier: "STRING_VALUE", // required
  * // status: "CREATE_PENDING" || "CREATE_IN_PROGRESS" || "CREATE_FAILED" || "ACTIVE" || "CANCEL_PENDING" || "CANCEL_IN_PROGRESS" || "CANCEL_FAILED" || "INACTIVE", // required
  * // outputConfiguration: { // InferenceOutputConfiguration
@@ -83,6 +85,9 @@ declare const ListTrainedModelInferenceJobsCommand_base: {
  * @throws {@link AccessDeniedException} (client fault)
  * <p>You do not have sufficient access to perform this action.</p>
  *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
  * @throws {@link ValidationException} (client fault)
  * <p>The request parameters for this request are incorrect.</p>
  *

package/dist-types/commands/ListTrainedModelVersionsCommand.d.ts
@@ -0,0 +1,109 @@
+import { Command as $Command } from "@smithy/smithy-client";
+import { MetadataBearer as __MetadataBearer } from "@smithy/types";
+import { CleanRoomsMLClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../CleanRoomsMLClient";
+import { ListTrainedModelVersionsRequest, ListTrainedModelVersionsResponse } from "../models/models_0";
+/**
+ * @public
+ */
+export type { __MetadataBearer };
+export { $Command };
+/**
+ * @public
+ *
+ * The input for {@link ListTrainedModelVersionsCommand}.
+ */
+export interface ListTrainedModelVersionsCommandInput extends ListTrainedModelVersionsRequest {
+}
+/**
+ * @public
+ *
+ * The output of {@link ListTrainedModelVersionsCommand}.
+ */
+export interface ListTrainedModelVersionsCommandOutput extends ListTrainedModelVersionsResponse, __MetadataBearer {
+}
+declare const ListTrainedModelVersionsCommand_base: {
+    new (input: ListTrainedModelVersionsCommandInput): import("@smithy/smithy-client").CommandImpl<ListTrainedModelVersionsCommandInput, ListTrainedModelVersionsCommandOutput, CleanRoomsMLClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes>;
+    new (input: ListTrainedModelVersionsCommandInput): import("@smithy/smithy-client").CommandImpl<ListTrainedModelVersionsCommandInput, ListTrainedModelVersionsCommandOutput, CleanRoomsMLClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes>;
+    getEndpointParameterInstructions(): import("@smithy/middleware-endpoint").EndpointParameterInstructions;
+};
+/**
+ * <p>Returns a list of trained model versions for a specified trained model. This operation allows you to view all versions of a trained model, including information about their status and creation details. You can use this to track the evolution of your trained models and select specific versions for inference or further training.</p>
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { CleanRoomsMLClient, ListTrainedModelVersionsCommand } from "@aws-sdk/client-cleanroomsml"; // ES Modules import
+ * // const { CleanRoomsMLClient, ListTrainedModelVersionsCommand } = require("@aws-sdk/client-cleanroomsml"); // CommonJS import
+ * const client = new CleanRoomsMLClient(config);
+ * const input = { // ListTrainedModelVersionsRequest
+ * nextToken: "STRING_VALUE",
+ * maxResults: Number("int"),
+ * membershipIdentifier: "STRING_VALUE", // required
+ * trainedModelArn: "STRING_VALUE", // required
+ * status: "CREATE_PENDING" || "CREATE_IN_PROGRESS" || "CREATE_FAILED" || "ACTIVE" || "DELETE_PENDING" || "DELETE_IN_PROGRESS" || "DELETE_FAILED" || "INACTIVE" || "CANCEL_PENDING" || "CANCEL_IN_PROGRESS" || "CANCEL_FAILED",
+ * };
+ * const command = new ListTrainedModelVersionsCommand(input);
+ * const response = await client.send(command);
+ * // { // ListTrainedModelVersionsResponse
+ * // nextToken: "STRING_VALUE",
+ * // trainedModels: [ // TrainedModelList // required
+ * // { // TrainedModelSummary
+ * // createTime: new Date("TIMESTAMP"), // required
+ * // updateTime: new Date("TIMESTAMP"), // required
+ * // trainedModelArn: "STRING_VALUE", // required
+ * // versionIdentifier: "STRING_VALUE",
+ * // incrementalTrainingDataChannels: [ // IncrementalTrainingDataChannelsOutput
+ * // { // IncrementalTrainingDataChannelOutput
+ * // channelName: "STRING_VALUE", // required
+ * // versionIdentifier: "STRING_VALUE",
+ * // modelName: "STRING_VALUE", // required
+ * // },
+ * // ],
+ * // name: "STRING_VALUE", // required
+ * // description: "STRING_VALUE",
+ * // membershipIdentifier: "STRING_VALUE", // required
+ * // collaborationIdentifier: "STRING_VALUE", // required
+ * // status: "CREATE_PENDING" || "CREATE_IN_PROGRESS" || "CREATE_FAILED" || "ACTIVE" || "DELETE_PENDING" || "DELETE_IN_PROGRESS" || "DELETE_FAILED" || "INACTIVE" || "CANCEL_PENDING" || "CANCEL_IN_PROGRESS" || "CANCEL_FAILED", // required
+ * // configuredModelAlgorithmAssociationArn: "STRING_VALUE", // required
+ * // },
+ * // ],
+ * // };
+ *
+ * ```
+ *
+ * @param ListTrainedModelVersionsCommandInput - {@link ListTrainedModelVersionsCommandInput}
+ * @returns {@link ListTrainedModelVersionsCommandOutput}
+ * @see {@link ListTrainedModelVersionsCommandInput} for command's `input` shape.
+ * @see {@link ListTrainedModelVersionsCommandOutput} for command's `response` shape.
+ * @see {@link CleanRoomsMLClientResolvedConfig | config} for CleanRoomsMLClient's `config` shape.
+ *
+ * @throws {@link AccessDeniedException} (client fault)
+ * <p>You do not have sufficient access to perform this action.</p>
+ *
+ * @throws {@link ResourceNotFoundException} (client fault)
+ * <p>The resource you are requesting does not exist.</p>
+ *
+ * @throws {@link ThrottlingException} (client fault)
+ * <p>The request was denied due to request throttling.</p>
+ *
+ * @throws {@link ValidationException} (client fault)
+ * <p>The request parameters for this request are incorrect.</p>
+ *
+ * @throws {@link CleanRoomsMLServiceException}
+ * <p>Base exception class for all service exceptions from CleanRoomsML service.</p>
+ *
+ *
+ * @public
+ */
+export declare class ListTrainedModelVersionsCommand extends ListTrainedModelVersionsCommand_base {
+    /** @internal type navigation helper, not in runtime. */
+    protected static __types: {
+        api: {
+            input: ListTrainedModelVersionsRequest;
+            output: ListTrainedModelVersionsResponse;
+        };
+        sdk: {
+            input: ListTrainedModelVersionsCommandInput;
+            output: ListTrainedModelVersionsCommandOutput;
+        };
+    };
+}
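The new command ships with a paginator (see ListTrainedModelVersionsPaginator in the file list above). A minimal sketch assuming the SDK's usual paginate* export name, paginateListTrainedModelVersions; all identifiers are placeholders:

```ts
import {
  CleanRoomsMLClient,
  paginateListTrainedModelVersions,
} from "@aws-sdk/client-cleanroomsml";

const client = new CleanRoomsMLClient({});

// Walk every page of versions for one trained model.
const paginator = paginateListTrainedModelVersions(
  { client, pageSize: 25 },
  {
    membershipIdentifier: "00000000-0000-0000-0000-000000000000", // placeholder
    trainedModelArn: "arn:aws:cleanrooms-ml:us-east-1:111122223333:trained-model/EXAMPLE", // placeholder
    status: "ACTIVE", // optional status filter from the request shape above
  }
);

for await (const page of paginator) {
  for (const version of page.trainedModels ?? []) {
    console.log(version.versionIdentifier, version.status, version.createTime);
  }
}
```
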