cdk-comprehend-s3olap 2.0.62 → 2.0.65
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.jsii +4 -4
- package/lib/cdk-comprehend-s3olap.js +2 -2
- package/lib/comprehend-lambdas.js +2 -2
- package/lib/iam-roles.js +4 -4
- package/node_modules/aws-sdk/CHANGELOG.md +16 -1
- package/node_modules/aws-sdk/README.md +1 -1
- package/node_modules/aws-sdk/apis/backupstorage-2018-04-10.examples.json +5 -0
- package/node_modules/aws-sdk/apis/backupstorage-2018-04-10.min.json +522 -0
- package/node_modules/aws-sdk/apis/backupstorage-2018-04-10.paginators.json +14 -0
- package/node_modules/aws-sdk/apis/dlm-2018-01-12.min.json +15 -9
- package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +627 -623
- package/node_modules/aws-sdk/apis/location-2020-11-19.min.json +60 -44
- package/node_modules/aws-sdk/apis/metadata.json +6 -0
- package/node_modules/aws-sdk/apis/privatenetworks-2021-12-03.examples.json +5 -0
- package/node_modules/aws-sdk/apis/privatenetworks-2021-12-03.min.json +1058 -0
- package/node_modules/aws-sdk/apis/privatenetworks-2021-12-03.paginators.json +34 -0
- package/node_modules/aws-sdk/apis/sagemaker-2017-07-24.min.json +501 -468
- package/node_modules/aws-sdk/apis/sagemaker-a2i-runtime-2019-11-07.min.json +12 -8
- package/node_modules/aws-sdk/clients/all.d.ts +2 -0
- package/node_modules/aws-sdk/clients/all.js +3 -1
- package/node_modules/aws-sdk/clients/backupstorage.d.ts +469 -0
- package/node_modules/aws-sdk/clients/backupstorage.js +18 -0
- package/node_modules/aws-sdk/clients/cloudwatch.d.ts +7 -7
- package/node_modules/aws-sdk/clients/dlm.d.ts +30 -25
- package/node_modules/aws-sdk/clients/ec2.d.ts +1 -0
- package/node_modules/aws-sdk/clients/location.d.ts +27 -13
- package/node_modules/aws-sdk/clients/privatenetworks.d.ts +1092 -0
- package/node_modules/aws-sdk/clients/privatenetworks.js +18 -0
- package/node_modules/aws-sdk/clients/sagemaker.d.ts +51 -5
- package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +2 -2
- package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +83 -9
- package/node_modules/aws-sdk/dist/aws-sdk.js +696 -670
- package/node_modules/aws-sdk/dist/aws-sdk.min.js +65 -65
- package/node_modules/aws-sdk/lib/config_service_placeholders.d.ts +4 -0
- package/node_modules/aws-sdk/lib/core.js +1 -1
- package/node_modules/aws-sdk/package.json +1 -1
- package/node_modules/esbuild/install.js +4 -4
- package/node_modules/esbuild/lib/main.js +7 -7
- package/node_modules/esbuild/package.json +22 -22
- package/node_modules/esbuild-linux-64/bin/esbuild +0 -0
- package/node_modules/esbuild-linux-64/package.json +1 -1
- package/package.json +7 -7
@@ -3,7 +3,6 @@
|
|
3
3
|
"metadata": {
|
4
4
|
"apiVersion": "2019-11-07",
|
5
5
|
"endpointPrefix": "a2i-runtime.sagemaker",
|
6
|
-
"jsonVersion": "1.1",
|
7
6
|
"protocol": "rest-json",
|
8
7
|
"serviceFullName": "Amazon Augmented AI Runtime",
|
9
8
|
"serviceId": "SageMaker A2I Runtime",
|
@@ -62,7 +61,7 @@
|
|
62
61
|
],
|
63
62
|
"members": {
|
64
63
|
"CreationTime": {
|
65
|
-
"type": "timestamp"
|
64
|
+
"shape": "S6"
|
66
65
|
},
|
67
66
|
"FailureReason": {},
|
68
67
|
"FailureCode": {},
|
@@ -94,14 +93,14 @@
|
|
94
93
|
],
|
95
94
|
"members": {
|
96
95
|
"CreationTimeAfter": {
|
96
|
+
"shape": "S6",
|
97
97
|
"location": "querystring",
|
98
|
-
"locationName": "CreationTimeAfter"
|
99
|
-
"type": "timestamp"
|
98
|
+
"locationName": "CreationTimeAfter"
|
100
99
|
},
|
101
100
|
"CreationTimeBefore": {
|
101
|
+
"shape": "S6",
|
102
102
|
"location": "querystring",
|
103
|
-
"locationName": "CreationTimeBefore"
|
104
|
-
"type": "timestamp"
|
103
|
+
"locationName": "CreationTimeBefore"
|
105
104
|
},
|
106
105
|
"FlowDefinitionArn": {
|
107
106
|
"location": "querystring",
|
@@ -136,7 +135,7 @@
|
|
136
135
|
"HumanLoopName": {},
|
137
136
|
"HumanLoopStatus": {},
|
138
137
|
"CreationTime": {
|
139
|
-
"type": "timestamp"
|
138
|
+
"shape": "S6"
|
140
139
|
},
|
141
140
|
"FailureReason": {},
|
142
141
|
"FlowDefinitionArn": {}
|
@@ -210,5 +209,10 @@
|
|
210
209
|
}
|
211
210
|
}
|
212
211
|
},
|
213
|
-
"shapes": {
|
212
|
+
"shapes": {
|
213
|
+
"S6": {
|
214
|
+
"type": "timestamp",
|
215
|
+
"timestampFormat": "iso8601"
|
216
|
+
}
|
217
|
+
}
|
214
218
|
}
|
@@ -310,3 +310,5 @@ export import ConnectCampaigns = require('./connectcampaigns');
|
|
310
310
|
export import RedshiftServerless = require('./redshiftserverless');
|
311
311
|
export import RolesAnywhere = require('./rolesanywhere');
|
312
312
|
export import LicenseManagerUserSubscriptions = require('./licensemanagerusersubscriptions');
|
313
|
+
export import BackupStorage = require('./backupstorage');
|
314
|
+
export import PrivateNetworks = require('./privatenetworks');
|
@@ -311,5 +311,7 @@ module.exports = {
|
|
311
311
|
ConnectCampaigns: require('./connectcampaigns'),
|
312
312
|
RedshiftServerless: require('./redshiftserverless'),
|
313
313
|
RolesAnywhere: require('./rolesanywhere'),
|
314
|
-
LicenseManagerUserSubscriptions: require('./licensemanagerusersubscriptions')
|
314
|
+
LicenseManagerUserSubscriptions: require('./licensemanagerusersubscriptions'),
|
315
|
+
BackupStorage: require('./backupstorage'),
|
316
|
+
PrivateNetworks: require('./privatenetworks')
|
315
317
|
};
|
@@ -0,0 +1,469 @@
|
|
1
|
+
import {Request} from '../lib/request';
|
2
|
+
import {Response} from '../lib/response';
|
3
|
+
import {AWSError} from '../lib/error';
|
4
|
+
import {Service} from '../lib/service';
|
5
|
+
import {ServiceConfigurationOptions} from '../lib/service';
|
6
|
+
import {ConfigBase as Config} from '../lib/config-base';
|
7
|
+
import {Readable} from 'stream';
|
8
|
+
interface Blob {}
|
9
|
+
declare class BackupStorage extends Service {
|
10
|
+
/**
|
11
|
+
* Constructs a service object. This object has one method for each API operation.
|
12
|
+
*/
|
13
|
+
constructor(options?: BackupStorage.Types.ClientConfiguration)
|
14
|
+
config: Config & BackupStorage.Types.ClientConfiguration;
|
15
|
+
/**
|
16
|
+
* Delete Object from the incremental base Backup.
|
17
|
+
*/
|
18
|
+
deleteObject(params: BackupStorage.Types.DeleteObjectInput, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
|
19
|
+
/**
|
20
|
+
* Delete Object from the incremental base Backup.
|
21
|
+
*/
|
22
|
+
deleteObject(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
|
23
|
+
/**
|
24
|
+
* Gets the specified object's chunk.
|
25
|
+
*/
|
26
|
+
getChunk(params: BackupStorage.Types.GetChunkInput, callback?: (err: AWSError, data: BackupStorage.Types.GetChunkOutput) => void): Request<BackupStorage.Types.GetChunkOutput, AWSError>;
|
27
|
+
/**
|
28
|
+
* Gets the specified object's chunk.
|
29
|
+
*/
|
30
|
+
getChunk(callback?: (err: AWSError, data: BackupStorage.Types.GetChunkOutput) => void): Request<BackupStorage.Types.GetChunkOutput, AWSError>;
|
31
|
+
/**
|
32
|
+
* Get metadata associated with an Object.
|
33
|
+
*/
|
34
|
+
getObjectMetadata(params: BackupStorage.Types.GetObjectMetadataInput, callback?: (err: AWSError, data: BackupStorage.Types.GetObjectMetadataOutput) => void): Request<BackupStorage.Types.GetObjectMetadataOutput, AWSError>;
|
35
|
+
/**
|
36
|
+
* Get metadata associated with an Object.
|
37
|
+
*/
|
38
|
+
getObjectMetadata(callback?: (err: AWSError, data: BackupStorage.Types.GetObjectMetadataOutput) => void): Request<BackupStorage.Types.GetObjectMetadataOutput, AWSError>;
|
39
|
+
/**
|
40
|
+
* List chunks in a given Object
|
41
|
+
*/
|
42
|
+
listChunks(params: BackupStorage.Types.ListChunksInput, callback?: (err: AWSError, data: BackupStorage.Types.ListChunksOutput) => void): Request<BackupStorage.Types.ListChunksOutput, AWSError>;
|
43
|
+
/**
|
44
|
+
* List chunks in a given Object
|
45
|
+
*/
|
46
|
+
listChunks(callback?: (err: AWSError, data: BackupStorage.Types.ListChunksOutput) => void): Request<BackupStorage.Types.ListChunksOutput, AWSError>;
|
47
|
+
/**
|
48
|
+
* List all Objects in a given Backup.
|
49
|
+
*/
|
50
|
+
listObjects(params: BackupStorage.Types.ListObjectsInput, callback?: (err: AWSError, data: BackupStorage.Types.ListObjectsOutput) => void): Request<BackupStorage.Types.ListObjectsOutput, AWSError>;
|
51
|
+
/**
|
52
|
+
* List all Objects in a given Backup.
|
53
|
+
*/
|
54
|
+
listObjects(callback?: (err: AWSError, data: BackupStorage.Types.ListObjectsOutput) => void): Request<BackupStorage.Types.ListObjectsOutput, AWSError>;
|
55
|
+
/**
|
56
|
+
* Complete upload
|
57
|
+
*/
|
58
|
+
notifyObjectComplete(params: BackupStorage.Types.NotifyObjectCompleteInput, callback?: (err: AWSError, data: BackupStorage.Types.NotifyObjectCompleteOutput) => void): Request<BackupStorage.Types.NotifyObjectCompleteOutput, AWSError>;
|
59
|
+
/**
|
60
|
+
* Complete upload
|
61
|
+
*/
|
62
|
+
notifyObjectComplete(callback?: (err: AWSError, data: BackupStorage.Types.NotifyObjectCompleteOutput) => void): Request<BackupStorage.Types.NotifyObjectCompleteOutput, AWSError>;
|
63
|
+
/**
|
64
|
+
* Upload chunk.
|
65
|
+
*/
|
66
|
+
putChunk(params: BackupStorage.Types.PutChunkInput, callback?: (err: AWSError, data: BackupStorage.Types.PutChunkOutput) => void): Request<BackupStorage.Types.PutChunkOutput, AWSError>;
|
67
|
+
/**
|
68
|
+
* Upload chunk.
|
69
|
+
*/
|
70
|
+
putChunk(callback?: (err: AWSError, data: BackupStorage.Types.PutChunkOutput) => void): Request<BackupStorage.Types.PutChunkOutput, AWSError>;
|
71
|
+
/**
|
72
|
+
* Upload object that can store object metadata String and data blob in single API call using inline chunk field.
|
73
|
+
*/
|
74
|
+
putObject(params: BackupStorage.Types.PutObjectInput, callback?: (err: AWSError, data: BackupStorage.Types.PutObjectOutput) => void): Request<BackupStorage.Types.PutObjectOutput, AWSError>;
|
75
|
+
/**
|
76
|
+
* Upload object that can store object metadata String and data blob in single API call using inline chunk field.
|
77
|
+
*/
|
78
|
+
putObject(callback?: (err: AWSError, data: BackupStorage.Types.PutObjectOutput) => void): Request<BackupStorage.Types.PutObjectOutput, AWSError>;
|
79
|
+
/**
|
80
|
+
* Start upload containing one or many chunks.
|
81
|
+
*/
|
82
|
+
startObject(params: BackupStorage.Types.StartObjectInput, callback?: (err: AWSError, data: BackupStorage.Types.StartObjectOutput) => void): Request<BackupStorage.Types.StartObjectOutput, AWSError>;
|
83
|
+
/**
|
84
|
+
* Start upload containing one or many chunks.
|
85
|
+
*/
|
86
|
+
startObject(callback?: (err: AWSError, data: BackupStorage.Types.StartObjectOutput) => void): Request<BackupStorage.Types.StartObjectOutput, AWSError>;
|
87
|
+
}
|
88
|
+
declare namespace BackupStorage {
|
89
|
+
export interface BackupObject {
|
90
|
+
/**
|
91
|
+
* Object name
|
92
|
+
*/
|
93
|
+
Name: string;
|
94
|
+
/**
|
95
|
+
* Number of chunks in object
|
96
|
+
*/
|
97
|
+
ChunksCount?: OptionalLong;
|
98
|
+
/**
|
99
|
+
* Metadata string associated with the Object
|
100
|
+
*/
|
101
|
+
MetadataString?: string;
|
102
|
+
/**
|
103
|
+
* Object checksum
|
104
|
+
*/
|
105
|
+
ObjectChecksum: string;
|
106
|
+
/**
|
107
|
+
* Checksum algorithm
|
108
|
+
*/
|
109
|
+
ObjectChecksumAlgorithm: SummaryChecksumAlgorithm;
|
110
|
+
/**
|
111
|
+
* Object token
|
112
|
+
*/
|
113
|
+
ObjectToken: string;
|
114
|
+
}
|
115
|
+
export interface Chunk {
|
116
|
+
/**
|
117
|
+
* Chunk index
|
118
|
+
*/
|
119
|
+
Index: long;
|
120
|
+
/**
|
121
|
+
* Chunk length
|
122
|
+
*/
|
123
|
+
Length: long;
|
124
|
+
/**
|
125
|
+
* Chunk checksum
|
126
|
+
*/
|
127
|
+
Checksum: string;
|
128
|
+
/**
|
129
|
+
* Checksum algorithm
|
130
|
+
*/
|
131
|
+
ChecksumAlgorithm: DataChecksumAlgorithm;
|
132
|
+
/**
|
133
|
+
* Chunk token
|
134
|
+
*/
|
135
|
+
ChunkToken: string;
|
136
|
+
}
|
137
|
+
export type ChunkList = Chunk[];
|
138
|
+
export type DataChecksumAlgorithm = "SHA256"|string;
|
139
|
+
export interface DeleteObjectInput {
|
140
|
+
/**
|
141
|
+
* Backup job Id for the in-progress backup.
|
142
|
+
*/
|
143
|
+
BackupJobId: string;
|
144
|
+
/**
|
145
|
+
* The name of the Object.
|
146
|
+
*/
|
147
|
+
ObjectName: string;
|
148
|
+
}
|
149
|
+
export interface GetChunkInput {
|
150
|
+
/**
|
151
|
+
* Storage job id
|
152
|
+
*/
|
153
|
+
StorageJobId: string;
|
154
|
+
/**
|
155
|
+
* Chunk token
|
156
|
+
*/
|
157
|
+
ChunkToken: string;
|
158
|
+
}
|
159
|
+
export interface GetChunkOutput {
|
160
|
+
/**
|
161
|
+
* Chunk data
|
162
|
+
*/
|
163
|
+
Data: PayloadBlob;
|
164
|
+
/**
|
165
|
+
* Data length
|
166
|
+
*/
|
167
|
+
Length: long;
|
168
|
+
/**
|
169
|
+
* Data checksum
|
170
|
+
*/
|
171
|
+
Checksum: string;
|
172
|
+
/**
|
173
|
+
* Checksum algorithm
|
174
|
+
*/
|
175
|
+
ChecksumAlgorithm: DataChecksumAlgorithm;
|
176
|
+
}
|
177
|
+
export interface GetObjectMetadataInput {
|
178
|
+
/**
|
179
|
+
* Backup job id for the in-progress backup.
|
180
|
+
*/
|
181
|
+
StorageJobId: string;
|
182
|
+
/**
|
183
|
+
* Object token.
|
184
|
+
*/
|
185
|
+
ObjectToken: string;
|
186
|
+
}
|
187
|
+
export interface GetObjectMetadataOutput {
|
188
|
+
/**
|
189
|
+
* Metadata string.
|
190
|
+
*/
|
191
|
+
MetadataString?: string;
|
192
|
+
/**
|
193
|
+
* Metadata blob.
|
194
|
+
*/
|
195
|
+
MetadataBlob?: PayloadBlob;
|
196
|
+
/**
|
197
|
+
* The size of MetadataBlob.
|
198
|
+
*/
|
199
|
+
MetadataBlobLength?: long;
|
200
|
+
/**
|
201
|
+
* MetadataBlob checksum.
|
202
|
+
*/
|
203
|
+
MetadataBlobChecksum?: string;
|
204
|
+
/**
|
205
|
+
* Checksum algorithm.
|
206
|
+
*/
|
207
|
+
MetadataBlobChecksumAlgorithm?: DataChecksumAlgorithm;
|
208
|
+
}
|
209
|
+
export interface ListChunksInput {
|
210
|
+
/**
|
211
|
+
* Storage job id
|
212
|
+
*/
|
213
|
+
StorageJobId: string;
|
214
|
+
/**
|
215
|
+
* Object token
|
216
|
+
*/
|
217
|
+
ObjectToken: string;
|
218
|
+
/**
|
219
|
+
* Maximum number of chunks
|
220
|
+
*/
|
221
|
+
MaxResults?: MaxResults;
|
222
|
+
/**
|
223
|
+
* Pagination token
|
224
|
+
*/
|
225
|
+
NextToken?: string;
|
226
|
+
}
|
227
|
+
export interface ListChunksOutput {
|
228
|
+
/**
|
229
|
+
* List of chunks
|
230
|
+
*/
|
231
|
+
ChunkList: ChunkList;
|
232
|
+
/**
|
233
|
+
* Pagination token
|
234
|
+
*/
|
235
|
+
NextToken?: string;
|
236
|
+
}
|
237
|
+
export interface ListObjectsInput {
|
238
|
+
/**
|
239
|
+
* Storage job id
|
240
|
+
*/
|
241
|
+
StorageJobId: string;
|
242
|
+
/**
|
243
|
+
* Optional, specifies the starting Object name to list from. Ignored if NextToken is not NULL
|
244
|
+
*/
|
245
|
+
StartingObjectName?: string;
|
246
|
+
/**
|
247
|
+
* Optional, specifies the starting Object prefix to list from. Ignored if NextToken is not NULL
|
248
|
+
*/
|
249
|
+
StartingObjectPrefix?: string;
|
250
|
+
/**
|
251
|
+
* Maximum objects count
|
252
|
+
*/
|
253
|
+
MaxResults?: MaxResults;
|
254
|
+
/**
|
255
|
+
* Pagination token
|
256
|
+
*/
|
257
|
+
NextToken?: string;
|
258
|
+
/**
|
259
|
+
* (Optional) Created before filter
|
260
|
+
*/
|
261
|
+
CreatedBefore?: timestamp;
|
262
|
+
/**
|
263
|
+
* (Optional) Created after filter
|
264
|
+
*/
|
265
|
+
CreatedAfter?: timestamp;
|
266
|
+
}
|
267
|
+
export interface ListObjectsOutput {
|
268
|
+
/**
|
269
|
+
* Object list
|
270
|
+
*/
|
271
|
+
ObjectList: ObjectList;
|
272
|
+
/**
|
273
|
+
* Pagination token
|
274
|
+
*/
|
275
|
+
NextToken?: string;
|
276
|
+
}
|
277
|
+
export type MaxResults = number;
|
278
|
+
export type MetadataString = string;
|
279
|
+
export interface NotifyObjectCompleteInput {
|
280
|
+
/**
|
281
|
+
* Backup job Id for the in-progress backup
|
282
|
+
*/
|
283
|
+
BackupJobId: string;
|
284
|
+
/**
|
285
|
+
* Upload Id for the in-progress upload
|
286
|
+
*/
|
287
|
+
UploadId: string;
|
288
|
+
/**
|
289
|
+
* Object checksum
|
290
|
+
*/
|
291
|
+
ObjectChecksum: string;
|
292
|
+
/**
|
293
|
+
* Checksum algorithm
|
294
|
+
*/
|
295
|
+
ObjectChecksumAlgorithm: SummaryChecksumAlgorithm;
|
296
|
+
/**
|
297
|
+
* Optional metadata associated with an Object. Maximum string length is 256 bytes.
|
298
|
+
*/
|
299
|
+
MetadataString?: MetadataString;
|
300
|
+
/**
|
301
|
+
* Optional metadata associated with an Object. Maximum length is 4MB.
|
302
|
+
*/
|
303
|
+
MetadataBlob?: PayloadBlob;
|
304
|
+
/**
|
305
|
+
* The size of MetadataBlob.
|
306
|
+
*/
|
307
|
+
MetadataBlobLength?: long;
|
308
|
+
/**
|
309
|
+
* Checksum of MetadataBlob.
|
310
|
+
*/
|
311
|
+
MetadataBlobChecksum?: string;
|
312
|
+
/**
|
313
|
+
* Checksum algorithm.
|
314
|
+
*/
|
315
|
+
MetadataBlobChecksumAlgorithm?: DataChecksumAlgorithm;
|
316
|
+
}
|
317
|
+
export interface NotifyObjectCompleteOutput {
|
318
|
+
/**
|
319
|
+
* Object checksum
|
320
|
+
*/
|
321
|
+
ObjectChecksum: string;
|
322
|
+
/**
|
323
|
+
* Checksum algorithm
|
324
|
+
*/
|
325
|
+
ObjectChecksumAlgorithm: SummaryChecksumAlgorithm;
|
326
|
+
}
|
327
|
+
export type ObjectList = BackupObject[];
|
328
|
+
export type OptionalLong = number;
|
329
|
+
export type PayloadBlob = Buffer|Uint8Array|Blob|string|Readable;
|
330
|
+
export interface PutChunkInput {
|
331
|
+
/**
|
332
|
+
* Backup job Id for the in-progress backup.
|
333
|
+
*/
|
334
|
+
BackupJobId: string;
|
335
|
+
/**
|
336
|
+
* Upload Id for the in-progress upload.
|
337
|
+
*/
|
338
|
+
UploadId: string;
|
339
|
+
/**
|
340
|
+
* Describes this chunk's position relative to the other chunks
|
341
|
+
*/
|
342
|
+
ChunkIndex: long;
|
343
|
+
/**
|
344
|
+
* Data to be uploaded
|
345
|
+
*/
|
346
|
+
Data: PayloadBlob;
|
347
|
+
/**
|
348
|
+
* Data length
|
349
|
+
*/
|
350
|
+
Length: long;
|
351
|
+
/**
|
352
|
+
* Data checksum
|
353
|
+
*/
|
354
|
+
Checksum: string;
|
355
|
+
/**
|
356
|
+
* Checksum algorithm
|
357
|
+
*/
|
358
|
+
ChecksumAlgorithm: DataChecksumAlgorithm;
|
359
|
+
}
|
360
|
+
export interface PutChunkOutput {
|
361
|
+
/**
|
362
|
+
* Chunk checksum
|
363
|
+
*/
|
364
|
+
ChunkChecksum: string;
|
365
|
+
/**
|
366
|
+
* Checksum algorithm
|
367
|
+
*/
|
368
|
+
ChunkChecksumAlgorithm: DataChecksumAlgorithm;
|
369
|
+
}
|
370
|
+
export interface PutObjectInput {
|
371
|
+
/**
|
372
|
+
* Backup job Id for the in-progress backup.
|
373
|
+
*/
|
374
|
+
BackupJobId: string;
|
375
|
+
/**
|
376
|
+
* The name of the Object to be uploaded.
|
377
|
+
*/
|
378
|
+
ObjectName: string;
|
379
|
+
/**
|
380
|
+
* Store user defined metadata like backup checksum, disk ids, restore metadata etc.
|
381
|
+
*/
|
382
|
+
MetadataString?: string;
|
383
|
+
/**
|
384
|
+
* Inline chunk data to be uploaded.
|
385
|
+
*/
|
386
|
+
InlineChunk?: PayloadBlob;
|
387
|
+
/**
|
388
|
+
* Length of the inline chunk data.
|
389
|
+
*/
|
390
|
+
InlineChunkLength?: long;
|
391
|
+
/**
|
392
|
+
* Inline chunk checksum
|
393
|
+
*/
|
394
|
+
InlineChunkChecksum?: string;
|
395
|
+
/**
|
396
|
+
* Inline chunk checksum algorithm
|
397
|
+
*/
|
398
|
+
InlineChunkChecksumAlgorithm?: string;
|
399
|
+
/**
|
400
|
+
* object checksum
|
401
|
+
*/
|
402
|
+
ObjectChecksum?: string;
|
403
|
+
/**
|
404
|
+
* object checksum algorithm
|
405
|
+
*/
|
406
|
+
ObjectChecksumAlgorithm?: SummaryChecksumAlgorithm;
|
407
|
+
/**
|
408
|
+
* Throw an exception if Object name is already exist.
|
409
|
+
*/
|
410
|
+
ThrowOnDuplicate?: boolean;
|
411
|
+
}
|
412
|
+
export interface PutObjectOutput {
|
413
|
+
/**
|
414
|
+
* Inline chunk checksum
|
415
|
+
*/
|
416
|
+
InlineChunkChecksum: string;
|
417
|
+
/**
|
418
|
+
* Inline chunk checksum algorithm
|
419
|
+
*/
|
420
|
+
InlineChunkChecksumAlgorithm: DataChecksumAlgorithm;
|
421
|
+
/**
|
422
|
+
* object checksum
|
423
|
+
*/
|
424
|
+
ObjectChecksum: string;
|
425
|
+
/**
|
426
|
+
* object checksum algorithm
|
427
|
+
*/
|
428
|
+
ObjectChecksumAlgorithm: SummaryChecksumAlgorithm;
|
429
|
+
}
|
430
|
+
export interface StartObjectInput {
|
431
|
+
/**
|
432
|
+
* Backup job Id for the in-progress backup
|
433
|
+
*/
|
434
|
+
BackupJobId: string;
|
435
|
+
/**
|
436
|
+
* Name for the object.
|
437
|
+
*/
|
438
|
+
ObjectName: string;
|
439
|
+
/**
|
440
|
+
* Throw an exception if Object name is already exist.
|
441
|
+
*/
|
442
|
+
ThrowOnDuplicate?: boolean;
|
443
|
+
}
|
444
|
+
export interface StartObjectOutput {
|
445
|
+
/**
|
446
|
+
* Upload Id for a given upload.
|
447
|
+
*/
|
448
|
+
UploadId: string;
|
449
|
+
}
|
450
|
+
export type SummaryChecksumAlgorithm = "SUMMARY"|string;
|
451
|
+
export type long = number;
|
452
|
+
export type timestamp = Date;
|
453
|
+
/**
|
454
|
+
* A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
|
455
|
+
*/
|
456
|
+
export type apiVersion = "2018-04-10"|"latest"|string;
|
457
|
+
export interface ClientApiVersions {
|
458
|
+
/**
|
459
|
+
* A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
|
460
|
+
*/
|
461
|
+
apiVersion?: apiVersion;
|
462
|
+
}
|
463
|
+
export type ClientConfiguration = ServiceConfigurationOptions & ClientApiVersions;
|
464
|
+
/**
|
465
|
+
* Contains interfaces for use with the BackupStorage client.
|
466
|
+
*/
|
467
|
+
export import Types = BackupStorage;
|
468
|
+
}
|
469
|
+
export = BackupStorage;
|
@@ -0,0 +1,18 @@
|
|
1
|
+
require('../lib/node_loader');
|
2
|
+
var AWS = require('../lib/core');
|
3
|
+
var Service = AWS.Service;
|
4
|
+
var apiLoader = AWS.apiLoader;
|
5
|
+
|
6
|
+
apiLoader.services['backupstorage'] = {};
|
7
|
+
AWS.BackupStorage = Service.defineService('backupstorage', ['2018-04-10']);
|
8
|
+
Object.defineProperty(apiLoader.services['backupstorage'], '2018-04-10', {
|
9
|
+
get: function get() {
|
10
|
+
var model = require('../apis/backupstorage-2018-04-10.min.json');
|
11
|
+
model.paginators = require('../apis/backupstorage-2018-04-10.paginators.json').pagination;
|
12
|
+
return model;
|
13
|
+
},
|
14
|
+
enumerable: true,
|
15
|
+
configurable: true
|
16
|
+
});
|
17
|
+
|
18
|
+
module.exports = AWS.BackupStorage;
|
@@ -21,11 +21,11 @@ declare class CloudWatch extends Service {
|
|
21
21
|
*/
|
22
22
|
deleteAlarms(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
|
23
23
|
/**
|
24
|
-
*
|
24
|
+
* Deletes the specified anomaly detection model from your account. For more information about how to delete an anomaly detection model, see Deleting an anomaly detection model in the CloudWatch User Guide.
|
25
25
|
*/
|
26
26
|
deleteAnomalyDetector(params: CloudWatch.Types.DeleteAnomalyDetectorInput, callback?: (err: AWSError, data: CloudWatch.Types.DeleteAnomalyDetectorOutput) => void): Request<CloudWatch.Types.DeleteAnomalyDetectorOutput, AWSError>;
|
27
27
|
/**
|
28
|
-
*
|
28
|
+
* Deletes the specified anomaly detection model from your account. For more information about how to delete an anomaly detection model, see Deleting an anomaly detection model in the CloudWatch User Guide.
|
29
29
|
*/
|
30
30
|
deleteAnomalyDetector(callback?: (err: AWSError, data: CloudWatch.Types.DeleteAnomalyDetectorOutput) => void): Request<CloudWatch.Types.DeleteAnomalyDetectorOutput, AWSError>;
|
31
31
|
/**
|
@@ -245,11 +245,11 @@ declare class CloudWatch extends Service {
|
|
245
245
|
*/
|
246
246
|
putMetricAlarm(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
|
247
247
|
/**
|
248
|
-
* Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics. You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data. Each PutMetricData request is limited to
|
248
|
+
* Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics. You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data. Each PutMetricData request is limited to 1 MB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 1000 different metrics. Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported. You can use up to 30 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide. You specify the time stamp to be associated with each data point. You can specify time stamps that are as much as two weeks before the current date, and as much as 2 hours after the current day and time. Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted. Data points with time stamps between 3 and 24 hours ago can take as much as 2 hours to become available for for GetMetricData or GetMetricStatistics. 
CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true: The SampleCount value of the statistic set is 1 and Min, Max, and Sum are all equal. The Min and Max are equal, and Sum is equal to Min multiplied by SampleCount.
|
249
249
|
*/
|
250
250
|
putMetricData(params: CloudWatch.Types.PutMetricDataInput, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
|
251
251
|
/**
|
252
|
-
* Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics. You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data. Each PutMetricData request is limited to
|
252
|
+
* Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics. You can publish either individual data points in the Value field, or arrays of values and the number of times each value occurred during the period by using the Values and Counts fields in the MetricDatum structure. Using the Values and Counts method enables you to publish up to 150 values per metric with one PutMetricData request, and supports retrieving percentile statistics on this data. Each PutMetricData request is limited to 1 MB in size for HTTP POST requests. You can send a payload compressed by gzip. Each request is also limited to no more than 1000 different metrics. Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported. You can use up to 30 dimensions per metric to further clarify what data the metric collects. Each dimension consists of a Name and Value pair. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide. You specify the time stamp to be associated with each data point. You can specify time stamps that are as much as two weeks before the current date, and as much as 2 hours after the current day and time. Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricData or GetMetricStatistics from the time they are submitted. Data points with time stamps between 3 and 24 hours ago can take as much as 2 hours to become available for for GetMetricData or GetMetricStatistics. 
CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true: The SampleCount value of the statistic set is 1 and Min, Max, and Sum are all equal. The Min and Max are equal, and Sum is equal to Min multiplied by SampleCount.
|
253
253
|
*/
|
254
254
|
putMetricData(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
|
255
255
|
/**
|
@@ -1537,7 +1537,7 @@ declare namespace CloudWatch {
|
|
1537
1537
|
*/
|
1538
1538
|
StatisticValues?: StatisticSet;
|
1539
1539
|
/**
|
1540
|
-
* Array of numbers representing the values for the metric during the period. Each unique value is listed just once in this array, and the corresponding number in the Counts array specifies the number of times that value occurred during the period. You can include up to
|
1540
|
+
* Array of numbers representing the values for the metric during the period. Each unique value is listed just once in this array, and the corresponding number in the Counts array specifies the number of times that value occurred during the period. You can include up to 500 unique values in each PutMetricData action that specifies a Values array. Although the Values array accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of -2^360 to 2^360. In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.
|
1541
1541
|
*/
|
1542
1542
|
Values?: Values;
|
1543
1543
|
/**
|
@@ -1836,7 +1836,7 @@ declare namespace CloudWatch {
|
|
1836
1836
|
*/
|
1837
1837
|
Period?: Period;
|
1838
1838
|
/**
|
1839
|
-
* The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately. If you don't specify Unit, CloudWatch retrieves all unit types that have been published for the metric and attempts to evaluate the alarm. Usually, metrics are published with only one unit, so the alarm works as intended. However, if the metric is published with multiple types of units and you don't specify a unit, the alarm's behavior is not defined and it behaves
|
1839
|
+
* The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately. If you don't specify Unit, CloudWatch retrieves all unit types that have been published for the metric and attempts to evaluate the alarm. Usually, metrics are published with only one unit, so the alarm works as intended. However, if the metric is published with multiple types of units and you don't specify a unit, the alarm's behavior is not defined and it behaves unpredictably. We recommend omitting Unit so that you don't inadvertently specify an incorrect unit that is not published for this metric. Doing so causes the alarm to be stuck in the INSUFFICIENT DATA state.
|
1840
1840
|
*/
|
1841
1841
|
Unit?: StandardUnit;
|
1842
1842
|
/**
|
@@ -1882,7 +1882,7 @@ declare namespace CloudWatch {
|
|
1882
1882
|
*/
|
1883
1883
|
Namespace: Namespace;
|
1884
1884
|
/**
|
1885
|
-
* The data for the metric. The array can include no more than
|
1885
|
+
* The data for the metric. The array can include no more than 1000 metrics per call.
|
1886
1886
|
*/
|
1887
1887
|
MetricData: MetricData;
|
1888
1888
|
}
|