@cumulus/aws-client 9.8.0 → 10.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -858,6 +858,7 @@ Copy an S3 object to another location in S3 using a multipart copy
858
858
  | [params.sourceObject] | <code>AWS.S3.HeadObjectOutput</code> | | Output from https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#headObject-property |
859
859
  | [params.ACL] | <code>string</code> | | an [S3 Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) |
860
860
  | [params.copyTags] | <code>boolean</code> | <code>false</code> | |
861
+ | [params.chunkSize] | <code>number</code> | | chunk size of the S3 multipart uploads |
861
862
 
862
863
  <a name="module_S3..moveObject"></a>
863
864
 
@@ -875,6 +876,7 @@ Move an S3 object to another location in S3
875
876
  | params.destinationKey | <code>string</code> | | |
876
877
  | [params.ACL] | <code>string</code> | | an [S3 Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) |
877
878
  | [params.copyTags] | <code>boolean</code> | <code>false</code> | |
879
+ | [params.chunkSize] | <code>number</code> | | chunk size of the S3 multipart uploads |
878
880
 
879
881
  <a name="module_SNS"></a>
880
882
 
package/S3.d.ts CHANGED
@@ -282,7 +282,9 @@ export declare const deleteS3Files: (s3Objs: AWS.S3.DeleteObjectRequest[]) => Pr
282
282
  * @param {string} bucket - name of the bucket
283
283
  * @returns {Promise} the promised result of `S3.deleteBucket`
284
284
  **/
285
- export declare const recursivelyDeleteS3Bucket: (bucket: string) => Promise<void>;
285
+ export declare const recursivelyDeleteS3Bucket: (bucket: string) => Promise<{
286
+ $response: import("aws-sdk").Response<{}, import("aws-sdk").AWSError>;
287
+ }>;
286
288
  /**
287
289
  * Delete a list of buckets and all of their objects from S3
288
290
  *
@@ -412,6 +414,7 @@ export declare const createS3Buckets: (buckets: Array<string>) => Promise<any>;
412
414
  * Output from https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#headObject-property
413
415
  * @param {string} [params.ACL] - an [S3 Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl)
414
416
  * @param {boolean} [params.copyTags=false]
417
+ * @param {number} [params.chunkSize] - chunk size of the S3 multipart uploads
415
418
  * @returns {Promise.<{ etag: string }>} object containing the ETag of the
416
419
  * destination object
417
420
  */
@@ -423,7 +426,7 @@ export declare const multipartCopyObject: (params: {
423
426
  sourceObject?: AWS.S3.HeadObjectOutput;
424
427
  ACL?: AWS.S3.ObjectCannedACL;
425
428
  copyTags?: boolean;
426
- copyMetadata?: boolean;
429
+ chunkSize?: number;
427
430
  }) => Promise<{
428
431
  etag: string;
429
432
  }>;
@@ -437,6 +440,7 @@ export declare const multipartCopyObject: (params: {
437
440
  * @param {string} params.destinationKey
438
441
  * @param {string} [params.ACL] - an [S3 Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl)
439
442
  * @param {boolean} [params.copyTags=false]
443
+ * @param {number} [params.chunkSize] - chunk size of the S3 multipart uploads
440
444
  * @returns {Promise<undefined>}
441
445
  */
442
446
  export declare const moveObject: (params: {
@@ -446,6 +450,7 @@ export declare const moveObject: (params: {
446
450
  destinationKey: string;
447
451
  ACL?: AWS.S3.ObjectCannedACL;
448
452
  copyTags?: boolean;
449
- }) => Promise<void>;
453
+ chunkSize?: number;
454
+ }) => Promise<import("aws-sdk/lib/request").PromiseResult<import("aws-sdk/clients/s3").DeleteObjectOutput, import("aws-sdk").AWSError>>;
450
455
  export {};
451
456
  //# sourceMappingURL=S3.d.ts.map
package/S3.js CHANGED
@@ -166,7 +166,7 @@ exports.s3ObjectExists = s3ObjectExists;
166
166
  */
167
167
  const waitForObjectToExist = async (params) => {
168
168
  const { bucket, key, interval = 1000, timeout = 30 * 1000, } = params;
169
- await (0, p_wait_for_1.default)(() => (0, exports.s3ObjectExists)({ Bucket: bucket, Key: key }), { interval, timeout });
169
+ return await (0, p_wait_for_1.default)(() => (0, exports.s3ObjectExists)({ Bucket: bucket, Key: key }), { interval, timeout });
170
170
  };
171
171
  exports.waitForObjectToExist = waitForObjectToExist;
172
172
  /**
@@ -320,9 +320,7 @@ exports.s3PutObjectTagging = (0, utils_1.improveStackTrace)((Bucket, Key, Taggin
320
320
  * @returns {Promise<AWS.S3.GetObjectOutput>} response from `AWS.S3.getObject()`
321
321
  * as a Promise
322
322
  */
323
- const getObject = (
324
- // eslint-disable-next-line no-shadow
325
- s3Client, params) => s3Client.getObject(params).promise();
323
+ const getObject = (s3Client, params) => s3Client.getObject(params).promise();
326
324
  exports.getObject = getObject;
327
325
  /**
328
326
  * Get an object from S3, waiting for it to exist and, if specified, have the
@@ -423,8 +421,7 @@ exports.getObjectReadStream = getObjectReadStream;
423
421
  **/
424
422
  const fileExists = async (bucket, key) => {
425
423
  try {
426
- const r = await (0, services_1.s3)().headObject({ Key: key, Bucket: bucket }).promise();
427
- return r;
424
+ return await (0, services_1.s3)().headObject({ Key: key, Bucket: bucket }).promise();
428
425
  }
429
426
  catch (error) {
430
427
  // if file is not return false
@@ -489,7 +486,7 @@ exports.recursivelyDeleteS3Bucket = (0, utils_1.improveStackTrace)(async (bucket
489
486
  };
490
487
  });
491
488
  await (0, exports.deleteS3Files)(s3Objects);
492
- await (0, services_1.s3)().deleteBucket({ Bucket: bucket }).promise();
489
+ return await (0, services_1.s3)().deleteBucket({ Bucket: bucket }).promise();
493
490
  });
494
491
  /**
495
492
  * Delete a list of buckets and all of their objects from S3
@@ -743,12 +740,13 @@ const uploadPartCopy = async (params) => {
743
740
  * Output from https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#headObject-property
744
741
  * @param {string} [params.ACL] - an [S3 Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl)
745
742
  * @param {boolean} [params.copyTags=false]
743
+ * @param {number} [params.chunkSize] - chunk size of the S3 multipart uploads
746
744
  * @returns {Promise.<{ etag: string }>} object containing the ETag of the
747
745
  * destination object
748
746
  */
749
747
  const multipartCopyObject = async (params) => {
750
748
  var _a;
751
- const { sourceBucket, sourceKey, destinationBucket, destinationKey, ACL, copyTags = false, } = params;
749
+ const { sourceBucket, sourceKey, destinationBucket, destinationKey, ACL, copyTags = false, chunkSize, } = params;
752
750
  const sourceObject = (_a = params.sourceObject) !== null && _a !== void 0 ? _a : await (0, exports.headObject)(sourceBucket, sourceKey);
753
751
  // Create a multi-part upload (copy) and get its UploadId
754
752
  const uploadId = await createMultipartUpload({
@@ -766,7 +764,7 @@ const multipartCopyObject = async (params) => {
766
764
  if (objectSize === undefined) {
767
765
  throw new Error(`Unable to determine size of s3://${sourceBucket}/${sourceKey}`);
768
766
  }
769
- const chunks = S3MultipartUploads.createMultipartChunks(objectSize);
767
+ const chunks = S3MultipartUploads.createMultipartChunks(objectSize, chunkSize);
770
768
  // Submit all of the upload (copy) parts to S3
771
769
  const uploadPartCopyResponses = await Promise.all(chunks.map(({ start, end }, index) => uploadPartCopy({
772
770
  uploadId,
@@ -811,6 +809,7 @@ exports.multipartCopyObject = multipartCopyObject;
811
809
  * @param {string} params.destinationKey
812
810
  * @param {string} [params.ACL] - an [S3 Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl)
813
811
  * @param {boolean} [params.copyTags=false]
812
+ * @param {number} [params.chunkSize] - chunk size of the S3 multipart uploads
814
813
  * @returns {Promise<undefined>}
815
814
  */
816
815
  const moveObject = async (params) => {
@@ -821,8 +820,9 @@ const moveObject = async (params) => {
821
820
  destinationKey: params.destinationKey,
822
821
  ACL: params.ACL,
823
822
  copyTags: (0, isBoolean_1.default)(params.copyTags) ? params.copyTags : true,
823
+ chunkSize: params.chunkSize,
824
824
  });
825
- await (0, exports.deleteS3Object)(params.sourceBucket, params.sourceKey);
825
+ return await (0, exports.deleteS3Object)(params.sourceBucket, params.sourceKey);
826
826
  };
827
827
  exports.moveObject = moveObject;
828
828
  //# sourceMappingURL=S3.js.map
@@ -65,7 +65,9 @@ export declare const getExecutionHistory: (params: import("aws-sdk/clients/stepf
65
65
  events: import("aws-sdk/clients/stepfunctions").HistoryEventList;
66
66
  }>;
67
67
  export declare const getExecutionStatus: (executionArn: string) => Promise<{
68
- execution: import("aws-sdk/lib/request").PromiseResult<import("aws-sdk/clients/stepfunctions").DescribeExecutionOutput, import("aws-sdk").AWSError>;
68
+ execution: import("aws-sdk/clients/stepfunctions").DescribeExecutionOutput & {
69
+ $response: import("aws-sdk").Response<import("aws-sdk/clients/stepfunctions").DescribeExecutionOutput, import("aws-sdk").AWSError>;
70
+ };
69
71
  executionHistory: {
70
72
  events: import("aws-sdk/clients/stepfunctions").HistoryEventList;
71
73
  };
@@ -5,7 +5,24 @@ declare type Chunk = {
5
5
  start: number;
6
6
  end: number;
7
7
  };
8
- export declare const createMultipartChunks: (objectSize: number, maxChunkSize?: number) => Chunk[];
8
+ /**
9
+ * Each part of a multi-part copy needs to specify a byte range to be copied.
10
+ * This byte range has a starting byte and an ending byte (inclusive) that makes
11
+ * up the part. The maximum allowed chunk size is 5368709120 bytes.
12
+ *
13
+ * This function takes a file size and an optional chunkSize. It returns an array
14
+ * of objects, each containing a `start` and an `end` value. These will make up
15
+ * the ranges of the multi-part copy.
16
+ *
17
+ * From anecdotal testing, a chunk size of 250 MB seems to perform fairly well.
18
+ *
19
+ * https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
20
+ *
21
+ * @param {number} objectSize - size of the object
22
+ * @param {number} chunkSize - chunk size of the S3 multipart uploads
23
+ * @returns {Array<Chunk>} - array of chunks
24
+ */
25
+ export declare const createMultipartChunks: (objectSize: number, chunkSize?: number) => Chunk[];
9
26
  export declare const createMultipartUpload: (params: AWS.S3.CreateMultipartUploadRequest) => Promise<import("aws-sdk/lib/request").PromiseResult<import("aws-sdk/clients/s3").CreateMultipartUploadOutput, import("aws-sdk").AWSError>>;
10
27
  export declare const completeMultipartUpload: (params: AWS.S3.CompleteMultipartUploadRequest) => Promise<CompleteMultipartUploadOutput>;
11
28
  export declare const abortMultipartUpload: (params: AWS.S3.AbortMultipartUploadRequest) => Promise<import("aws-sdk/lib/request").PromiseResult<import("aws-sdk/clients/s3").AbortMultipartUploadOutput, import("aws-sdk").AWSError>>;
@@ -8,21 +8,27 @@ exports.uploadPartCopy = exports.abortMultipartUpload = exports.completeMultipar
8
8
  const range_1 = __importDefault(require("lodash/range"));
9
9
  const services_1 = require("../services");
10
10
  const MB = 1024 * 1024;
11
- // Each part of a multi-part copy needs to specify a byte range to be copied.
12
- // This byte range has a starting byte and an ending byte (inclusive) that makes
13
- // up the part. The maximum allowed chunk size is 5368709120 bytes.
14
- //
15
- // This function takes a file size and an optional maxSize. It returns an array
16
- // of objects, each containing a `start` and an `end` value. These will make up
17
- // the ranges of the multi-part copy.
18
- //
19
- // From anecdotal testing, a chunk size of 250 MB seems to perform fairly well.
20
- //
21
- // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
22
- const createMultipartChunks = (objectSize, maxChunkSize = 250 * MB) => (0, range_1.default)(0, objectSize, maxChunkSize)
11
+ /**
12
+ * Each part of a multi-part copy needs to specify a byte range to be copied.
13
+ * This byte range has a starting byte and an ending byte (inclusive) that makes
14
+ * up the part. The maximum allowed chunk size is 5368709120 bytes.
15
+ *
16
+ * This function takes a file size and an optional maxSize. It returns an array
17
+ * of objects, each containing a `start` and an `end` value. These will make up
18
+ * the ranges of the multi-part copy.
19
+ *
20
+ * From anecdotal testing, a chunk size of 250 MB seems to perform fairly well.
21
+ *
22
+ * https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
23
+ *
24
+ * @param {number} objectSize - size of the object
25
+ * @param {number} chunkSize - chunk size of the S3 multipart uploads
26
+ * @returns {Promise<Array<Chunk>>} - array of chunks
27
+ */
28
+ const createMultipartChunks = (objectSize, chunkSize = 250 * MB) => (0, range_1.default)(0, objectSize, chunkSize)
23
29
  .map((start) => ({
24
30
  start,
25
- end: Math.min(start + maxChunkSize, objectSize) - 1,
31
+ end: Math.min(start + chunkSize, objectSize) - 1,
26
32
  }));
27
33
  exports.createMultipartChunks = createMultipartChunks;
28
34
  const createMultipartUpload = async (params) => await (0, services_1.s3)().createMultipartUpload(params).promise();
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@cumulus/aws-client",
3
- "version": "9.8.0",
3
+ "version": "10.0.1",
4
4
  "description": "Utilities for working with AWS",
5
5
  "keywords": [
6
6
  "GIBS",
@@ -43,9 +43,9 @@
43
43
  "author": "Cumulus Authors",
44
44
  "license": "Apache-2.0",
45
45
  "dependencies": {
46
- "@cumulus/checksum": "9.8.0",
47
- "@cumulus/errors": "9.8.0",
48
- "@cumulus/logger": "9.8.0",
46
+ "@cumulus/checksum": "10.0.1",
47
+ "@cumulus/errors": "10.0.1",
48
+ "@cumulus/logger": "10.0.1",
49
49
  "aws-sdk": "^2.814.0",
50
50
  "jsonpath-plus": "^1.1.0",
51
51
  "lodash": "~4.17.20",
@@ -54,5 +54,5 @@
54
54
  "p-wait-for": "^3.1.0",
55
55
  "pump": "^3.0.0"
56
56
  },
57
- "gitHead": "913034ba6814e562b7f3d58bb39cf086255a6efd"
57
+ "gitHead": "49c3c88336838184f22f35fbce298c71cd269138"
58
58
  }