@cumulus/aws-client 9.9.0 → 10.0.0-beta.0

This diff shows the changes between these publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
package/S3.d.ts CHANGED
@@ -282,7 +282,9 @@ export declare const deleteS3Files: (s3Objs: AWS.S3.DeleteObjectRequest[]) => Pr
  * @param {string} bucket - name of the bucket
  * @returns {Promise} the promised result of `S3.deleteBucket`
  **/
-export declare const recursivelyDeleteS3Bucket: (bucket: string) => Promise<void>;
+export declare const recursivelyDeleteS3Bucket: (bucket: string) => Promise<{
+    $response: import("aws-sdk").Response<{}, import("aws-sdk").AWSError>;
+}>;
 /**
  * Delete a list of buckets and all of their objects from S3
  *
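recursivelyDeleteS3Bucket now resolves with the raw `S3.deleteBucket` result instead of `void`. A minimal consumer sketch; the helper name and logging are illustrative and not part of the package:

    import { recursivelyDeleteS3Bucket } from '@cumulus/aws-client/S3';

    // Hypothetical helper: remove a bucket and report the HTTP status of the
    // underlying S3.deleteBucket call, which the new return type exposes.
    const cleanUpBucket = async (bucket: string): Promise<void> => {
      const result = await recursivelyDeleteS3Bucket(bucket);
      console.log(`deleteBucket responded with HTTP ${result.$response.httpResponse.statusCode}`);
    };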
@@ -412,6 +414,7 @@ export declare const createS3Buckets: (buckets: Array<string>) => Promise<any>;
  * Output from https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#headObject-property
  * @param {string} [params.ACL] - an [S3 Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl)
  * @param {boolean} [params.copyTags=false]
+ * @param {number} [params.chunkSize] - chunk size of the S3 multipart uploads
  * @returns {Promise.<{ etag: string }>} object containing the ETag of the
  * destination object
  */
@@ -423,7 +426,7 @@ export declare const multipartCopyObject: (params: {
     sourceObject?: AWS.S3.HeadObjectOutput;
     ACL?: AWS.S3.ObjectCannedACL;
     copyTags?: boolean;
-    copyMetadata?: boolean;
+    chunkSize?: number;
 }) => Promise<{
     etag: string;
 }>;
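multipartCopyObject now accepts an optional `chunkSize` (bytes per copied part, defaulting to 250 MB per the lib changes further down) in place of the removed `copyMetadata` flag. A hedged usage sketch; the bucket and key names are placeholders:

    import { multipartCopyObject } from '@cumulus/aws-client/S3';

    const copyLargeObject = async (): Promise<string> => {
      // Copy in 500 MB parts instead of the 250 MB default.
      const { etag } = await multipartCopyObject({
        sourceBucket: 'example-source-bucket',
        sourceKey: 'large/granule.dat',
        destinationBucket: 'example-destination-bucket',
        destinationKey: 'large/granule.dat',
        chunkSize: 500 * 1024 * 1024,
      });
      return etag;
    };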
@@ -437,6 +440,7 @@ export declare const multipartCopyObject: (params: {
  * @param {string} params.destinationKey
  * @param {string} [params.ACL] - an [S3 Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl)
  * @param {boolean} [params.copyTags=false]
+ * @param {number} [params.chunkSize] - chunk size of the S3 multipart uploads
  * @returns {Promise<undefined>}
  */
 export declare const moveObject: (params: {
@@ -446,6 +450,7 @@ export declare const moveObject: (params: {
     destinationKey: string;
     ACL?: AWS.S3.ObjectCannedACL;
     copyTags?: boolean;
-}) => Promise<void>;
+    chunkSize?: number;
+}) => Promise<import("aws-sdk/lib/request").PromiseResult<import("aws-sdk/clients/s3").DeleteObjectOutput, import("aws-sdk").AWSError>>;
 export {};
 //# sourceMappingURL=S3.d.ts.map
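moveObject gains the same optional `chunkSize` pass-through and now resolves with the result of deleting the source object rather than `void`; callers that ignored the return value are unaffected. A minimal sketch with placeholder names:

    import { moveObject } from '@cumulus/aws-client/S3';

    const relocateObject = async (): Promise<void> => {
      const deleteResult = await moveObject({
        sourceBucket: 'example-source-bucket',
        sourceKey: 'staging/granule.dat',
        destinationBucket: 'example-archive-bucket',
        destinationKey: 'archive/granule.dat',
        chunkSize: 100 * 1024 * 1024,
      });
      // The resolved value is the DeleteObjectOutput from removing the source.
      console.log(deleteResult.$response.requestId);
    };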
package/S3.js CHANGED
@@ -166,7 +166,7 @@ exports.s3ObjectExists = s3ObjectExists;
  */
 const waitForObjectToExist = async (params) => {
     const { bucket, key, interval = 1000, timeout = 30 * 1000, } = params;
-    await (0, p_wait_for_1.default)(() => (0, exports.s3ObjectExists)({ Bucket: bucket, Key: key }), { interval, timeout });
+    return await (0, p_wait_for_1.default)(() => (0, exports.s3ObjectExists)({ Bucket: bucket, Key: key }), { interval, timeout });
 };
 exports.waitForObjectToExist = waitForObjectToExist;
 /**
@@ -423,8 +423,7 @@ exports.getObjectReadStream = getObjectReadStream;
  **/
 const fileExists = async (bucket, key) => {
     try {
-        const r = await (0, services_1.s3)().headObject({ Key: key, Bucket: bucket }).promise();
-        return r;
+        return await (0, services_1.s3)().headObject({ Key: key, Bucket: bucket }).promise();
     }
     catch (error) {
         // if file is not return false
@@ -489,7 +488,7 @@ exports.recursivelyDeleteS3Bucket = (0, utils_1.improveStackTrace)(async (bucket
         };
     });
     await (0, exports.deleteS3Files)(s3Objects);
-    await (0, services_1.s3)().deleteBucket({ Bucket: bucket }).promise();
+    return await (0, services_1.s3)().deleteBucket({ Bucket: bucket }).promise();
 });
 /**
  * Delete a list of buckets and all of their objects from S3
@@ -743,12 +742,13 @@ const uploadPartCopy = async (params) => {
  * Output from https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#headObject-property
  * @param {string} [params.ACL] - an [S3 Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl)
  * @param {boolean} [params.copyTags=false]
+ * @param {number} [params.chunkSize] - chunk size of the S3 multipart uploads
  * @returns {Promise.<{ etag: string }>} object containing the ETag of the
  * destination object
  */
 const multipartCopyObject = async (params) => {
     var _a;
-    const { sourceBucket, sourceKey, destinationBucket, destinationKey, ACL, copyTags = false, } = params;
+    const { sourceBucket, sourceKey, destinationBucket, destinationKey, ACL, copyTags = false, chunkSize, } = params;
     const sourceObject = (_a = params.sourceObject) !== null && _a !== void 0 ? _a : await (0, exports.headObject)(sourceBucket, sourceKey);
     // Create a multi-part upload (copy) and get its UploadId
     const uploadId = await createMultipartUpload({
@@ -766,7 +766,7 @@ const multipartCopyObject = async (params) => {
     if (objectSize === undefined) {
         throw new Error(`Unable to determine size of s3://${sourceBucket}/${sourceKey}`);
     }
-    const chunks = S3MultipartUploads.createMultipartChunks(objectSize);
+    const chunks = S3MultipartUploads.createMultipartChunks(objectSize, chunkSize);
     // Submit all of the upload (copy) parts to S3
     const uploadPartCopyResponses = await Promise.all(chunks.map(({ start, end }, index) => uploadPartCopy({
         uploadId,
@@ -811,6 +811,7 @@ exports.multipartCopyObject = multipartCopyObject;
  * @param {string} params.destinationKey
  * @param {string} [params.ACL] - an [S3 Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl)
  * @param {boolean} [params.copyTags=false]
+ * @param {number} [params.chunkSize] - chunk size of the S3 multipart uploads
  * @returns {Promise<undefined>}
  */
 const moveObject = async (params) => {
@@ -821,8 +822,9 @@ const moveObject = async (params) => {
         destinationKey: params.destinationKey,
         ACL: params.ACL,
         copyTags: (0, isBoolean_1.default)(params.copyTags) ? params.copyTags : true,
+        chunkSize: params.chunkSize,
     });
-    await (0, exports.deleteS3Object)(params.sourceBucket, params.sourceKey);
+    return await (0, exports.deleteS3Object)(params.sourceBucket, params.sourceKey);
 };
 exports.moveObject = moveObject;
 //# sourceMappingURL=S3.js.map
@@ -65,7 +65,9 @@ export declare const getExecutionHistory: (params: import("aws-sdk/clients/stepf
     events: import("aws-sdk/clients/stepfunctions").HistoryEventList;
 }>;
 export declare const getExecutionStatus: (executionArn: string) => Promise<{
-    execution: import("aws-sdk/lib/request").PromiseResult<import("aws-sdk/clients/stepfunctions").DescribeExecutionOutput, import("aws-sdk").AWSError>;
+    execution: import("aws-sdk/clients/stepfunctions").DescribeExecutionOutput & {
+        $response: import("aws-sdk").Response<import("aws-sdk/clients/stepfunctions").DescribeExecutionOutput, import("aws-sdk").AWSError>;
+    };
     executionHistory: {
         events: import("aws-sdk/clients/stepfunctions").HistoryEventList;
     };
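The `execution` field of getExecutionStatus is now typed as `DescribeExecutionOutput & { $response: ... }` rather than the structurally equivalent `PromiseResult` alias, so existing callers should type-check unchanged. A minimal sketch, assuming the declarations are exposed at `@cumulus/aws-client/StepFunctions`:

    import { getExecutionStatus } from '@cumulus/aws-client/StepFunctions';

    const logExecutionStatus = async (executionArn: string): Promise<void> => {
      const { execution } = await getExecutionStatus(executionArn);
      // DescribeExecutionOutput fields and the raw $response remain available.
      console.log(execution.status, execution.$response.requestId);
    };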
@@ -5,7 +5,24 @@ declare type Chunk = {
     start: number;
     end: number;
 };
-export declare const createMultipartChunks: (objectSize: number, maxChunkSize?: number) => Chunk[];
+/**
+ * Each part of a multi-part copy needs to specify a byte range to be copied.
+ * This byte range has a starting byte and an ending byte (inclusive) that makes
+ * up the part. The maximum allowed chunk size is 5368709120 bytes.
+ *
+ * This function takes a file size and an optional maxSize. It returns an array
+ * of objects, each containing a `start` and an `end` value. These will make up
+ * the ranges of the multi-part copy.
+ *
+ * From anecdotal testing, a chunk size of 250 MB seems to perform fairly well.
+ *
+ * https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+ *
+ * @param {number} objectSize - size of the object
+ * @param {number} chunkSize - chunk size of the S3 multipart uploads
+ * @returns {Promise<Array<Chunk>>} - array of chunks
+ */
+export declare const createMultipartChunks: (objectSize: number, chunkSize?: number) => Chunk[];
 export declare const createMultipartUpload: (params: AWS.S3.CreateMultipartUploadRequest) => Promise<import("aws-sdk/lib/request").PromiseResult<import("aws-sdk/clients/s3").CreateMultipartUploadOutput, import("aws-sdk").AWSError>>;
 export declare const completeMultipartUpload: (params: AWS.S3.CompleteMultipartUploadRequest) => Promise<CompleteMultipartUploadOutput>;
 export declare const abortMultipartUpload: (params: AWS.S3.AbortMultipartUploadRequest) => Promise<import("aws-sdk/lib/request").PromiseResult<import("aws-sdk/clients/s3").AbortMultipartUploadOutput, import("aws-sdk").AWSError>>;
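A worked example of the chunking rule documented above (inclusive byte ranges, 250 MB default chunk size). This is a self-contained re-derivation for illustration, not a call into the package:

    const MB = 1024 * 1024;
    const objectSize = 600 * MB;   // size of the object being copied
    const chunkSize = 250 * MB;    // default chunk size

    // Reproduce the documented rule: each chunk covers [start, end] inclusive,
    // and the final chunk is clamped to the end of the object.
    const chunks: { start: number; end: number }[] = [];
    for (let start = 0; start < objectSize; start += chunkSize) {
      chunks.push({ start, end: Math.min(start + chunkSize, objectSize) - 1 });
    }
    console.log(chunks);
    // => [ { start: 0, end: 262143999 },
    //      { start: 262144000, end: 524287999 },
    //      { start: 524288000, end: 629145599 } ]   // last chunk is 100 MB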
@@ -8,21 +8,27 @@ exports.uploadPartCopy = exports.abortMultipartUpload = exports.completeMultipar
 const range_1 = __importDefault(require("lodash/range"));
 const services_1 = require("../services");
 const MB = 1024 * 1024;
-// Each part of a multi-part copy needs to specify a byte range to be copied.
-// This byte range has a starting byte and an ending byte (inclusive) that makes
-// up the part. The maximum allowed chunk size is 5368709120 bytes.
-//
-// This function takes a file size and an optional maxSize. It returns an array
-// of objects, each containing a `start` and an `end` value. These will make up
-// the ranges of the multi-part copy.
-//
-// From anecdotal testing, a chunk size of 250 MB seems to perform fairly well.
-//
-// https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
-const createMultipartChunks = (objectSize, maxChunkSize = 250 * MB) => (0, range_1.default)(0, objectSize, maxChunkSize)
+/**
+ * Each part of a multi-part copy needs to specify a byte range to be copied.
+ * This byte range has a starting byte and an ending byte (inclusive) that makes
+ * up the part. The maximum allowed chunk size is 5368709120 bytes.
+ *
+ * This function takes a file size and an optional maxSize. It returns an array
+ * of objects, each containing a `start` and an `end` value. These will make up
+ * the ranges of the multi-part copy.
+ *
+ * From anecdotal testing, a chunk size of 250 MB seems to perform fairly well.
+ *
+ * https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+ *
+ * @param {number} objectSize - size of the object
+ * @param {number} chunkSize - chunk size of the S3 multipart uploads
+ * @returns {Promise<Array<Chunk>>} - array of chunks
+ */
+const createMultipartChunks = (objectSize, chunkSize = 250 * MB) => (0, range_1.default)(0, objectSize, chunkSize)
     .map((start) => ({
     start,
-    end: Math.min(start + maxChunkSize, objectSize) - 1,
+    end: Math.min(start + chunkSize, objectSize) - 1,
 }));
 exports.createMultipartChunks = createMultipartChunks;
 const createMultipartUpload = async (params) => await (0, services_1.s3)().createMultipartUpload(params).promise();
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@cumulus/aws-client",
-  "version": "9.9.0",
+  "version": "10.0.0-beta.0",
   "description": "Utilities for working with AWS",
   "keywords": [
     "GIBS",
@@ -43,9 +43,9 @@
   "author": "Cumulus Authors",
   "license": "Apache-2.0",
   "dependencies": {
-    "@cumulus/checksum": "9.9.0",
-    "@cumulus/errors": "9.9.0",
-    "@cumulus/logger": "9.9.0",
+    "@cumulus/checksum": "10.0.0-beta.0",
+    "@cumulus/errors": "10.0.0-beta.0",
+    "@cumulus/logger": "10.0.0-beta.0",
     "aws-sdk": "^2.814.0",
     "jsonpath-plus": "^1.1.0",
     "lodash": "~4.17.20",
@@ -54,5 +54,5 @@
     "p-wait-for": "^3.1.0",
     "pump": "^3.0.0"
   },
-  "gitHead": "ac89218dfaa5ba8cc228db95321e1371d3e46e88"
+  "gitHead": "bc283986be627ba06a4084cabd4e01d1540d14c7"
 }