@nxtedition/lib 19.0.42 → 19.0.44

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/package.json +1 -1
  2. package/s3.js +37 -41
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@nxtedition/lib",
3
- "version": "19.0.42",
3
+ "version": "19.0.44",
4
4
  "license": "MIT",
5
5
  "author": "Robert Nagy <robert.nagy@boffins.se>",
6
6
  "type": "module",
package/s3.js CHANGED
@@ -1,20 +1,15 @@
1
1
  import crypto from 'node:crypto'
2
2
  import stream from 'node:stream'
3
- import assert from 'node:assert'
3
+ import tp from 'node:timers/promises'
4
4
  import AWS from '@aws-sdk/client-s3'
5
5
  import PQueue from 'p-queue'
6
6
 
7
7
  const CONTENT_MD5_EXPR = /^[A-F0-9]{32}$/i
8
8
  const CONTENT_LENGTH_EXPR = /^\d+$/i
9
9
 
10
- export async function upload({
11
- client: s3,
12
- signal,
13
- logger,
14
- partSize = 16e6,
15
- queueSize = 2,
16
- params,
17
- }) {
10
+ const queue = new PQueue({ concurrency: 16 })
11
+
12
+ export async function upload({ client: s3, signal, logger, partSize = 16e6, params }) {
18
13
  if (s3 == null) {
19
14
  throw new Error('Invalid client')
20
15
  }
@@ -23,10 +18,6 @@ export async function upload({
23
18
  throw new Error('Invalid partSize')
24
19
  }
25
20
 
26
- if (!Number.isFinite(queueSize) || queueSize <= 0 || queueSize > 32) {
27
- throw new Error('Invalid queueSize')
28
- }
29
-
30
21
  if (params == null || typeof params !== 'object') {
31
22
  throw new Error('Invalid params')
32
23
  }
@@ -41,11 +32,8 @@ export async function upload({
41
32
  throw new Error(`Invalid ContentLength: ${ContentLength}`)
42
33
  }
43
34
 
44
- const queue = new PQueue({ concurrency: queueSize })
45
35
  const promises = []
46
36
 
47
- assert(queue.concurrency > 0 && queue.concurrency <= 32)
48
-
49
37
  const ac = new AbortController()
50
38
  const onAbort = () => {
51
39
  ac.abort()
@@ -102,32 +90,40 @@ export async function upload({
102
90
  queue
103
91
  .add(
104
92
  async () => {
105
- logger?.debug({ number, size }, 'part upload started')
106
- try {
107
- const { ETag } = await s3.send(
108
- new AWS.UploadPartCommand({
109
- Bucket,
110
- Key,
111
- UploadId: uploadId,
112
- ContentMD5: hasher.digest('base64'),
113
- ContentLength: size,
114
- PartNumber: number,
115
- Body: new stream.Readable({
116
- read() {
117
- for (const chunk of chunks.splice(0)) {
118
- this.push(chunk)
119
- }
120
- this.push(null)
121
- },
93
+ for (let retryCount = 0; true; retryCount++) {
94
+ logger?.debug({ number, size }, 'part upload started')
95
+ try {
96
+ const { ETag } = await s3.send(
97
+ new AWS.UploadPartCommand({
98
+ Bucket,
99
+ Key,
100
+ UploadId: uploadId,
101
+ ContentMD5: hasher.digest('base64'),
102
+ ContentLength: size,
103
+ PartNumber: number,
104
+ Body: new stream.Readable({
105
+ read() {
106
+ for (const chunk of chunks.splice(0)) {
107
+ this.push(chunk)
108
+ }
109
+ this.push(null)
110
+ },
111
+ }),
122
112
  }),
123
- }),
124
- )
125
- logger?.debug({ number, size, etag: ETag }, 'part upload completed')
126
- return { part: { ETag, PartNumber: number } }
127
- } catch (err) {
128
- uploader.ac.abort(err)
129
- logger?.warn({ err }, 'part upload failed')
130
- return { error: err }
113
+ )
114
+ logger?.debug({ number, size, etag: ETag }, 'part upload completed')
115
+ return { part: { ETag, PartNumber: number } }
116
+ } catch (err) {
117
+ logger?.warn({ err }, 'part upload failed')
118
+
119
+ if (retryCount < 3) {
120
+ logger?.warn({ retryCount }, 'part upload retry')
121
+ await tp.setTimeout(1e3, undefined, { signal: uploader.signal })
122
+ } else {
123
+ uploader.ac.abort(err)
124
+ return { error: err }
125
+ }
126
+ }
131
127
  }
132
128
  },
133
129
  { signal: uploader.signal },