@nxtedition/lib 19.0.40 → 19.0.43

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2):
  1. package/package.json +1 -1
  2. package/s3.js +31 -35
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@nxtedition/lib",
3
- "version": "19.0.40",
3
+ "version": "19.0.43",
4
4
  "license": "MIT",
5
5
  "author": "Robert Nagy <robert.nagy@boffins.se>",
6
6
  "type": "module",
package/s3.js CHANGED
@@ -1,20 +1,14 @@
1
1
  import crypto from 'node:crypto'
2
2
  import stream from 'node:stream'
3
- import assert from 'node:assert'
4
3
  import AWS from '@aws-sdk/client-s3'
5
4
  import PQueue from 'p-queue'
6
5
 
7
6
  const CONTENT_MD5_EXPR = /^[A-F0-9]{32}$/i
8
7
  const CONTENT_LENGTH_EXPR = /^\d+$/i
9
8
 
10
- export async function upload({
11
- client: s3,
12
- signal,
13
- logger,
14
- partSize = 16e6,
15
- queueSize = 2,
16
- params,
17
- }) {
9
+ const queue = new PQueue({ concurrency: 16 })
10
+
11
+ export async function upload({ client: s3, signal, logger, partSize = 16e6, params }) {
18
12
  if (s3 == null) {
19
13
  throw new Error('Invalid client')
20
14
  }
@@ -23,10 +17,6 @@ export async function upload({
23
17
  throw new Error('Invalid partSize')
24
18
  }
25
19
 
26
- if (!Number.isFinite(queueSize) || queueSize <= 0 || queueSize > 32) {
27
- throw new Error('Invalid queueSize')
28
- }
29
-
30
20
  if (params == null || typeof params !== 'object') {
31
21
  throw new Error('Invalid params')
32
22
  }
@@ -41,24 +31,21 @@ export async function upload({
41
31
  throw new Error(`Invalid ContentLength: ${ContentLength}`)
42
32
  }
43
33
 
44
- const queue = new PQueue({ concurrency: queueSize })
45
34
  const promises = []
46
35
 
47
- assert(queue.concurrency > 0 && queue.concurrency <= 32)
36
+ const ac = new AbortController()
37
+ const onAbort = () => {
38
+ ac.abort()
39
+ }
40
+ signal?.addEventListener('abort', onAbort)
48
41
 
49
42
  let uploadId
50
43
  try {
51
- const multipartUploadOutput = await s3.send(
52
- new AWS.CreateMultipartUploadCommand({ Bucket, Key }),
53
- { abortSignal: signal },
54
- )
55
- uploadId = multipartUploadOutput.UploadId
56
- logger = logger?.child({ uploadId })
57
- logger?.debug('multipart upload created')
58
-
59
44
  const uploader = {
60
45
  size: 0,
61
46
  hasher: crypto.createHash('md5'),
47
+ ac,
48
+ signal: ac.signal,
62
49
  part: {
63
50
  /** @type {Array<Buffer>} **/ chunks: [],
64
51
  hasher: crypto.createHash('md5'),
@@ -67,6 +54,16 @@ export async function upload({
67
54
  },
68
55
  }
69
56
 
57
+ const multipartUploadOutput = await s3.send(
58
+ new AWS.CreateMultipartUploadCommand({ Bucket, Key }),
59
+ { abortSignal: uploader.signal },
60
+ )
61
+ uploader.signal.throwIfAborted()
62
+
63
+ uploadId = multipartUploadOutput.UploadId
64
+ logger = logger?.child({ uploadId })
65
+ logger?.debug('multipart upload created')
66
+
70
67
  const maybeFlush = (minSize) => {
71
68
  const { part } = uploader
72
69
 
@@ -91,7 +88,7 @@ export async function upload({
91
88
  promises.push(
92
89
  queue
93
90
  .add(
94
- async ({ signal }) => {
91
+ async () => {
95
92
  logger?.debug({ number, size }, 'part upload started')
96
93
  try {
97
94
  const { ETag } = await s3.send(
@@ -111,20 +108,16 @@ export async function upload({
111
108
  },
112
109
  }),
113
110
  }),
114
- { abortSignal: signal },
115
111
  )
116
112
  logger?.debug({ number, size, etag: ETag }, 'part upload completed')
117
113
  return { part: { ETag, PartNumber: number } }
118
114
  } catch (err) {
119
- if (err.name === 'AbortError') {
120
- logger?.debug({ err }, 'part upload aborted')
121
- } else {
122
- logger?.warn({ err }, 'part upload failed')
123
- }
115
+ uploader.ac.abort(err)
116
+ logger?.warn({ err }, 'part upload failed')
124
117
  return { error: err }
125
118
  }
126
119
  },
127
- { signal },
120
+ { signal: uploader.signal },
128
121
  )
129
122
  .catch((err) => ({ error: err })),
130
123
  )
@@ -133,7 +126,7 @@ export async function upload({
133
126
  }
134
127
 
135
128
  for await (const chunk of Body) {
136
- signal?.throwIfAborted()
129
+ uploader.signal.throwIfAborted()
137
130
 
138
131
  uploader.hasher.update(chunk)
139
132
  uploader.size += chunk.byteLength
@@ -145,9 +138,11 @@ export async function upload({
145
138
  const thenable = maybeFlush(partSize)
146
139
  if (thenable) {
147
140
  await thenable
141
+ uploader.signal.throwIfAborted()
148
142
  }
149
143
  }
150
144
  await maybeFlush()
145
+ uploader.signal.throwIfAborted()
151
146
 
152
147
  const parts = []
153
148
  const errors = []
@@ -158,7 +153,7 @@ export async function upload({
158
153
  parts.push(part)
159
154
  }
160
155
  }
161
- signal?.throwIfAborted()
156
+ uploader.signal.throwIfAborted()
162
157
 
163
158
  if (errors.length > 0) {
164
159
  throw new AggregateError(errors, 'multipart upload failed')
@@ -175,9 +170,8 @@ export async function upload({
175
170
  UploadId: uploadId,
176
171
  MultipartUpload: { Parts: parts },
177
172
  }),
178
- { abortSignal: signal },
179
173
  )
180
- signal?.throwIfAborted()
174
+ uploader.signal.throwIfAborted()
181
175
 
182
176
  const result = {
183
177
  size: uploader.size,
@@ -212,5 +206,7 @@ export async function upload({
212
206
  }
213
207
 
214
208
  throw err
209
+ } finally {
210
+ signal?.removeEventListener('abort', onAbort)
215
211
  }
216
212
  }