@nxtedition/lib 19.0.39 → 19.0.42

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/package.json +5 -5
  2. package/s3.js +34 -22
package/package.json CHANGED
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@nxtedition/lib",
-  "version": "19.0.39",
+  "version": "19.0.42",
   "license": "MIT",
   "author": "Robert Nagy <robert.nagy@boffins.se>",
   "type": "module",
@@ -75,8 +75,8 @@
     "/__tests__"
   ],
   "dependencies": {
-    "@aws-sdk/client-s3": "^3.550.0",
-    "@elastic/elasticsearch": "^8.13.0",
+    "@aws-sdk/client-s3": "^3.552.0",
+    "@elastic/elasticsearch": "^8.13.1",
     "@elastic/transport": "^8.5.0",
     "@nxtedition/nxt-undici": "^2.0.45",
     "date-fns": "^3.6.0",
@@ -106,8 +106,8 @@
   "devDependencies": {
     "@nxtedition/deepstream.io-client-js": ">=24.1.20",
     "@types/lodash": "^4.17.0",
-    "@types/node": "^20.12.6",
-    "eslint": "^9.0.0",
+    "@types/node": "^20.12.7",
+    "eslint": "^8.0.0",
     "eslint-config-prettier": "^9.1.0",
     "eslint-config-standard": "^17.0.0",
     "eslint-plugin-import": "^2.29.1",
```
package/s3.js CHANGED
```diff
@@ -1,5 +1,6 @@
 import crypto from 'node:crypto'
 import stream from 'node:stream'
+import assert from 'node:assert'
 import AWS from '@aws-sdk/client-s3'
 import PQueue from 'p-queue'
 
@@ -22,7 +23,7 @@ export async function upload({
     throw new Error('Invalid partSize')
   }
 
-  if (!Number.isFinite(queueSize) || queueSize <= 0) {
+  if (!Number.isFinite(queueSize) || queueSize <= 0 || queueSize > 32) {
     throw new Error('Invalid queueSize')
   }
 
```
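The new `queueSize > 32` branch caps part-upload concurrency at 32; it pairs with the `assert(queue.concurrency > 0 && queue.concurrency <= 32)` guard added in the next hunk.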
```diff
@@ -43,19 +44,21 @@ export async function upload({
   const queue = new PQueue({ concurrency: queueSize })
   const promises = []
 
+  assert(queue.concurrency > 0 && queue.concurrency <= 32)
+
+  const ac = new AbortController()
+  const onAbort = () => {
+    ac.abort()
+  }
+  signal?.addEventListener('abort', onAbort)
+
   let uploadId
   try {
-    const multipartUploadOutput = await s3.send(
-      new AWS.CreateMultipartUploadCommand({ Bucket, Key }),
-      { abortSignal: signal },
-    )
-    uploadId = multipartUploadOutput.UploadId
-    logger = logger?.child({ uploadId })
-    logger?.debug('multipart upload created')
-
     const uploader = {
       size: 0,
       hasher: crypto.createHash('md5'),
+      ac,
+      signal: ac.signal,
       part: {
         /** @type {Array<Buffer>} **/ chunks: [],
         hasher: crypto.createHash('md5'),
@@ -64,6 +67,16 @@ export async function upload({
       },
     }
 
+    const multipartUploadOutput = await s3.send(
+      new AWS.CreateMultipartUploadCommand({ Bucket, Key }),
+      { abortSignal: uploader.signal },
+    )
+    uploader.signal.throwIfAborted()
+
+    uploadId = multipartUploadOutput.UploadId
+    logger = logger?.child({ uploadId })
+    logger?.debug('multipart upload created')
+
     const maybeFlush = (minSize) => {
       const { part } = uploader
 
```
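The core change in s3.js: rather than threading the caller's `signal` into each S3 call, `upload` now derives one internal `AbortController` that is linked to the caller's signal and can also be tripped from inside the function (see the part-upload catch handler below, which now aborts it on failure). A minimal sketch of the linking pattern, using hypothetical names (`run`, `task`) and only standard Node.js APIs:

```js
// Sketch only: link an external AbortSignal to an internal AbortController
// so one signal can be aborted both by the caller and by internal failures.
async function run(task, { signal } = {}) {
  const ac = new AbortController()
  const onAbort = () => {
    ac.abort(signal.reason) // propagate the caller's abort reason
  }
  signal?.addEventListener('abort', onAbort)
  try {
    // everything inside observes ac.signal, never the raw caller signal
    return await task(ac.signal)
  } finally {
    // avoid leaking the listener on a long-lived caller signal
    signal?.removeEventListener('abort', onAbort)
  }
}
```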
```diff
@@ -88,7 +101,7 @@ export async function upload({
       promises.push(
         queue
           .add(
-            async ({ signal }) => {
+            async () => {
              logger?.debug({ number, size }, 'part upload started')
              try {
                const { ETag } = await s3.send(
@@ -108,29 +121,25 @@ export async function upload({
                      },
                    }),
                  }),
-                { abortSignal: signal },
                )
                logger?.debug({ number, size, etag: ETag }, 'part upload completed')
                return { part: { ETag, PartNumber: number } }
              } catch (err) {
-                if (err.name === 'AbortError') {
-                  logger?.debug({ err }, 'part upload aborted')
-                } else {
-                  logger?.warn({ err }, 'part upload failed')
-                }
+                uploader.ac.abort(err)
+                logger?.warn({ err }, 'part upload failed')
                return { error: err }
              }
            },
-            { signal },
+            { signal: uploader.signal },
          )
          .catch((err) => ({ error: err })),
      )
 
-      return queue.onEmpty()
+      return queue.size > 0 ? queue.onEmpty() : null
    }
 
    for await (const chunk of Body) {
-      signal?.throwIfAborted()
+      uploader.signal.throwIfAborted()
 
      uploader.hasher.update(chunk)
      uploader.size += chunk.byteLength
```
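With the shared controller in place, the first failing part upload calls `uploader.ac.abort(err)`, which rejects the sibling uploads still waiting in the PQueue instead of letting them start. An illustrative sketch (not this module's code); `p-queue` removes and rejects queued tasks when the signal passed to `add()` aborts:

```js
import PQueue from 'p-queue'

const ac = new AbortController()
const queue = new PQueue({ concurrency: 2 })

const part = (n) => async () => {
  if (n === 2) {
    const err = new Error(`part ${n} failed`)
    ac.abort(err) // first failure trips the shared signal...
    throw err
  }
  return n
}

// ...so parts still queued behind the concurrency limit are rejected
// rather than started.
const results = await Promise.allSettled(
  [1, 2, 3, 4, 5].map((n) => queue.add(part(n), { signal: ac.signal })),
)
console.log(results.map((r) => r.status))
```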
```diff
@@ -142,9 +151,11 @@ export async function upload({
      const thenable = maybeFlush(partSize)
      if (thenable) {
        await thenable
+        uploader.signal.throwIfAborted()
      }
    }
    await maybeFlush()
+    uploader.signal.throwIfAborted()
 
    const parts = []
    const errors = []
@@ -155,7 +166,7 @@ export async function upload({
        parts.push(part)
      }
    }
-    signal?.throwIfAborted()
+    uploader.signal.throwIfAborted()
 
    if (errors.length > 0) {
      throw new AggregateError(errors, 'multipart upload failed')
@@ -172,9 +183,8 @@ export async function upload({
        UploadId: uploadId,
        MultipartUpload: { Parts: parts },
      }),
-      { abortSignal: signal },
    )
-    signal?.throwIfAborted()
+    uploader.signal.throwIfAborted()
 
    const result = {
      size: uploader.size,
```
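`uploader.signal.throwIfAborted()` is also added after each await point, so an abort raised while an operation was in flight surfaces as a throw immediately instead of letting the loop keep consuming `Body`. For reference, `AbortSignal.prototype.throwIfAborted()` throws the signal's stored abort reason:

```js
const ac = new AbortController()
ac.abort(new Error('boom'))
try {
  ac.signal.throwIfAborted() // throws the reason passed to abort()
} catch (err) {
  console.log(err.message) // 'boom'
}
```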
```diff
@@ -209,5 +219,7 @@ export async function upload({
    }
 
    throw err
+  } finally {
+    signal?.removeEventListener('abort', onAbort)
  }
}
```
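
For orientation, a hedged usage sketch of the changed function. The option names come from the diff; the import path and exact option set are assumptions:

```js
import fs from 'node:fs'
import AWS from '@aws-sdk/client-s3'
import { upload } from '@nxtedition/lib/s3.js' // hypothetical import path

const s3 = new AWS.S3Client({}) // region/credentials from the environment
const ac = new AbortController()

const result = await upload({
  s3,
  Bucket: 'my-bucket', // assumed example values
  Key: 'backups/big-file.bin',
  Body: fs.createReadStream('./big-file.bin'),
  partSize: 8 * 1024 * 1024, // must be finite and > 0
  queueSize: 8, // must now also be <= 32
  signal: ac.signal, // aborting cancels queued part uploads
})
```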