@nxtedition/lib 19.0.30 → 19.0.31

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/package.json +1 -1
  2. package/s3.js +51 -25
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@nxtedition/lib",
3
- "version": "19.0.30",
3
+ "version": "19.0.31",
4
4
  "license": "MIT",
5
5
  "author": "Robert Nagy <robert.nagy@boffins.se>",
6
6
  "type": "module",
package/s3.js CHANGED
@@ -20,8 +20,9 @@ class PartUploader {
20
20
  #callback
21
21
  #hasher
22
22
  #signal
23
+ #logger
23
24
 
24
- constructor(dir, number, signal) {
25
+ constructor({ dir, number, signal, logger }) {
25
26
  this.#writable = null
26
27
  this.#callback = noop
27
28
  this.#hasher = crypto.createHash('md5')
@@ -29,6 +30,7 @@ class PartUploader {
29
30
  this.#signal = signal
30
31
  this.#number = number
31
32
  this.#path = path.join(dir, `${this.#number}.part`)
33
+ this.#logger = logger?.child({ part: this.#number, path: this.#path })
32
34
  }
33
35
 
34
36
  get size() {
@@ -36,16 +38,20 @@ class PartUploader {
36
38
  }
37
39
 
38
40
  async write(chunk) {
39
- this.#writable ??= fs
40
- .createWriteStream(this.#path, { signal: this.#signal })
41
- .on('drain', () => {
42
- this.#callback(null)
43
- this.#callback = noop
44
- })
45
- .on('error', (err) => {
46
- this.#callback(err)
47
- this.callback = noop
48
- })
41
+ if (!this.#writable) {
42
+ this.#writable = fs
43
+ .createWriteStream(this.#path, { signal: this.#signal })
44
+ .on('drain', () => {
45
+ this.#callback(null)
46
+ this.#callback = noop
47
+ })
48
+ .on('error', (err) => {
49
+ this.#callback(err)
50
+ this.callback = noop
51
+ })
52
+
53
+ this.#logger?.debug('created part')
54
+ }
49
55
 
50
56
  if (this.#writable.errored) {
51
57
  throw this.#writable.errored
@@ -77,6 +83,7 @@ class PartUploader {
77
83
 
78
84
  assert(this.#writable.bytesWritten === this.#size, 'Expected size to match bytesWritten')
79
85
 
86
+ this.#logger?.debug('uploading part')
80
87
  const { ETag } = await s3.send(
81
88
  new AWS.UploadPartCommand({
82
89
  ...params,
@@ -86,27 +93,28 @@ class PartUploader {
86
93
  Body: fs.createReadStream(this.#path, { signal: this.#signal }),
87
94
  }),
88
95
  )
96
+ this.#logger?.debug({ etag: ETag }, 'uploaded part')
89
97
 
90
98
  return { part: { ETag, PartNumber: this.#number } }
91
99
  } catch (err) {
92
100
  return { error: err }
93
101
  } finally {
94
102
  await fs.promises.unlink(this.#writable.path)
103
+ this.#logger?.debug('deleted part')
95
104
  }
96
105
  }
97
106
  }
98
107
 
99
- export async function upload(
100
- {
101
- client: s3,
102
- signal: outerSignal,
103
- tmpdir = os.tmpdir(),
104
- partSize = 64e6,
105
- queueSize = 4,
106
- leavePartsOnError = false,
107
- },
108
- { Body, Key, Bucket, ContentMD5, ContentLength },
109
- ) {
108
+ export async function upload({
109
+ client: s3,
110
+ signal: outerSignal,
111
+ logger,
112
+ tmpdir = os.tmpdir(),
113
+ partSize = 64e6,
114
+ queueSize = 4,
115
+ leavePartsOnError = false,
116
+ params,
117
+ }) {
110
118
  if (s3 == null) {
111
119
  throw new Error('Invalid client')
112
120
  }
@@ -119,6 +127,12 @@ export async function upload(
119
127
  throw new Error('Invalid queueSize')
120
128
  }
121
129
 
130
+ if (params == null || typeof params !== 'object') {
131
+ throw new Error('Invalid params')
132
+ }
133
+
134
+ const { Body, Key, Bucket, ContentMD5, ContentLength } = params ?? {}
135
+
122
136
  if (ContentMD5 != null && !CONTENT_MD5_EXPR.test(ContentMD5)) {
123
137
  throw new Error(`Invalid ContentMD5: ${ContentMD5}`)
124
138
  }
@@ -141,6 +155,7 @@ export async function upload(
141
155
  let uploadDir
142
156
  try {
143
157
  uploadDir = await fs.promises.mkdtemp(path.join(tmpdir, 's3-upload-'))
158
+ logger?.debug({ uploadDir }, 'created upload directory')
144
159
  signal.throwIfAborted()
145
160
 
146
161
  const multipartUploadOutput = await s3.send(
@@ -150,19 +165,26 @@ export async function upload(
150
165
  }),
151
166
  )
152
167
  uploadId = multipartUploadOutput.UploadId
168
+ logger = logger?.child({ uploadId })
169
+ logger?.debug('created multipart upload')
153
170
  signal.throwIfAborted()
154
171
 
155
172
  const uploader = {
156
173
  size: 0,
157
174
  hasher: crypto.createHash('md5'),
158
- part: new PartUploader(uploadDir, 1, signal),
175
+ part: new PartUploader({ dir: uploadDir, number: 1, signal, logger }),
159
176
  number: 1,
160
177
  }
161
178
 
162
179
  const maybeFlush = (minSize) => {
163
180
  if (uploader.part.size && (minSize == null || uploader.part.size >= minSize)) {
164
181
  const part = uploader.part
165
- uploader.part = new PartUploader(uploadDir, ++uploader.number, signal)
182
+ uploader.part = new PartUploader({
183
+ dir: uploadDir,
184
+ number: ++uploader.number,
185
+ logger,
186
+ signal,
187
+ })
166
188
 
167
189
  const promise = queue.add(() => part.end(s3, { Bucket, Key, UploadId: uploadId }))
168
190
  promises.push(promise)
@@ -212,7 +234,6 @@ export async function upload(
212
234
  MultipartUpload: { Parts: parts },
213
235
  }),
214
236
  )
215
- signal.throwIfAborted()
216
237
 
217
238
  const result = {
218
239
  size: uploader.size,
@@ -221,6 +242,10 @@ export async function upload(
221
242
  parts,
222
243
  }
223
244
 
245
+ logger?.debug(result, 'completed multipart upload')
246
+
247
+ signal.throwIfAborted()
248
+
224
249
  const size = ContentLength != null ? Number(ContentLength) : null
225
250
  const hash = ContentMD5
226
251
 
@@ -245,6 +270,7 @@ export async function upload(
245
270
  UploadId: uploadId,
246
271
  }),
247
272
  )
273
+ logger?.warn('aborted multipart upload')
248
274
  } catch (er) {
249
275
  throw new AggregateError([err, er])
250
276
  }