@karpeleslab/klbfw 0.2.24 → 0.2.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.d.ts CHANGED
@@ -237,6 +237,8 @@ interface UploadFileOptions {
   onProgress?: (progress: number) => void;
   /** Error callback - resolve to retry, reject to fail */
   onError?: (error: Error, context: { phase: string; blockNum?: number; attempt: number }) => Promise<void>;
+  /** AbortSignal for cancellation - use AbortController.signal */
+  signal?: AbortSignal;
 }
 
 /** Options for uploadManyFiles */
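The new signal option follows the standard AbortController pattern. A minimal usage sketch, assuming uploadFile is exported from the package root (the API route string mirrors the one in the package's own JSDoc examples; the payload is a stand-in):

    const { uploadFile } = require('@karpeleslab/klbfw'); // assumed export

    const buffer = new Uint8Array(1024 * 1024); // stand-in payload
    const controller = new AbortController();

    const pending = uploadFile('Misc/Debug:testUpload', buffer, 'POST',
      { filename: 'report.bin' }, null, { signal: controller.signal });

    controller.abort(); // any in-flight request now rejects

    pending.catch((err) => {
      if (err.name === 'AbortError') console.log('upload cancelled');
    });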
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@karpeleslab/klbfw",
-  "version": "0.2.24",
+  "version": "0.2.25",
   "description": "Frontend Framework",
   "main": "index.js",
   "types": "index.d.ts",
@@ -222,9 +222,10 @@ const utils = {
  * @param {*} body - Request body
  * @param {Object} headers - Request headers
  * @param {Object} context - Request context
+ * @param {AbortSignal} [signal] - Optional AbortSignal for cancellation
  * @returns {Promise} - Request promise
  */
-function awsReq(upInfo, method, query, body, headers, context) {
+function awsReq(upInfo, method, query, body, headers, context, signal) {
   headers = headers || {};
   context = context || {};
 
@@ -305,11 +306,15 @@ function awsReq(upInfo, method, query, body, headers, context) {
     headers["Authorization"] = response.data.authorization;
 
     // Make the actual request to S3
-    return utils.fetch(url, {
+    const fetchOptions = {
       method,
       body,
       headers
-    });
+    };
+    if (signal) {
+      fetchOptions.signal = signal;
+    }
+    return utils.fetch(url, fetchOptions);
   })
   .then(resolve)
   .catch(reject);
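The conditional copy into fetchOptions means the request only carries a signal when the caller supplied one, which keeps fetch implementations that predate AbortSignal working unchanged. The behavior this relies on is standard fetch semantics, sketched here outside klbfw:

    const controller = new AbortController();

    // Standard fetch: once abort() is called, the request promise rejects
    // with an error whose name is 'AbortError', whether the request is
    // still queued or already in flight.
    fetch('https://example.invalid/upload', {
      method: 'PUT',
      body: 'payload',
      signal: controller.signal,
    }).catch((err) => console.log(err.name)); // "AbortError"

    controller.abort();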
package/upload-many.js CHANGED
@@ -40,7 +40,9 @@ const { uploadFile } = require('./upload');
  * Context includes { fileIndex, phase, attempt } where phase is 'file' for file-level errors,
  * or 'upload'/'init'/'complete' for block-level errors (also includes blockNum for 'upload').
  * @param {number} [options.concurrency=3] - Maximum concurrent uploads (1-10)
- * @returns {Promise<Array>} - Resolves with array of upload results in same order as input files
+ * @param {AbortSignal} [options.signal] - AbortSignal for cancellation. Use AbortController to cancel.
+ * @returns {Promise<Array>} - Resolves with array of upload results in same order as input files.
+ *   Rejects with AbortError if cancelled.
  *
  * @example
  * // Upload multiple files from a file input
@@ -76,7 +78,14 @@ async function uploadManyFiles(api, files, method, params, context, options) {
   }
 
   const concurrency = Math.min(Math.max(options.concurrency || 3, 1), 10);
-  const { onProgress, onFileComplete, onError } = options;
+  const { onProgress, onFileComplete, onError, signal } = options;
+
+  // Check if already aborted
+  if (signal && signal.aborted) {
+    const error = new Error('Upload aborted');
+    error.name = 'AbortError';
+    throw error;
+  }
 
   // Results array in same order as input
   const results = new Array(fileCount);
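Because this check runs before any per-file work is queued, a signal that is already aborted fails fast: the promise rejects and no callbacks fire. A usage sketch (route and file list are placeholders; assumes uploadManyFiles is exported alongside uploadFile):

    const { uploadManyFiles } = require('@karpeleslab/klbfw'); // assumed export

    const files = []; // placeholder: e.g. a FileList from an <input>
    const controller = new AbortController();
    controller.abort(); // cancelled before starting

    uploadManyFiles('Misc/Debug:testUpload', files, 'POST', {}, null, {
      signal: controller.signal,
    }).catch((err) => {
      console.log(err.name); // "AbortError" - nothing was uploaded
    });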
@@ -122,6 +131,11 @@ async function uploadManyFiles(api, files, method, params, context, options) {
       }
     };
 
+    // Pass signal to each file upload
+    if (signal) {
+      fileOptions.signal = signal;
+    }
+
     // Wrap onError to include fileIndex for block-level errors
     if (onError) {
       fileOptions.onError = (error, ctx) => {
@@ -142,6 +156,11 @@ async function uploadManyFiles(api, files, method, params, context, options) {
 
       return result;
     } catch (error) {
+      // Re-throw abort errors immediately without retry
+      if (error.name === 'AbortError') {
+        throw error;
+      }
+
       // Give onError a chance to retry the whole file
       if (onError) {
         try {
@@ -161,19 +180,37 @@ async function uploadManyFiles(api, files, method, params, context, options) {
     }
   };
 
+  // Track if aborted
+  let aborted = false;
+  let abortError = null;
+
   // Process files with concurrency limit
   const processQueue = async () => {
     const workers = [];
 
     for (let i = 0; i < concurrency; i++) {
       workers.push((async () => {
-        while (nextIndex < fileCount) {
+        while (nextIndex < fileCount && !aborted) {
+          // Check for abort before starting next file
+          if (signal && signal.aborted) {
+            aborted = true;
+            abortError = new Error('Upload aborted');
+            abortError.name = 'AbortError';
+            return;
+          }
+
           const fileIndex = nextIndex++;
           running.add(fileIndex);
 
           try {
             await uploadOne(fileIndex);
           } catch (error) {
+            // If aborted, stop processing and propagate
+            if (error.name === 'AbortError') {
+              aborted = true;
+              abortError = error;
+              return;
+            }
             // Continue with next file even if one fails
             // Error is already stored in results
           } finally {
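The pool here is a claim-by-counter pattern: N worker loops share one nextIndex cursor, so at most concurrency uploads run at once, and because each worker re-checks the shared aborted flag between files, one AbortError drains the whole pool. The same pattern in isolation (names are illustrative, not klbfw internals):

    // Run async tasks with a fixed concurrency limit.
    async function runPool(tasks, concurrency) {
      let nextIndex = 0;
      const results = new Array(tasks.length);

      const worker = async () => {
        // Each worker claims the next unclaimed index until the queue drains.
        while (nextIndex < tasks.length) {
          const i = nextIndex++;
          results[i] = await tasks[i]();
        }
      };

      await Promise.all(Array.from({ length: concurrency }, worker));
      return results;
    }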
@@ -188,6 +225,11 @@ async function uploadManyFiles(api, files, method, params, context, options) {
 
   await processQueue();
 
+  // If aborted, throw the abort error
+  if (aborted && abortError) {
+    throw abortError;
+  }
+
   // Check if any uploads failed
   const errors = results.filter(r => r && r.error).map(r => r.error);
   if (errors.length > 0) {
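End to end, aborting a batch rejects the uploadManyFiles promise with the stored AbortError rather than the aggregate-failure path below. A browser-flavored usage sketch (element ids and route are placeholders):

    const controller = new AbortController();
    const input = document.querySelector('#files'); // placeholder element

    uploadManyFiles('Misc/Debug:testUpload', input.files, 'POST', {}, null, {
      concurrency: 3,
      signal: controller.signal,
    }).then((results) => {
      console.log(`uploaded ${results.length} files`);
    }).catch((err) => {
      if (err.name === 'AbortError') console.log('batch cancelled');
    });

    // Wire cancellation to a button.
    document.querySelector('#cancel').onclick = () => controller.abort();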
package/upload.js CHANGED
@@ -37,7 +37,8 @@ const { env, utils, awsReq, readChunkFromStream, readFileSlice } = require('./up
  * @param {Function} [options.onError] - Error callback(error, context). Can return a Promise
  *   that, if resolved, will cause the failed operation to be retried. Context contains
  *   { phase, blockNum, attempt } for block uploads or { phase, attempt } for other operations.
- * @returns {Promise<Object>} - Resolves with the full REST response
+ * @param {AbortSignal} [options.signal] - AbortSignal for cancellation. Use AbortController to cancel.
+ * @returns {Promise<Object>} - Resolves with the full REST response. Rejects with AbortError if cancelled.
  *
  * @example
  * // Upload a buffer with filename
@@ -76,6 +77,25 @@ const { env, utils, awsReq, readChunkFromStream, readFileSlice } = require('./up
  *   type: 'application/octet-stream',
  *   size: 2199023255552 // optional: if known, enables optimal block sizing
  * });
+ *
+ * @example
+ * // Upload with cancellation support
+ * const controller = new AbortController();
+ * const uploadPromise = uploadFile('Misc/Debug:testUpload', buffer, 'POST', {
+ *   filename: 'large-file.bin'
+ * }, null, {
+ *   signal: controller.signal,
+ *   onProgress: (progress) => console.log(`${Math.round(progress * 100)}%`)
+ * });
+ * // Cancel after 5 seconds
+ * setTimeout(() => controller.abort(), 5000);
+ * try {
+ *   const result = await uploadPromise;
+ * } catch (err) {
+ *   if (err.name === 'AbortError') {
+ *     console.log('Upload was cancelled');
+ *   }
+ * }
  */
 async function uploadFile(api, buffer, method, params, context, options) {
   // Handle default values
@@ -83,6 +103,13 @@ async function uploadFile(api, buffer, method, params, context, options) {
   params = params || {};
   options = options || {};
 
+  // Check if already aborted
+  if (options.signal && options.signal.aborted) {
+    const error = new Error('Upload aborted');
+    error.name = 'AbortError';
+    throw error;
+  }
+
   // Get context from framework if not provided, and add available values
   if (!context) {
     context = fwWrapper.getContext();
@@ -202,7 +229,16 @@ async function uploadFile(api, buffer, method, params, context, options) {
  * @private
  */
 async function doPutUpload(file, uploadInfo, context, options) {
-  const { onProgress, onError } = options;
+  const { onProgress, onError, signal } = options;
+
+  // Helper to check abort status
+  const checkAbort = () => {
+    if (signal && signal.aborted) {
+      const error = new Error('Upload aborted');
+      error.name = 'AbortError';
+      throw error;
+    }
+  };
 
   // Calculate block size
   // - If size known: use server's Blocksize or file size
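This checkAbort helper hand-rolls what newer runtimes expose as AbortSignal.prototype.throwIfAborted(); building it from signal.aborted keeps the library compatible with environments that predate that method. The equivalence, as a sketch:

    // Portable form, matching what the library does:
    function checkAbort(signal) {
      if (signal && signal.aborted) {
        const error = new Error('Upload aborted');
        error.name = 'AbortError';
        throw error;
      }
    }

    // Modern form (Node 17.3+, recent browsers): throws signal.reason,
    // which defaults to a DOMException named 'AbortError'.
    // signal.throwIfAborted();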
@@ -228,6 +264,9 @@ async function doPutUpload(file, uploadInfo, context, options) {
     const pendingUploads = [];
 
     while (!streamEnded || pendingUploads.length > 0) {
+      // Check for abort before reading more data
+      checkAbort();
+
       // Read and start uploads up to maxConcurrent
       while (!streamEnded && pendingUploads.length < maxConcurrent) {
         const chunkData = await readChunkFromStream(file.stream, blockSize);
@@ -243,7 +282,7 @@ async function doPutUpload(file, uploadInfo, context, options) {
         // Only add Content-Range for multi-block uploads
         const useContentRange = blocks === null || blocks > 1;
         const uploadPromise = uploadPutBlockWithDataAndRetry(
-          uploadInfo, currentBlock, startByte, chunkData, file.type, onError, useContentRange
+          uploadInfo, currentBlock, startByte, chunkData, file.type, onError, useContentRange, signal
         ).then(() => {
           completedBlocks++;
           if (onProgress && blocks) {
@@ -267,10 +306,13 @@ async function doPutUpload(file, uploadInfo, context, options) {
   } else {
     // Buffer-based upload: original logic
     for (let i = 0; i < blocks; i += maxConcurrent) {
+      // Check for abort before starting next batch
+      checkAbort();
+
       const batch = [];
       for (let j = i; j < Math.min(i + maxConcurrent, blocks); j++) {
         batch.push(
-          uploadPutBlockWithRetry(file, uploadInfo, j, blockSize, onError)
+          uploadPutBlockWithRetry(file, uploadInfo, j, blockSize, onError, signal)
             .then(() => {
               completedBlocks++;
               if (onProgress) {
@@ -285,6 +327,8 @@ async function doPutUpload(file, uploadInfo, context, options) {
   }
 
   // All blocks done, call completion with retry support
+  checkAbort();
+
   let attempt = 0;
   while (true) {
     attempt++;
@@ -292,6 +336,8 @@ async function doPutUpload(file, uploadInfo, context, options) {
       const completeResponse = await rest.rest(uploadInfo.Complete, 'POST', {}, context);
       return completeResponse;
     } catch (error) {
+      // Check if aborted during completion
+      checkAbort();
       if (onError) {
         await onError(error, { phase: 'complete', attempt });
         // If onError resolves, retry
@@ -306,7 +352,7 @@ async function doPutUpload(file, uploadInfo, context, options) {
  * Upload a single block via PUT with pre-read data and retry support
  * @private
  */
-async function uploadPutBlockWithDataAndRetry(uploadInfo, blockNum, startByte, data, contentType, onError, useContentRange) {
+async function uploadPutBlockWithDataAndRetry(uploadInfo, blockNum, startByte, data, contentType, onError, useContentRange, signal) {
   let attempt = 0;
   while (true) {
     attempt++;
@@ -320,11 +366,16 @@ async function uploadPutBlockWithDataAndRetry(uploadInfo, blockNum, startByte, d
         headers['Content-Range'] = `bytes ${startByte}-${startByte + data.byteLength - 1}/*`;
       }
 
-      const response = await utils.fetch(uploadInfo.PUT, {
+      const fetchOptions = {
         method: 'PUT',
         body: data,
         headers: headers
-      });
+      };
+      if (signal) {
+        fetchOptions.signal = signal;
+      }
+
+      const response = await utils.fetch(uploadInfo.PUT, fetchOptions);
 
       if (!response.ok) {
         throw new Error(`HTTP ${response.status}: ${response.statusText}`);
@@ -333,6 +384,10 @@ async function uploadPutBlockWithDataAndRetry(uploadInfo, blockNum, startByte, d
       await response.text();
       return;
     } catch (error) {
+      // Re-throw abort errors immediately
+      if (error.name === 'AbortError') {
+        throw error;
+      }
       if (onError) {
         await onError(error, { phase: 'upload', blockNum, attempt });
         continue;
@@ -346,13 +401,17 @@ async function uploadPutBlockWithDataAndRetry(uploadInfo, blockNum, startByte, d
  * Upload a single block via PUT with retry support
  * @private
  */
-async function uploadPutBlockWithRetry(file, uploadInfo, blockNum, blockSize, onError) {
+async function uploadPutBlockWithRetry(file, uploadInfo, blockNum, blockSize, onError, signal) {
   let attempt = 0;
   while (true) {
     attempt++;
     try {
-      return await uploadPutBlock(file, uploadInfo, blockNum, blockSize);
+      return await uploadPutBlock(file, uploadInfo, blockNum, blockSize, signal);
     } catch (error) {
+      // Re-throw abort errors immediately
+      if (error.name === 'AbortError') {
+        throw error;
+      }
       if (onError) {
         await onError(error, { phase: 'upload', blockNum, attempt });
         // If onError resolves, retry
@@ -367,7 +426,7 @@ async function uploadPutBlockWithRetry(file, uploadInfo, blockNum, blockSize, on
  * Upload a single block via PUT
  * @private
  */
-async function uploadPutBlock(file, uploadInfo, blockNum, blockSize) {
+async function uploadPutBlock(file, uploadInfo, blockNum, blockSize, signal) {
   const startByte = blockNum * blockSize;
   const endByte = Math.min(startByte + blockSize, file.size);
 
@@ -383,11 +442,16 @@ async function uploadPutBlock(file, uploadInfo, blockNum, blockSize) {
     headers['Content-Range'] = `bytes ${startByte}-${endByte - 1}/*`;
   }
 
-  const response = await utils.fetch(uploadInfo.PUT, {
+  const fetchOptions = {
     method: 'PUT',
     body: arrayBuffer,
     headers: headers
-  });
+  };
+  if (signal) {
+    fetchOptions.signal = signal;
+  }
+
+  const response = await utils.fetch(uploadInfo.PUT, fetchOptions);
 
   if (!response.ok) {
     throw new Error(`HTTP ${response.status}: ${response.statusText}`);
@@ -401,7 +465,25 @@ async function uploadPutBlock(file, uploadInfo, blockNum, blockSize) {
  * @private
  */
 async function doAwsUpload(file, uploadInfo, context, options) {
-  const { onProgress, onError } = options;
+  const { onProgress, onError, signal } = options;
+
+  // Helper to check abort status
+  const checkAbort = () => {
+    if (signal && signal.aborted) {
+      const error = new Error('Upload aborted');
+      error.name = 'AbortError';
+      throw error;
+    }
+  };
+
+  // Helper to abort AWS multipart upload (best effort, don't throw on failure)
+  const abortMultipartUpload = async (uploadId) => {
+    try {
+      await awsReq(uploadInfo, 'DELETE', `uploadId=${uploadId}`, '', null, context);
+    } catch (e) {
+      // Ignore errors during abort - this is cleanup
+    }
+  };
 
   // Calculate block size
   // - If size known: target ~10k parts, min 5MB
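Note that abortMultipartUpload does not pass the signal to awsReq, presumably so the cleanup DELETE can still complete after the user has aborted. The call maps to S3's AbortMultipartUpload operation (a DELETE on the key with an uploadId query parameter); without it, abandoned parts keep accruing storage until a bucket lifecycle rule removes them. The same best-effort cleanup shape, in isolation:

    // Run an abortable task; on AbortError, attempt cleanup without
    // letting a cleanup failure mask the original abort.
    async function withAbortCleanup(task, cleanup) {
      try {
        return await task();
      } catch (err) {
        if (err.name === 'AbortError') {
          await cleanup().catch(() => {}); // best effort only
        }
        throw err;
      }
    }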
@@ -417,6 +499,9 @@ async function doAwsUpload(file, uploadInfo, context, options) {
     blockSize = 551550976; // 526MB
   }
 
+  // Check for abort before starting
+  checkAbort();
+
   // Initialize multipart upload with retry support
   let uploadId;
   let initAttempt = 0;
@@ -429,13 +514,18 @@ async function doAwsUpload(file, uploadInfo, context, options) {
         'uploads=',
         '',
         { 'Content-Type': file.type || 'application/octet-stream', 'X-Amz-Acl': 'private' },
-        context
+        context,
+        signal
       );
       const initXml = await initResponse.text();
       const dom = utils.parseXML(initXml);
       uploadId = dom.querySelector('UploadId').innerHTML;
       break;
     } catch (error) {
+      // Re-throw abort errors immediately
+      if (error.name === 'AbortError') {
+        throw error;
+      }
       if (onError) {
         await onError(error, { phase: 'init', attempt: initAttempt });
         continue;
@@ -448,66 +538,84 @@ async function doAwsUpload(file, uploadInfo, context, options) {
   const maxConcurrent = 3;
   let completedBlocks = 0;
 
-  // Stream-based upload: read sequentially, upload in parallel
-  if (file.stream) {
-    let blockNum = 0;
-    let streamEnded = false;
-    const pendingUploads = [];
+  // Wrap upload in try/catch to abort multipart upload on cancel
+  try {
+    // Stream-based upload: read sequentially, upload in parallel
+    if (file.stream) {
+      let blockNum = 0;
+      let streamEnded = false;
+      const pendingUploads = [];
+
+      while (!streamEnded || pendingUploads.length > 0) {
+        // Check for abort before reading more data
+        checkAbort();
+
+        // Read and start uploads up to maxConcurrent
+        while (!streamEnded && pendingUploads.length < maxConcurrent) {
+          const chunkData = await readChunkFromStream(file.stream, blockSize);
+          if (chunkData === null) {
+            streamEnded = true;
+            break;
+          }
 
-    while (!streamEnded || pendingUploads.length > 0) {
-      // Read and start uploads up to maxConcurrent
-      while (!streamEnded && pendingUploads.length < maxConcurrent) {
-        const chunkData = await readChunkFromStream(file.stream, blockSize);
-        if (chunkData === null) {
-          streamEnded = true;
-          break;
+          const currentBlock = blockNum++;
+          const uploadPromise = uploadAwsBlockWithDataAndRetry(
+            uploadInfo, uploadId, currentBlock, chunkData, context, onError, signal
+          ).then(etag => {
+            etags[currentBlock] = etag;
+            completedBlocks++;
+            if (onProgress && blocks) {
+              onProgress(completedBlocks / blocks);
+            }
+          });
+
+          pendingUploads.push(uploadPromise);
         }
 
-        const currentBlock = blockNum++;
-        const uploadPromise = uploadAwsBlockWithDataAndRetry(
-          uploadInfo, uploadId, currentBlock, chunkData, context, onError
-        ).then(etag => {
-          etags[currentBlock] = etag;
-          completedBlocks++;
-          if (onProgress && blocks) {
-            onProgress(completedBlocks / blocks);
-          }
-        });
-
-        pendingUploads.push(uploadPromise);
+        // Wait for at least one upload to complete before reading more
+        if (pendingUploads.length > 0) {
+          // Create indexed promises that return their index when done
+          const indexedPromises = pendingUploads.map((p, idx) => p.then(() => idx));
+          const completedIdx = await Promise.race(indexedPromises);
+          pendingUploads.splice(completedIdx, 1);
+        }
       }
 
-      // Wait for at least one upload to complete before reading more
-      if (pendingUploads.length > 0) {
-        // Create indexed promises that return their index when done
-        const indexedPromises = pendingUploads.map((p, idx) => p.then(() => idx));
-        const completedIdx = await Promise.race(indexedPromises);
-        pendingUploads.splice(completedIdx, 1);
-      }
-    }
+      blocks = blockNum; // Now we know the total
+    } else {
+      // Buffer-based upload: original logic
+      for (let i = 0; i < blocks; i += maxConcurrent) {
+        // Check for abort before starting next batch
+        checkAbort();
+
+        const batch = [];
+        for (let j = i; j < Math.min(i + maxConcurrent, blocks); j++) {
+          batch.push(
+            uploadAwsBlockWithRetry(file, uploadInfo, uploadId, j, blockSize, context, onError, signal)
+              .then(etag => {
+                etags[j] = etag;
+                completedBlocks++;
+                if (onProgress) {
+                  onProgress(completedBlocks / blocks);
+                }
+              })
+          );
+        }
 
-    blocks = blockNum; // Now we know the total
-  } else {
-    // Buffer-based upload: original logic
-    for (let i = 0; i < blocks; i += maxConcurrent) {
-      const batch = [];
-      for (let j = i; j < Math.min(i + maxConcurrent, blocks); j++) {
-        batch.push(
-          uploadAwsBlockWithRetry(file, uploadInfo, uploadId, j, blockSize, context, onError)
-            .then(etag => {
-              etags[j] = etag;
-              completedBlocks++;
-              if (onProgress) {
-                onProgress(completedBlocks / blocks);
-              }
-            })
-        );
+        await Promise.all(batch);
       }
-
-      await Promise.all(batch);
     }
+  } catch (error) {
+    // On abort, try to clean up the AWS multipart upload
+    if (error.name === 'AbortError') {
+      await abortMultipartUpload(uploadId);
+    }
+    throw error;
   }
 
+  // Check for abort before completing
+  checkAbort();
+
   // Complete multipart upload with retry support
   let xml = '<CompleteMultipartUpload>';
   for (let i = 0; i < blocks; i++) {
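The indexed-promise trick in the stream path exists because Promise.race reports only the winning value, not which entry settled; mapping each pending upload to a promise that resolves with its own index lets the loop splice exactly the finished one out of its sliding window. In isolation:

    // Wait for the first of several pending promises, then remove it
    // so a new task can be started in its place.
    async function raceAndRemove(pending) {
      const indexed = pending.map((p, idx) => p.then(() => idx));
      const doneIdx = await Promise.race(indexed);
      pending.splice(doneIdx, 1);
    }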
@@ -519,10 +627,15 @@ async function doAwsUpload(file, uploadInfo, context, options) {
   while (true) {
     completeAttempt++;
     try {
-      const completeResponse = await awsReq(uploadInfo, 'POST', `uploadId=${uploadId}`, xml, null, context);
+      const completeResponse = await awsReq(uploadInfo, 'POST', `uploadId=${uploadId}`, xml, null, context, signal);
       await completeResponse.text();
       break;
     } catch (error) {
+      // On abort, try to clean up the AWS multipart upload
+      if (error.name === 'AbortError') {
+        await abortMultipartUpload(uploadId);
+        throw error;
+      }
       if (onError) {
         await onError(error, { phase: 'complete', attempt: completeAttempt });
         continue;
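For reference, the xml string assembled above follows S3's CompleteMultipartUpload request body: one Part element per block, with 1-based part numbers and the ETag that S3 returned for each part upload. For a two-part upload the payload looks roughly like this (ETag values are placeholders):

    const xml =
      '<CompleteMultipartUpload>' +
        '<Part><PartNumber>1</PartNumber><ETag>"etag-of-part-1"</ETag></Part>' +
        '<Part><PartNumber>2</PartNumber><ETag>"etag-of-part-2"</ETag></Part>' +
      '</CompleteMultipartUpload>';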
@@ -531,6 +644,9 @@ async function doAwsUpload(file, uploadInfo, context, options) {
     }
   }
 
+  // Check for abort before server-side completion
+  checkAbort();
+
   // Call server-side completion handler with retry support
   let handleAttempt = 0;
   while (true) {
@@ -544,6 +660,8 @@ async function doAwsUpload(file, uploadInfo, context, options) {
       );
       return finalResponse;
     } catch (error) {
+      // Check if aborted during completion
+      checkAbort();
       if (onError) {
         await onError(error, { phase: 'handleComplete', attempt: handleAttempt });
         continue;
@@ -557,7 +675,7 @@ async function doAwsUpload(file, uploadInfo, context, options) {
  * Upload a block to AWS S3 with pre-read data and retry support
  * @private
  */
-async function uploadAwsBlockWithDataAndRetry(uploadInfo, uploadId, blockNum, data, context, onError) {
+async function uploadAwsBlockWithDataAndRetry(uploadInfo, uploadId, blockNum, data, context, onError, signal) {
   let attempt = 0;
   while (true) {
     attempt++;
@@ -569,7 +687,8 @@ async function uploadAwsBlockWithDataAndRetry(uploadInfo, uploadId, blockNum, da
         `partNumber=${awsPartNumber}&uploadId=${uploadId}`,
         data,
         null,
-        context
+        context,
+        signal
       );
 
       if (!response.ok) {
@@ -580,6 +699,10 @@ async function uploadAwsBlockWithDataAndRetry(uploadInfo, uploadId, blockNum, da
       await response.text();
       return etag;
     } catch (error) {
+      // Re-throw abort errors immediately
+      if (error.name === 'AbortError') {
+        throw error;
+      }
       if (onError) {
         await onError(error, { phase: 'upload', blockNum, attempt });
         continue;
@@ -593,13 +716,17 @@ async function uploadAwsBlockWithDataAndRetry(uploadInfo, uploadId, blockNum, da
  * Upload a single block to AWS S3 with retry support
  * @private
  */
-async function uploadAwsBlockWithRetry(file, uploadInfo, uploadId, blockNum, blockSize, context, onError) {
+async function uploadAwsBlockWithRetry(file, uploadInfo, uploadId, blockNum, blockSize, context, onError, signal) {
   let attempt = 0;
   while (true) {
     attempt++;
     try {
-      return await uploadAwsBlock(file, uploadInfo, uploadId, blockNum, blockSize, context);
+      return await uploadAwsBlock(file, uploadInfo, uploadId, blockNum, blockSize, context, signal);
     } catch (error) {
+      // Re-throw abort errors immediately
+      if (error.name === 'AbortError') {
+        throw error;
+      }
       if (onError) {
         await onError(error, { phase: 'upload', blockNum, attempt });
         continue;
@@ -613,7 +740,7 @@ async function uploadAwsBlockWithRetry(file, uploadInfo, uploadId, blockNum, blo
  * Upload a single block to AWS S3
  * @private
  */
-async function uploadAwsBlock(file, uploadInfo, uploadId, blockNum, blockSize, context) {
+async function uploadAwsBlock(file, uploadInfo, uploadId, blockNum, blockSize, context, signal) {
   const startByte = blockNum * blockSize;
   const endByte = Math.min(startByte + blockSize, file.size);
   const awsPartNumber = blockNum + 1; // AWS uses 1-based part numbers
@@ -626,7 +753,8 @@ async function uploadAwsBlock(file, uploadInfo, uploadId, blockNum, blockSize, c
     `partNumber=${awsPartNumber}&uploadId=${uploadId}`,
     arrayBuffer,
     null,
-    context
+    context,
+    signal
   );
 
   if (!response.ok) {
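Taken together, 0.2.25 threads a single AbortSignal from the public options object down to every utils.fetch and awsReq call. A closing sketch that combines the new option with a deadline, assuming a runtime with AbortSignal.timeout() (Node 17.3+ or a recent browser); note that a timeout signal makes fetch reject with a 'TimeoutError' while the library's own pre-checks throw 'AbortError', so both names are handled:

    const { uploadFile } = require('@karpeleslab/klbfw'); // assumed export

    async function uploadWithDeadline(buffer) {
      try {
        // AbortSignal.timeout() aborts automatically after the given delay.
        return await uploadFile('Misc/Debug:testUpload', buffer, 'POST',
          { filename: 'big.bin' }, null, { signal: AbortSignal.timeout(60000) });
      } catch (err) {
        if (err.name === 'AbortError' || err.name === 'TimeoutError') {
          console.log('upload did not finish within a minute');
          return null;
        }
        throw err;
      }
    }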