@karpeleslab/klbfw 0.2.17 → 0.2.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/package.json +1 -1
  2. package/upload.js +514 -79
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@karpeleslab/klbfw",
-  "version": "0.2.17",
+  "version": "0.2.19",
   "description": "Frontend Framework",
   "main": "index.js",
   "types": "index.d.ts",
package/upload.js CHANGED
@@ -39,23 +39,22 @@
  * ```js
  * // For Node.js environments, first install dependencies:
  * // npm install node-fetch @xmldom/xmldom
- *
- * // Create a buffer-based file object for upload
- * const file = {
- *   name: 'test.txt',
- *   size: buffer.length,
- *   type: 'text/plain',
- *   content: buffer, // Buffer or ArrayBuffer with file content
- *   lastModified: Date.now(),
- *   slice: function(start, end) {
- *     return {
- *       content: this.content.slice(start, end)
- *     };
- *   }
- * };
- *
- * upload.append('Misc/Debug:testUpload', file)
- *   .then(result => console.log('Upload complete', result));
+ *
+ * // Simple upload with a buffer
+ * const { uploadFile } = require('./upload');
+ * const buffer = Buffer.from('Hello, World!');
+ * const result = await uploadFile('Misc/Debug:testUpload', buffer, 'POST', {
+ *   filename: 'hello.txt',
+ *   type: 'text/plain'
+ * });
+ *
+ * // Upload large files using a stream (doesn't load entire file into memory)
+ * const fs = require('fs');
+ * const stream = fs.createReadStream('/path/to/2tb-file.bin');
+ * const result = await uploadFile('Misc/Debug:testUpload', stream, 'POST', {
+ *   filename: 'large-file.bin',
+ *   type: 'application/octet-stream'
+ * });
  * ```
  *
  * @module upload
@@ -280,11 +279,17 @@ const utils = {
  * - A Uint8Array or other TypedArray
  * - A browser File object
  * - A file-like object with { name, size, type, content, lastModified }
+ * - A file-like object with { name, size, type, stream } for streaming large files
  * - A string (will be converted to UTF-8 bytes)
  * @param {string} [method='POST'] - HTTP method for the initial API call
  * @param {Object} [params={}] - Additional parameters to send with the upload.
  *   Can include `filename` and `type` to override defaults.
  * @param {Object} [context=null] - Request context (uses default context if not provided)
+ * @param {Object} [options={}] - Upload options
+ * @param {Function} [options.onProgress] - Progress callback(progress) where progress is 0-1
+ * @param {Function} [options.onError] - Error callback(error, context). Can return a Promise
+ *   that, if resolved, will cause the failed operation to be retried. Context contains
+ *   { phase, blockNum, attempt } for block uploads or { phase, attempt } for other operations.
  * @returns {Promise<Object>} - Resolves with the upload result data
  *
  * @example
@@ -296,17 +301,40 @@ const utils = {
  * });
  *
  * @example
- * // Upload with defaults
- * const result = await uploadFile('Misc/Debug:testUpload', buffer);
+ * // Upload with progress and error handling
+ * const result = await uploadFile('Misc/Debug:testUpload', buffer, 'POST', {
+ *   filename: 'large-file.bin'
+ * }, null, {
+ *   onProgress: (progress) => console.log(`${Math.round(progress * 100)}%`),
+ *   onError: async (error, ctx) => {
+ *     console.log(`Error in ${ctx.phase}, attempt ${ctx.attempt}:`, error.message);
+ *     if (ctx.attempt < 3) {
+ *       await new Promise(r => setTimeout(r, 1000)); // Wait 1s before retry
+ *       return; // Resolve to trigger retry
+ *     }
+ *     throw error; // Give up after 3 attempts
+ *   }
+ * });
  *
  * @example
  * // Upload a File object (browser)
  * const result = await uploadFile('Misc/Debug:testUpload', fileInput.files[0]);
+ *
+ * @example
+ * // Upload a large file using a stream (Node.js) - doesn't load entire file into memory
+ * const fs = require('fs');
+ * const stream = fs.createReadStream('/path/to/large-file.bin');
+ * const result = await uploadFile('Misc/Debug:testUpload', stream, 'POST', {
+ *   filename: 'large-file.bin',
+ *   type: 'application/octet-stream',
+ *   size: 2199023255552 // optional: if known, enables optimal block sizing
+ * });
  */
-async function uploadFile(api, buffer, method, params, context) {
+async function uploadFile(api, buffer, method, params, context, options) {
   // Handle default values
   method = method || 'POST';
   params = params || {};
+  options = options || {};
 
   // Get context from framework if not provided, and add available values
   if (!context) {
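The `onError` contract documented in the hunk above (resolve to retry the failed phase, throw to abort) composes naturally with a backoff policy. A minimal sketch assuming only that contract; `withBackoff` is an illustrative helper, not part of the package:

```js
// Build an onError handler that retries with exponential backoff.
// maxAttempts and the 1s base delay are illustrative choices.
const withBackoff = (maxAttempts) => async (error, ctx) => {
  if (ctx.attempt >= maxAttempts) throw error; // give up: the upload fails
  // wait 1s, 2s, 4s, ... then resolve, which signals a retry of ctx.phase
  await new Promise(r => setTimeout(r, 1000 * 2 ** (ctx.attempt - 1)));
};

// await uploadFile('Misc/Debug:testUpload', buffer, 'POST', {}, null,
//   { onError: withBackoff(5) });
```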
@@ -384,8 +412,18 @@ async function uploadFile(api, buffer, method, params, context) {
       content: buffer.content
     };
   }
+  // Handle Node.js readable stream
+  else if (buffer && typeof buffer.read === 'function' && typeof buffer.on === 'function') {
+    fileObj = {
+      name: params.filename || 'file.bin',
+      size: params.size || null, // null means unknown size
+      type: params.type || 'application/octet-stream',
+      lastModified: Date.now(),
+      stream: buffer
+    };
+  }
   else {
-    throw new Error('Invalid file: must be a Buffer, ArrayBuffer, Uint8Array, File, string, or file-like object with content');
+    throw new Error('Invalid file: must be a Buffer, ArrayBuffer, Uint8Array, File, readable stream, or file-like object with content');
   }
 
   // Merge params with file metadata (file metadata takes precedence for these fields)
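The new `else if` branch detects Node.js readables by duck typing (`read` and `on` methods) rather than `instanceof`, so any stream-like source qualifies. A quick illustration of what the check accepts; the path is hypothetical:

```js
const fs = require('fs');
const { Readable } = require('stream');

// Same duck-typing test as the new else-if branch above
const isNodeReadable = (x) =>
  x && typeof x.read === 'function' && typeof x.on === 'function';

isNodeReadable(fs.createReadStream('/tmp/example.bin')); // true
isNodeReadable(Readable.from(['a', 'b']));               // true
isNodeReadable(Buffer.from('abc'));                      // false (matched by an earlier branch)
```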
@@ -401,12 +439,12 @@ async function uploadFile(api, buffer, method, params, context) {
 
   // Method 1: AWS signed multipart upload
   if (data.Cloud_Aws_Bucket_Upload__) {
-    return doAwsUpload(fileObj, data, context);
+    return doAwsUpload(fileObj, data, context, options);
   }
 
   // Method 2: Direct PUT upload
   if (data.PUT) {
-    return doPutUpload(fileObj, data, context);
+    return doPutUpload(fileObj, data, context, options);
   }
 
   throw new Error('Invalid upload response format: no upload method available');
@@ -416,26 +454,166 @@ async function uploadFile(api, buffer, method, params, context) {
  * Perform a direct PUT upload (simple upload method)
  * @private
  */
-async function doPutUpload(file, uploadInfo, context) {
-  const blockSize = uploadInfo.Blocksize || file.size;
-  const blocks = Math.ceil(file.size / blockSize);
+async function doPutUpload(file, uploadInfo, context, options) {
+  const { onProgress, onError } = options;
+
+  // Calculate block size
+  // - If size known: use server's Blocksize or file size
+  // - If size unknown (streaming): use 526MB default
+  let blockSize;
+  let blocks = null;
+
+  if (file.size) {
+    blockSize = uploadInfo.Blocksize || file.size;
+    blocks = Math.ceil(file.size / blockSize);
+  } else {
+    blockSize = 551550976; // 526MB
+  }
 
-  // Upload blocks with concurrency limit
   const maxConcurrent = 3;
+  let completedBlocks = 0;
+
+  // Stream-based upload: read sequentially, upload in parallel
+  if (file.stream) {
+    let blockNum = 0;
+    let streamEnded = false;
+    let byteOffset = 0;
+    const pendingUploads = [];
+
+    while (!streamEnded || pendingUploads.length > 0) {
+      // Read and start uploads up to maxConcurrent
+      while (!streamEnded && pendingUploads.length < maxConcurrent) {
+        const chunkData = await readChunkFromStream(file.stream, blockSize);
+        if (chunkData === null) {
+          streamEnded = true;
+          break;
+        }
+
+        const currentBlock = blockNum++;
+        const startByte = byteOffset;
+        byteOffset += chunkData.byteLength;
+
+        // Only add Content-Range for multi-block uploads
+        const useContentRange = blocks === null || blocks > 1;
+        const uploadPromise = uploadPutBlockWithDataAndRetry(
+          uploadInfo, currentBlock, startByte, chunkData, file.type, onError, useContentRange
+        ).then(() => {
+          completedBlocks++;
+          if (onProgress && blocks) {
+            onProgress(completedBlocks / blocks);
+          }
+        });
+
+        pendingUploads.push(uploadPromise);
+      }
+
+      // Wait for at least one upload to complete before reading more
+      if (pendingUploads.length > 0) {
+        // Create indexed promises that return their index when done
+        const indexedPromises = pendingUploads.map((p, idx) => p.then(() => idx));
+        const completedIdx = await Promise.race(indexedPromises);
+        pendingUploads.splice(completedIdx, 1);
+      }
+    }
+
+    blocks = blockNum;
+  } else {
+    // Buffer-based upload: original logic
+    for (let i = 0; i < blocks; i += maxConcurrent) {
+      const batch = [];
+      for (let j = i; j < Math.min(i + maxConcurrent, blocks); j++) {
+        batch.push(
+          uploadPutBlockWithRetry(file, uploadInfo, j, blockSize, onError)
+            .then(() => {
+              completedBlocks++;
+              if (onProgress) {
+                onProgress(completedBlocks / blocks);
+              }
+            })
+        );
+      }
+
+      await Promise.all(batch);
+    }
+  }
 
-  // Process blocks in batches
-  for (let i = 0; i < blocks; i += maxConcurrent) {
-    const batch = [];
-    for (let j = i; j < Math.min(i + maxConcurrent, blocks); j++) {
-      batch.push(uploadPutBlock(file, uploadInfo, j, blockSize));
+  // All blocks done, call completion with retry support
+  let attempt = 0;
+  while (true) {
+    attempt++;
+    try {
+      const completeResponse = await rest.rest(uploadInfo.Complete, 'POST', {}, context);
+      return completeResponse.data;
+    } catch (error) {
+      if (onError) {
+        await onError(error, { phase: 'complete', attempt });
+        // If onError resolves, retry
+        continue;
+      }
+      throw error;
     }
+  }
+}
+
+/**
+ * Upload a single block via PUT with pre-read data and retry support
+ * @private
+ */
+async function uploadPutBlockWithDataAndRetry(uploadInfo, blockNum, startByte, data, contentType, onError, useContentRange) {
+  let attempt = 0;
+  while (true) {
+    attempt++;
+    try {
+      const headers = {
+        'Content-Type': contentType || 'application/octet-stream'
+      };
+
+      // Add Content-Range for multipart PUT (not for single-block uploads)
+      if (useContentRange) {
+        headers['Content-Range'] = `bytes ${startByte}-${startByte + data.byteLength - 1}/*`;
+      }
+
+      const response = await utils.fetch(uploadInfo.PUT, {
+        method: 'PUT',
+        body: data,
+        headers: headers
+      });
+
+      if (!response.ok) {
+        throw new Error(`HTTP ${response.status}: ${response.statusText}`);
+      }
 
-    await Promise.all(batch);
+      await response.text();
+      return;
+    } catch (error) {
+      if (onError) {
+        await onError(error, { phase: 'upload', blockNum, attempt });
+        continue;
+      }
+      throw error;
+    }
   }
+}
 
-  // All blocks done, call completion
-  const completeResponse = await rest.rest(uploadInfo.Complete, 'POST', {}, context);
-  return completeResponse.data;
+/**
+ * Upload a single block via PUT with retry support
+ * @private
+ */
+async function uploadPutBlockWithRetry(file, uploadInfo, blockNum, blockSize, onError) {
+  let attempt = 0;
+  while (true) {
+    attempt++;
+    try {
+      return await uploadPutBlock(file, uploadInfo, blockNum, blockSize);
+    } catch (error) {
+      if (onError) {
+        await onError(error, { phase: 'upload', blockNum, attempt });
+        // If onError resolves, retry
+        continue;
+      }
+      throw error;
+    }
+  }
 }
 
 /**
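Both the PUT and AWS paths use the same sliding-window pattern for streams: read chunks sequentially, keep at most `maxConcurrent` uploads in flight, and free a slot by racing index-tagged promises (`Promise.race` alone reports that something settled, not which). The pattern in isolation, with hypothetical names; `makeNext` wraps its upload promise in an object so the async function does not flatten it:

```js
// Generic form of the loop in doPutUpload/doAwsUpload.
// makeNext reads the next chunk and starts its upload, returning
// { promise } while work remains and null once the source is drained.
async function slidingWindow(makeNext, maxConcurrent) {
  const pending = [];
  let done = false;
  while (!done || pending.length > 0) {
    while (!done && pending.length < maxConcurrent) {
      const next = await makeNext();
      if (next === null) { done = true; break; }
      pending.push(next.promise);
    }
    if (pending.length > 0) {
      // Tag each promise with its slot index to know which one to remove
      const idx = await Promise.race(pending.map((p, i) => p.then(() => i)));
      pending.splice(idx, 1);
    }
  }
}
```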
@@ -475,61 +653,213 @@ async function uploadPutBlock(file, uploadInfo, blockNum, blockSize) {
  * Perform an AWS multipart upload
  * @private
  */
-async function doAwsUpload(file, uploadInfo, context) {
-  // Calculate optimal block size (min 5MB for AWS, target ~10k parts)
-  let blockSize = Math.ceil(file.size / 10000);
-  if (blockSize < 5242880) blockSize = 5242880;
-
-  const blocks = Math.ceil(file.size / blockSize);
+async function doAwsUpload(file, uploadInfo, context, options) {
+  const { onProgress, onError } = options;
+
+  // Calculate block size
+  // - If size known: target ~10k parts, min 5MB
+  // - If size unknown: use 526MB (allows up to ~5TB with 10k parts)
+  let blockSize;
+  let blocks = null; // null means unknown (streaming)
+
+  if (file.size) {
+    blockSize = Math.ceil(file.size / 10000);
+    if (blockSize < 5242880) blockSize = 5242880;
+    blocks = Math.ceil(file.size / blockSize);
+  } else {
+    blockSize = 551550976; // 526MB
+  }
 
-  // Initialize multipart upload
-  const initResponse = await awsReq(
-    uploadInfo,
-    'POST',
-    'uploads=',
-    '',
-    { 'Content-Type': file.type || 'application/octet-stream', 'X-Amz-Acl': 'private' },
-    context
-  );
-  const initXml = await initResponse.text();
-  const dom = utils.parseXML(initXml);
-  const uploadId = dom.querySelector('UploadId').innerHTML;
+  // Initialize multipart upload with retry support
+  let uploadId;
+  let initAttempt = 0;
+  while (true) {
+    initAttempt++;
+    try {
+      const initResponse = await awsReq(
+        uploadInfo,
+        'POST',
+        'uploads=',
+        '',
+        { 'Content-Type': file.type || 'application/octet-stream', 'X-Amz-Acl': 'private' },
+        context
+      );
+      const initXml = await initResponse.text();
+      const dom = utils.parseXML(initXml);
+      uploadId = dom.querySelector('UploadId').innerHTML;
+      break;
+    } catch (error) {
+      if (onError) {
+        await onError(error, { phase: 'init', attempt: initAttempt });
+        continue;
+      }
+      throw error;
+    }
+  }
 
-  // Upload all parts with concurrency limit
   const etags = {};
   const maxConcurrent = 3;
+  let completedBlocks = 0;
+
+  // Stream-based upload: read sequentially, upload in parallel
+  if (file.stream) {
+    let blockNum = 0;
+    let streamEnded = false;
+    const pendingUploads = [];
+
+    while (!streamEnded || pendingUploads.length > 0) {
+      // Read and start uploads up to maxConcurrent
+      while (!streamEnded && pendingUploads.length < maxConcurrent) {
+        const chunkData = await readChunkFromStream(file.stream, blockSize);
+        if (chunkData === null) {
+          streamEnded = true;
+          break;
+        }
 
-  for (let i = 0; i < blocks; i += maxConcurrent) {
-    const batch = [];
-    for (let j = i; j < Math.min(i + maxConcurrent, blocks); j++) {
-      batch.push(
-        uploadAwsBlock(file, uploadInfo, uploadId, j, blockSize, context)
-          .then(etag => { etags[j] = etag; })
-      );
+        const currentBlock = blockNum++;
+        const uploadPromise = uploadAwsBlockWithDataAndRetry(
+          uploadInfo, uploadId, currentBlock, chunkData, context, onError
+        ).then(etag => {
+          etags[currentBlock] = etag;
+          completedBlocks++;
+          if (onProgress && blocks) {
+            onProgress(completedBlocks / blocks);
+          }
+        });
+
+        pendingUploads.push(uploadPromise);
+      }
+
+      // Wait for at least one upload to complete before reading more
+      if (pendingUploads.length > 0) {
+        // Create indexed promises that return their index when done
+        const indexedPromises = pendingUploads.map((p, idx) => p.then(() => idx));
+        const completedIdx = await Promise.race(indexedPromises);
+        pendingUploads.splice(completedIdx, 1);
+      }
     }
 
-    await Promise.all(batch);
+    blocks = blockNum; // Now we know the total
+  } else {
+    // Buffer-based upload: original logic
+    for (let i = 0; i < blocks; i += maxConcurrent) {
+      const batch = [];
+      for (let j = i; j < Math.min(i + maxConcurrent, blocks); j++) {
+        batch.push(
+          uploadAwsBlockWithRetry(file, uploadInfo, uploadId, j, blockSize, context, onError)
+            .then(etag => {
+              etags[j] = etag;
+              completedBlocks++;
+              if (onProgress) {
+                onProgress(completedBlocks / blocks);
+              }
+            })
+        );
+      }
+
+      await Promise.all(batch);
+    }
   }
 
-  // Complete multipart upload
+  // Complete multipart upload with retry support
   let xml = '<CompleteMultipartUpload>';
   for (let i = 0; i < blocks; i++) {
     xml += `<Part><PartNumber>${i + 1}</PartNumber><ETag>${etags[i]}</ETag></Part>`;
   }
   xml += '</CompleteMultipartUpload>';
 
-  const completeResponse = await awsReq(uploadInfo, 'POST', `uploadId=${uploadId}`, xml, null, context);
-  await completeResponse.text();
+  let completeAttempt = 0;
+  while (true) {
+    completeAttempt++;
+    try {
+      const completeResponse = await awsReq(uploadInfo, 'POST', `uploadId=${uploadId}`, xml, null, context);
+      await completeResponse.text();
+      break;
+    } catch (error) {
+      if (onError) {
+        await onError(error, { phase: 'complete', attempt: completeAttempt });
+        continue;
+      }
+      throw error;
+    }
+  }
+
+  // Call server-side completion handler with retry support
+  let handleAttempt = 0;
+  while (true) {
+    handleAttempt++;
+    try {
+      const finalResponse = await rest.rest(
+        `Cloud/Aws/Bucket/Upload/${uploadInfo.Cloud_Aws_Bucket_Upload__}:handleComplete`,
+        'POST',
+        {},
+        context
+      );
+      return finalResponse.data;
+    } catch (error) {
+      if (onError) {
+        await onError(error, { phase: 'handleComplete', attempt: handleAttempt });
+        continue;
+      }
+      throw error;
+    }
+  }
+}
 
-  // Call server-side completion handler
-  const finalResponse = await rest.rest(
-    `Cloud/Aws/Bucket/Upload/${uploadInfo.Cloud_Aws_Bucket_Upload__}:handleComplete`,
-    'POST',
-    {},
-    context
-  );
+/**
+ * Upload a block to AWS S3 with pre-read data and retry support
+ * @private
+ */
+async function uploadAwsBlockWithDataAndRetry(uploadInfo, uploadId, blockNum, data, context, onError) {
+  let attempt = 0;
+  while (true) {
+    attempt++;
+    try {
+      const awsPartNumber = blockNum + 1;
+      const response = await awsReq(
+        uploadInfo,
+        'PUT',
+        `partNumber=${awsPartNumber}&uploadId=${uploadId}`,
+        data,
+        null,
+        context
+      );
+
+      if (!response.ok) {
+        throw new Error(`HTTP ${response.status}: ${response.statusText}`);
+      }
+
+      const etag = response.headers.get('ETag');
+      await response.text();
+      return etag;
+    } catch (error) {
+      if (onError) {
+        await onError(error, { phase: 'upload', blockNum, attempt });
+        continue;
+      }
+      throw error;
+    }
+  }
+}
 
-  return finalResponse.data;
+/**
+ * Upload a single block to AWS S3 with retry support
+ * @private
+ */
+async function uploadAwsBlockWithRetry(file, uploadInfo, uploadId, blockNum, blockSize, context, onError) {
+  let attempt = 0;
+  while (true) {
+    attempt++;
+    try {
+      return await uploadAwsBlock(file, uploadInfo, uploadId, blockNum, blockSize, context);
+    } catch (error) {
+      if (onError) {
+        await onError(error, { phase: 'upload', blockNum, attempt });
+        continue;
+      }
+      throw error;
+    }
+  }
 }
 
 /**
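The sizing arithmetic above follows the AWS multipart limits of 10,000 parts and a 5 MiB minimum part size. A standalone sketch of the known-size branch, with a few worked values:

```js
// Mirrors the known-size branch in doAwsUpload
function awsBlockSize(fileSize) {
  let blockSize = Math.ceil(fileSize / 10000); // target at most ~10k parts
  if (blockSize < 5242880) blockSize = 5242880; // AWS 5 MiB minimum part size
  return blockSize;
}

// 100 MiB file -> 5 MiB parts (minimum wins), 20 parts
// 2 TiB file   -> ~210 MiB parts, exactly 10,000 parts
// Unknown size -> fixed 526 MiB parts, i.e. up to ~5 TiB in 10,000 parts
```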
@@ -561,6 +891,106 @@ async function uploadAwsBlock(file, uploadInfo, uploadId, blockNum, blockSize, c
   return etag;
 }
 
+/**
+ * Read a chunk of specified size from a stream
+ * @private
+ * @param {ReadableStream} stream - Node.js readable stream
+ * @param {number} size - Number of bytes to read
+ * @returns {Promise<ArrayBuffer|null>} - ArrayBuffer with data, or null if stream ended
+ */
+function readChunkFromStream(stream, size) {
+  return new Promise((resolve, reject) => {
+    // Check if stream already ended before we start
+    if (stream.readableEnded) {
+      resolve(null);
+      return;
+    }
+
+    const chunks = [];
+    let bytesRead = 0;
+    let resolved = false;
+
+    const doResolve = (value) => {
+      if (resolved) return;
+      resolved = true;
+      cleanup();
+      resolve(value);
+    };
+
+    const onReadable = () => {
+      if (resolved) return;
+
+      let chunk;
+      while (bytesRead < size && (chunk = stream.read(Math.min(size - bytesRead, 65536))) !== null) {
+        chunks.push(chunk);
+        bytesRead += chunk.length;
+      }
+
+      if (bytesRead >= size) {
+        doResolve(combineChunks(chunks));
+      } else if (stream.readableEnded) {
+        // Stream already ended, resolve with what we have
+        if (bytesRead === 0) {
+          doResolve(null);
+        } else {
+          doResolve(combineChunks(chunks));
+        }
+      }
+    };
+
+    const onEnd = () => {
+      if (resolved) return;
+      if (bytesRead === 0) {
+        doResolve(null); // Stream ended, no more data
+      } else {
+        doResolve(combineChunks(chunks));
+      }
+    };
+
+    const onError = (err) => {
+      if (resolved) return;
+      resolved = true;
+      cleanup();
+      reject(err);
+    };
+
+    const cleanup = () => {
+      stream.removeListener('readable', onReadable);
+      stream.removeListener('end', onEnd);
+      stream.removeListener('error', onError);
+    };
+
+    stream.on('readable', onReadable);
+    stream.on('end', onEnd);
+    stream.on('error', onError);
+
+    // Try reading immediately in case data is already buffered
+    onReadable();
+  });
+}
+
+/**
+ * Combine chunks into a single ArrayBuffer
+ * @private
+ */
+function combineChunks(chunks) {
+  if (chunks.length === 0) {
+    return new ArrayBuffer(0);
+  }
+  if (chunks.length === 1) {
+    const chunk = chunks[0];
+    return chunk.buffer.slice(chunk.byteOffset, chunk.byteOffset + chunk.length);
+  }
+  const totalLength = chunks.reduce((sum, chunk) => sum + chunk.length, 0);
+  const result = new Uint8Array(totalLength);
+  let offset = 0;
+  for (const chunk of chunks) {
+    result.set(new Uint8Array(chunk.buffer, chunk.byteOffset, chunk.length), offset);
+    offset += chunk.length;
+  }
+  return result.buffer;
+}
+
 /**
  * Read a slice of a file as ArrayBuffer
  * @private
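`readChunkFromStream` above resolves with an ArrayBuffer of up to `size` bytes, or `null` once the stream is drained, which lets callers walk a file of any size in fixed-size chunks. A usage sketch; the path and helper are illustrative:

```js
const fs = require('fs');

// Count fixed-size chunks in a file without buffering it whole
async function countChunks(path, chunkSize) {
  const stream = fs.createReadStream(path);
  let count = 0;
  let chunk;
  while ((chunk = await readChunkFromStream(stream, chunkSize)) !== null) {
    count++; // chunk.byteLength <= chunkSize; the final chunk may be shorter
  }
  return count;
}
```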
@@ -762,25 +1192,30 @@ module.exports.upload = (function () {
 function handleFailure(up, error) {
   // Skip if upload is no longer running
   if (!(up.up_id in state.running)) return;
-
+
   // Check if already in failed list
   for (const failedItem of state.failed) {
     if (failedItem.up_id === up.up_id) {
       return; // Already recorded as failed
     }
   }
-
+
   // Record failure
   up.failure = error;
   state.failed.push(up);
   delete state.running[up.up_id];
-
+
+  // Reject the promise so callers know the upload failed
+  if (up.reject) {
+    up.reject(error);
+  }
+
   // Continue processing queue
   upload.run();
-
+
   // Notify progress
   sendProgress();
-
+
   // Dispatch failure event
   utils.dispatchEvent("upload:failed", {
     item: up,