@karpeleslab/klbfw 0.2.18 → 0.2.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/upload.js +45 -31
package/package.json
CHANGED
package/upload.js
CHANGED
@@ -493,8 +493,10 @@ async function doPutUpload(file, uploadInfo, context, options) {
     const startByte = byteOffset;
     byteOffset += chunkData.byteLength;
 
+    // Only add Content-Range for multi-block uploads
+    const useContentRange = blocks === null || blocks > 1;
     const uploadPromise = uploadPutBlockWithDataAndRetry(
-      uploadInfo, currentBlock, startByte, chunkData, file.type, onError
+      uploadInfo, currentBlock, startByte, chunkData, file.type, onError, useContentRange
     ).then(() => {
       completedBlocks++;
       if (onProgress && blocks) {
@@ -507,17 +509,10 @@ async function doPutUpload(file, uploadInfo, context, options) {
 
     // Wait for at least one upload to complete before reading more
     if (pendingUploads.length > 0) {
-      await Promise.race(pendingUploads);
-      // Find which uploads completed and remove them
-      for (let i = 0; i < pendingUploads.length; i++) {
-        const status = await Promise.race([
-          pendingUploads[i].then(() => 'done'),
-          Promise.resolve('pending')
-        ]);
-        if (status === 'done') {
-          pendingUploads.splice(i, 1);
-        }
-      }
+      // Create indexed promises that return their index when done
+      const indexedPromises = pendingUploads.map((p, idx) => p.then(() => idx));
+      const completedIdx = await Promise.race(indexedPromises);
+      pendingUploads.splice(completedIdx, 1);
     }
   }
 
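The replacement above races index-tagged wrappers instead of polling each pending promise against a resolved sentinel. A minimal standalone sketch of the pattern follows; the `sleep` helper and sample durations are made up for illustration and are not part of klbfw:

    const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

    async function demo() {
      // Three stand-in "uploads" finishing at different times
      const pending = [sleep(300), sleep(100), sleep(200)];
      while (pending.length > 0) {
        // Re-wrap on every iteration so each promise reports its
        // current index in the (possibly spliced) array
        const idx = await Promise.race(pending.map((p, i) => p.then(() => i)));
        pending.splice(idx, 1);
        console.log(`one upload done, ${pending.length} still pending`);
      }
    }

    demo();

One reason for the change: in a race such as `Promise.race([p.then(() => 'done'), Promise.resolve('pending')])`, the already-resolved sentinel settles the race one microtask ahead of `p.then(...)` even when `p` itself has settled, so the old check could keep reporting 'pending' and never prune the list.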
@@ -564,7 +559,7 @@ async function doPutUpload(file, uploadInfo, context, options) {
  * Upload a single block via PUT with pre-read data and retry support
  * @private
  */
-async function uploadPutBlockWithDataAndRetry(uploadInfo, blockNum, startByte, data, contentType, onError) {
+async function uploadPutBlockWithDataAndRetry(uploadInfo, blockNum, startByte, data, contentType, onError, useContentRange) {
   let attempt = 0;
   while (true) {
     attempt++;
@@ -573,8 +568,10 @@ async function uploadPutBlockWithDataAndRetry(uploadInfo, blockNum, startByte, d
       'Content-Type': contentType || 'application/octet-stream'
     };
 
-    // Add Content-Range for multipart PUT
-    headers['Content-Range'] = `bytes ${startByte}-${startByte + data.byteLength - 1}/*`;
+    // Add Content-Range for multipart PUT (not for single-block uploads)
+    if (useContentRange) {
+      headers['Content-Range'] = `bytes ${startByte}-${startByte + data.byteLength - 1}/*`;
+    }
 
     const response = await utils.fetch(uploadInfo.PUT, {
       method: 'PUT',
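For reference, the header produced by the new branch looks like this on the wire; the offsets below are made-up example values, and the `/*` suffix means the total size is not declared:

    const startByte = 2 * 1024 * 1024;          // example: third 1 MiB block
    const data = new Uint8Array(1024 * 1024);   // example chunk
    const header = `bytes ${startByte}-${startByte + data.byteLength - 1}/*`;
    console.log(header);                        // "bytes 2097152-3145727/*"

For a single-block upload the request body already covers the whole object, so the header is now omitted rather than sent as a redundant (and, on some servers, presumably rejected) range.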
@@ -735,17 +732,10 @@ async function doAwsUpload(file, uploadInfo, context, options) {
 
     // Wait for at least one upload to complete before reading more
    if (pendingUploads.length > 0) {
-      await Promise.race(pendingUploads);
-      // Find which uploads completed and remove them
-      for (let i = 0; i < pendingUploads.length; i++) {
-        const status = await Promise.race([
-          pendingUploads[i].then(() => 'done'),
-          Promise.resolve('pending')
-        ]);
-        if (status === 'done') {
-          pendingUploads.splice(i, 1);
-        }
-      }
+      // Create indexed promises that return their index when done
+      const indexedPromises = pendingUploads.map((p, idx) => p.then(() => idx));
+      const completedIdx = await Promise.race(indexedPromises);
+      pendingUploads.splice(completedIdx, 1);
    }
  }
 
@@ -910,10 +900,26 @@ async function uploadAwsBlock(file, uploadInfo, uploadId, blockNum, blockSize, c
  */
 function readChunkFromStream(stream, size) {
   return new Promise((resolve, reject) => {
+    // Check if stream already ended before we start
+    if (stream.readableEnded) {
+      resolve(null);
+      return;
+    }
+
     const chunks = [];
     let bytesRead = 0;
+    let resolved = false;
+
+    const doResolve = (value) => {
+      if (resolved) return;
+      resolved = true;
+      cleanup();
+      resolve(value);
+    };
 
     const onReadable = () => {
+      if (resolved) return;
+
       let chunk;
       while (bytesRead < size && (chunk = stream.read(Math.min(size - bytesRead, 65536))) !== null) {
         chunks.push(chunk);
@@ -921,21 +927,29 @@ function readChunkFromStream(stream, size) {
       }
 
       if (bytesRead >= size) {
-        cleanup();
-        resolve(combineChunks(chunks));
+        doResolve(combineChunks(chunks));
+      } else if (stream.readableEnded) {
+        // Stream already ended, resolve with what we have
+        if (bytesRead === 0) {
+          doResolve(null);
+        } else {
+          doResolve(combineChunks(chunks));
+        }
       }
     };
 
     const onEnd = () => {
-      cleanup();
+      if (resolved) return;
       if (bytesRead === 0) {
-        resolve(null); // Stream ended, no more data
+        doResolve(null); // Stream ended, no more data
       } else {
-        resolve(combineChunks(chunks));
+        doResolve(combineChunks(chunks));
       }
     };
 
     const onError = (err) => {
+      if (resolved) return;
+      resolved = true;
       cleanup();
       reject(err);
     };
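The `resolved` flag plus the `doResolve` wrapper is a general settle-once guard for promisified event handlers. A minimal sketch of the same pattern outside klbfw; the emitter, event names, and `firstData` helper are illustrative, not package code:

    const { EventEmitter } = require('events');

    function firstData(emitter) {
      return new Promise((resolve, reject) => {
        let settled = false;
        const cleanup = () => {
          emitter.removeListener('data', onData);
          emitter.removeListener('error', onError);
        };
        const onData = (value) => {
          if (settled) return; // ignore events after the promise settled
          settled = true;
          cleanup();
          resolve(value);
        };
        const onError = (err) => {
          if (settled) return;
          settled = true;
          cleanup();
          reject(err);
        };
        emitter.on('data', onData);
        emitter.on('error', onError);
      });
    }

    const em = new EventEmitter();
    firstData(em).then((v) => console.log('got', v)); // logs "got 1"
    em.emit('data', 1);
    em.emit('data', 2); // ignored: the guard has already fired

Without such a guard, a 'readable' firing after 'end' (or vice versa) would call `resolve` twice; the second call is a no-op for the Promise itself, but the handler bodies would still run and repeat work such as combining chunks.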