@ardrive/turbo-sdk 1.30.0 → 1.31.0-alpha.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +36 -2
- package/bundles/web.bundle.min.js +490 -17
- package/lib/cjs/cli/commands/uploadFile.js +1 -0
- package/lib/cjs/cli/commands/uploadFolder.js +5 -1
- package/lib/cjs/cli/options.js +22 -1
- package/lib/cjs/cli/types.js +0 -15
- package/lib/cjs/cli/utils.js +14 -0
- package/lib/cjs/common/chunked.js +414 -0
- package/lib/cjs/common/http.js +1 -0
- package/lib/cjs/common/turbo.js +10 -2
- package/lib/cjs/common/upload.js +42 -10
- package/lib/cjs/types.js +14 -1
- package/lib/cjs/version.js +1 -1
- package/lib/esm/cli/commands/uploadFile.js +2 -1
- package/lib/esm/cli/commands/uploadFolder.js +5 -1
- package/lib/esm/cli/options.js +22 -1
- package/lib/esm/cli/types.js +0 -15
- package/lib/esm/cli/utils.js +13 -0
- package/lib/esm/common/chunked.js +407 -0
- package/lib/esm/common/http.js +1 -0
- package/lib/esm/common/turbo.js +10 -2
- package/lib/esm/common/upload.js +42 -10
- package/lib/esm/types.js +13 -0
- package/lib/esm/version.js +1 -1
- package/lib/types/cli/commands/uploadFile.d.ts.map +1 -1
- package/lib/types/cli/commands/uploadFolder.d.ts.map +1 -1
- package/lib/types/cli/options.d.ts +58 -2
- package/lib/types/cli/options.d.ts.map +1 -1
- package/lib/types/cli/types.d.ts +5 -0
- package/lib/types/cli/types.d.ts.map +1 -1
- package/lib/types/cli/utils.d.ts +3 -2
- package/lib/types/cli/utils.d.ts.map +1 -1
- package/lib/types/common/chunked.d.ts +48 -0
- package/lib/types/common/chunked.d.ts.map +1 -0
- package/lib/types/common/http.d.ts +1 -1
- package/lib/types/common/http.d.ts.map +1 -1
- package/lib/types/common/turbo.d.ts +2 -2
- package/lib/types/common/turbo.d.ts.map +1 -1
- package/lib/types/common/upload.d.ts +3 -3
- package/lib/types/common/upload.d.ts.map +1 -1
- package/lib/types/types.d.ts +48 -4
- package/lib/types/types.d.ts.map +1 -1
- package/lib/types/version.d.ts +1 -1
- package/package.json +9 -5
package/lib/cjs/types.js
CHANGED
```diff
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.isJWK = exports.isWebUploadFolderParams = exports.isNodeUploadFolderParams = exports.tokenTypes = exports.fiatCurrencyTypes = void 0;
+exports.validChunkingModes = exports.isJWK = exports.isWebUploadFolderParams = exports.isNodeUploadFolderParams = exports.multipartFinalizedStatus = exports.multipartFailedStatus = exports.multipartPendingStatus = exports.tokenTypes = exports.fiatCurrencyTypes = void 0;
 exports.isCurrency = isCurrency;
 exports.isKyvePrivateKey = isKyvePrivateKey;
 exports.isEthPrivateKey = isEthPrivateKey;
@@ -31,6 +31,18 @@ exports.tokenTypes = [
     'pol',
     'base-eth',
 ];
+exports.multipartPendingStatus = [
+    'ASSEMBLING',
+    'VALIDATING',
+    'FINALIZING',
+];
+exports.multipartFailedStatus = [
+    'UNDERFUNDED',
+    'INVALID',
+    'APPROVAL_FAILED',
+    'REVOKE_FAILED',
+];
+exports.multipartFinalizedStatus = ['FINALIZED'];
 const isNodeUploadFolderParams = (p) => p.folderPath !== undefined;
 exports.isNodeUploadFolderParams = isNodeUploadFolderParams;
 const isWebUploadFolderParams = (p) => p.files !== undefined;
@@ -54,3 +66,4 @@ function isSolanaWalletAdapter(walletAdapter) {
 function isEthereumWalletAdapter(walletAdapter) {
     return 'getSigner' in walletAdapter;
 }
+exports.validChunkingModes = ['force', 'disabled', 'auto'];
```
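The new multipart status arrays and `validChunkingModes` are plain exported constants, so downstream code can use them for status checks. A minimal sketch (the root import path and exact typings are assumptions):

```typescript
import {
  multipartFailedStatus,
  multipartFinalizedStatus,
  multipartPendingStatus,
} from '@ardrive/turbo-sdk'; // assumed re-export from the package root

// Widen the (likely literal-typed) arrays to readonly string[] for includes().
function classifyMultipartStatus(
  status: string,
): 'pending' | 'failed' | 'finalized' | 'unknown' {
  if ((multipartPendingStatus as readonly string[]).includes(status)) return 'pending';
  if ((multipartFailedStatus as readonly string[]).includes(status)) return 'failed';
  if ((multipartFinalizedStatus as readonly string[]).includes(status)) return 'finalized';
  return 'unknown';
}

console.log(classifyMultipartStatus('ASSEMBLING')); // 'pending'
```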
package/lib/esm/cli/commands/uploadFile.js
CHANGED
```diff
@@ -15,7 +15,7 @@
  */
 import { createReadStream, statSync } from 'fs';
 import { turboCliTags } from '../constants.js';
-import { getTagsFromOptions, paidByFromOptions, turboFromOptions, } from '../utils.js';
+import { getChunkingOptions, getTagsFromOptions, paidByFromOptions, turboFromOptions, } from '../utils.js';
 export async function uploadFile(options) {
     const { filePath } = options;
     if (filePath === undefined) {
@@ -29,6 +29,7 @@ export async function uploadFile(options) {
         fileStreamFactory: () => createReadStream(filePath),
         fileSizeFactory: () => fileSize,
         dataItemOpts: { tags: [...turboCliTags, ...customTags], paidBy },
+        ...getChunkingOptions(options),
     });
     console.log('Uploaded file:', JSON.stringify(result, null, 2));
 }
```
package/lib/esm/cli/commands/uploadFolder.js
CHANGED
```diff
@@ -18,7 +18,7 @@ import { getTagsFromOptions, getUploadFolderOptions, paidByFromOptions, turboFro
 export async function uploadFolder(options) {
     const turbo = await turboFromOptions(options);
     const paidBy = await paidByFromOptions(options, turbo);
-    const { disableManifest, fallbackFile, folderPath, indexFile, maxConcurrentUploads, } = getUploadFolderOptions(options);
+    const { disableManifest, fallbackFile, folderPath, indexFile, maxConcurrentUploads, chunkByteCount, chunkingMode, maxChunkConcurrency, maxFinalizeMs, } = getUploadFolderOptions(options);
     const customTags = getTagsFromOptions(options);
     const result = await turbo.uploadFolder({
         folderPath: folderPath,
@@ -29,6 +29,10 @@ export async function uploadFolder(options) {
             fallbackFile,
         },
         maxConcurrentUploads,
+        chunkByteCount,
+        chunkingMode,
+        maxChunkConcurrency,
+        maxFinalizeMs,
     });
     console.log('Uploaded folder:', JSON.stringify(result, null, 2));
 }
```
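These four options now flow from the CLI through `getUploadFolderOptions` into `turbo.uploadFolder`. A hedged sketch of the equivalent SDK call (the `TurboFactory` bootstrap and wallet source are assumptions; the chunking parameters mirror the diff above):

```typescript
import { TurboFactory } from '@ardrive/turbo-sdk';

// Assumed wallet bootstrap; only the chunking options below come from the diff.
const jwk = JSON.parse(process.env.ARWEAVE_WALLET ?? '{}');
const turbo = TurboFactory.authenticated({ privateKey: jwk });

const result = await turbo.uploadFolder({
  folderPath: './public',
  maxConcurrentUploads: 3,          // concurrent files
  chunkingMode: 'auto',             // 'auto' | 'force' | 'disabled'
  chunkByteCount: 10 * 1024 * 1024, // per-chunk size (5 MiB–500 MiB allowed)
  maxChunkConcurrency: 5,           // concurrent chunks per file
  maxFinalizeMs: 120_000,           // finalization wait budget
});
console.log(JSON.stringify(result, null, 2));
```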
package/lib/esm/cli/options.js
CHANGED
```diff
@@ -131,7 +131,7 @@ export const optionMap = {
     },
     maxConcurrency: {
         alias: '--max-concurrency <maxConcurrency>',
-        description: 'Maximum number of concurrent uploads',
+        description: 'Maximum number of concurrent file uploads',
     },
     paidBy: {
         alias: '--paid-by <paidBy...>',
@@ -156,6 +156,23 @@ export const optionMap = {
         alias: '--byte-count <byteCount>',
         description: 'Number of bytes to use for the action',
     },
+    maxChunkConcurrency: {
+        alias: '--max-chunk-concurrency <maxChunkConcurrency>',
+        description: 'Maximum number of concurrent chunks to upload per file',
+    },
+    maxFinalizeMs: {
+        alias: '--max-finalize-ms <maxFinalizeMs>',
+        description: 'Maximum time in milliseconds to wait for the finalization of all chunks after the last chunk is uploaded. Defaults to 1 minute per GiB of the total file size.',
+    },
+    chunkByteCount: {
+        alias: '--chunk-byte-count <chunkByteCount>',
+        description: 'Size of each chunk in bytes',
+    },
+    chunkingMode: {
+        alias: '--chunking-mode <chunkingMode>',
+        description: 'Chunking mode to use for the upload. Can be "auto", "force" or "disabled". Defaults to "auto".',
+        default: 'auto',
+    },
 };
 export const walletOptions = [
     optionMap.walletFile,
@@ -179,6 +196,10 @@ export const uploadOptions = [
     optionMap.ignoreApprovals,
     optionMap.useSignerBalanceFirst,
     optionMap.tags,
+    optionMap.maxChunkConcurrency,
+    optionMap.maxFinalizeMs,
+    optionMap.chunkByteCount,
+    optionMap.chunkingMode,
 ];
 export const uploadFolderOptions = [
     ...uploadOptions,
```
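The `optionMap` entries follow Commander's `(flags, description, default)` shape, which suggests how they are registered. A hypothetical sketch, not the CLI's actual wiring:

```typescript
import { Command } from 'commander';

// Shape copied from the diff above; the registration itself is illustrative.
const chunkingMode = {
  alias: '--chunking-mode <chunkingMode>',
  description:
    'Chunking mode to use for the upload. Can be "auto", "force" or "disabled". Defaults to "auto".',
  default: 'auto',
};

const program = new Command().option(
  chunkingMode.alias,
  chunkingMode.description,
  chunkingMode.default,
);

program.parse(['node', 'turbo', '--chunking-mode', 'force']);
console.log(program.opts().chunkingMode); // 'force'
```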
package/lib/esm/cli/types.js
CHANGED
```diff
@@ -1,16 +1 @@
-/**
- * Copyright (C) 2022-2024 Permanent Data Solutions, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
 export {};
```
package/lib/esm/cli/utils.js
CHANGED
```diff
@@ -214,6 +214,7 @@ export function getUploadFolderOptions(options) {
         fallbackFile: options.fallbackFile,
         disableManifest: !options.manifest,
         maxConcurrentUploads: +(options.maxConcurrency ?? 1),
+        ...getChunkingOptions(options),
     };
 }
 /**
@@ -261,3 +262,15 @@ export function requiredByteCountFromOptions({ byteCount, }) {
     }
     return byteCountValue;
 }
+export function getChunkingOptions(options) {
+    return {
+        chunkingMode: options.chunkingMode,
+        chunkByteCount: options.chunkByteCount !== undefined
+            ? +options.chunkByteCount
+            : undefined,
+        maxChunkConcurrency: options.maxChunkConcurrency !== undefined
+            ? +options.maxChunkConcurrency
+            : undefined,
+        maxFinalizeMs: options.maxFinalizeMs !== undefined ? +options.maxFinalizeMs : undefined,
+    };
+}
```
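`getChunkingOptions` coerces Commander's string-valued flags with unary plus, so malformed input becomes `NaN`, which `assertChunkParams` in the new `chunked.js` module (next) rejects at construction time. A minimal illustration of that coercion:

```typescript
// Mirrors the helper's behavior: strings become numbers, unset stays undefined.
const coerce = (v?: string): number | undefined =>
  v !== undefined ? +v : undefined;

console.log(coerce('5242880'));  // 5242880
console.log(coerce(undefined));  // undefined
console.log(coerce('five MiB')); // NaN -> later rejected by assertChunkParams
```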
package/lib/esm/common/chunked.js
ADDED
```diff
@@ -0,0 +1,407 @@
+/**
+ * Copyright (C) 2022-2024 Permanent Data Solutions, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { CanceledError } from 'axios';
+import { pLimit } from 'plimit-lit';
+import { validChunkingModes, } from '../types.js';
+import { sleep } from '../utils/common.js';
+import { FailedRequestError } from '../utils/errors.js';
+import { TurboEventEmitter, createStreamWithUploadEvents } from './events.js';
+import { TurboWinstonLogger } from './logger.js';
+const fiveMiB = 5 * 1024 * 1024; // 5 MiB
+const fiveHundredMiB = fiveMiB * 100; // 500 MiB
+export const defaultMaxChunkConcurrency = 5;
+export const maxChunkByteCount = fiveHundredMiB;
+export const minChunkByteCount = fiveMiB;
+export const defaultChunkByteCount = minChunkByteCount;
+const backlogQueueFactor = 2;
+const chunkingHeader = { 'x-chunking-version': '2' };
+/**
+ * Performs a chunked upload by splitting the stream into fixed-size buffers,
+ * uploading them in parallel, and emitting progress/error events.
+ */
+export class ChunkedUploader {
+    constructor({ http, token, maxChunkConcurrency = defaultMaxChunkConcurrency, maxFinalizeMs, chunkByteCount = defaultChunkByteCount, logger = TurboWinstonLogger.default, chunkingMode = 'auto', dataItemByteCount, }) {
+        this.assertChunkParams({
+            chunkByteCount,
+            chunkingMode,
+            maxChunkConcurrency,
+            maxFinalizeMs,
+        });
+        this.chunkByteCount = chunkByteCount;
+        this.maxChunkConcurrency = maxChunkConcurrency;
+        this.maxFinalizeMs = maxFinalizeMs;
+        this.http = http;
+        this.token = token;
+        this.logger = logger;
+        this.shouldUseChunkUploader = this.shouldChunkUpload({
+            chunkByteCount,
+            chunkingMode,
+            dataItemByteCount,
+        });
+        this.maxBacklogQueue = this.maxChunkConcurrency * backlogQueueFactor;
+    }
+    shouldChunkUpload({ chunkByteCount, chunkingMode, dataItemByteCount, }) {
+        if (chunkingMode === 'disabled') {
+            return false;
+        }
+        if (chunkingMode === 'force') {
+            return true;
+        }
+        const isMoreThanTwoChunksOfData = dataItemByteCount > chunkByteCount * 2;
+        return isMoreThanTwoChunksOfData;
+    }
+    assertChunkParams({ chunkByteCount, chunkingMode, maxChunkConcurrency, maxFinalizeMs, }) {
+        if (maxFinalizeMs !== undefined &&
+            (Number.isNaN(maxFinalizeMs) ||
+                !Number.isInteger(maxFinalizeMs) ||
+                maxFinalizeMs < 0)) {
+            throw new Error('Invalid max finalization wait time. Must be a non-negative integer.');
+        }
+        if (Number.isNaN(maxChunkConcurrency) ||
+            !Number.isInteger(maxChunkConcurrency) ||
+            maxChunkConcurrency < 1) {
+            throw new Error('Invalid max chunk concurrency. Must be an integer of at least 1.');
+        }
+        if (Number.isNaN(chunkByteCount) ||
+            !Number.isInteger(chunkByteCount) ||
+            chunkByteCount < fiveMiB ||
+            chunkByteCount > fiveHundredMiB) {
+            throw new Error('Invalid chunk size. Must be an integer between 5 MiB and 500 MiB.');
+        }
+        if (typeof chunkingMode !== 'string' ||
+            !validChunkingModes.includes(chunkingMode)) {
+            throw new Error(`Invalid chunking mode. Must be one of: ${validChunkingModes.join(', ')}`);
+        }
+    }
+    /**
+     * Initialize or resume an upload session, returning the upload ID.
+     */
+    async initUpload() {
+        const res = await this.http.get({
+            endpoint: `/chunks/${this.token}/-1/-1?chunkSize=${this.chunkByteCount}`,
+            headers: chunkingHeader,
+        });
+        if (res.chunkSize !== this.chunkByteCount) {
+            this.logger.warn('Chunk size mismatch! Overriding with server value.', {
+                clientExpected: this.chunkByteCount,
+                serverReturned: res.chunkSize,
+            });
+            this.chunkByteCount = res.chunkSize;
+        }
+        return res.id;
+    }
+    async upload({ dataItemSizeFactory, dataItemStreamFactory, dataItemOpts, signal, events, }) {
+        const uploadId = await this.initUpload();
+        const dataItemByteCount = dataItemSizeFactory();
+        const emitter = new TurboEventEmitter(events);
+        const { stream, resume } = createStreamWithUploadEvents({
+            data: dataItemStreamFactory(),
+            dataSize: dataItemByteCount,
+            emitter,
+        });
+        this.logger.debug(`Starting chunked upload`, {
+            token: this.token,
+            uploadId,
+            totalSize: dataItemByteCount,
+            chunkByteCount: this.chunkByteCount,
+            maxChunkConcurrency: this.maxChunkConcurrency,
+            inputStreamType: isReadableStream(stream) ? 'ReadableStream' : 'Readable',
+        });
+        const inFlight = new Set();
+        const internalAbort = new AbortController();
+        const combinedSignal = combineAbortSignals([internalAbort.signal, signal]);
+        const limit = pLimit(this.maxChunkConcurrency);
+        let currentOffset = 0;
+        let currentChunkPartNumber = 0;
+        let firstError;
+        let uploadedBytes = 0;
+        const chunks = splitIntoChunks(stream, this.chunkByteCount);
+        resume();
+        for await (const chunk of chunks) {
+            if (combinedSignal?.aborted) {
+                internalAbort.abort();
+                await Promise.allSettled(inFlight);
+                firstError ??= new CanceledError();
+                break;
+            }
+            const chunkPartNumber = ++currentChunkPartNumber;
+            const chunkByteCount = chunk.length;
+            const chunkOffset = currentOffset;
+            currentOffset += chunkByteCount;
+            const promise = limit(async () => {
+                if (firstError !== undefined) {
+                    return;
+                }
+                this.logger.debug('Uploading chunk', {
+                    chunkPartNumber,
+                    chunkOffset,
+                    chunkByteCount,
+                });
+                await this.http.post({
+                    endpoint: `/chunks/${this.token}/${uploadId}/${chunkOffset}`,
+                    data: chunk,
+                    headers: {
+                        'Content-Type': 'application/octet-stream',
+                        ...chunkingHeader,
+                    },
+                    signal: combinedSignal,
+                });
+                uploadedBytes += chunkByteCount;
+                this.logger.debug('Chunk uploaded', {
+                    chunkPartNumber,
+                    chunkOffset,
+                    chunkByteCount,
+                });
+                emitter.emit('upload-progress', {
+                    processedBytes: uploadedBytes,
+                    totalBytes: dataItemByteCount,
+                });
+            }).catch((err) => {
+                this.logger.error('Chunk upload failed', {
+                    id: chunkPartNumber,
+                    offset: chunkOffset,
+                    size: chunkByteCount,
+                    err,
+                });
+                emitter.emit('upload-error', err);
+                internalAbort.abort(err);
+                firstError = firstError ?? err;
+            });
+            inFlight.add(promise);
+            promise.finally(() => inFlight.delete(promise));
+            if (inFlight.size >= this.maxBacklogQueue) {
+                await Promise.race(inFlight);
+                if (combinedSignal?.aborted) {
+                    internalAbort.abort();
+                    await Promise.allSettled(inFlight);
+                    firstError ??= new CanceledError();
+                    break;
+                }
+            }
+        }
+        await Promise.all(inFlight);
+        if (firstError !== undefined) {
+            throw firstError;
+        }
+        const finalizeResponse = await this.finalizeUpload(uploadId, dataItemByteCount, dataItemOpts?.paidBy, combinedSignal);
+        emitter.emit('upload-success');
+        return finalizeResponse;
+    }
+    toGiB(bytes) {
+        return bytes / 1024 ** 3;
+    }
+    async finalizeUpload(uploadId, dataItemByteCount, paidBy, signal) {
+        // Wait up to 1 minute per GiB of data for the upload to finalize
+        const fileSizeInGiB = Math.ceil(this.toGiB(dataItemByteCount));
+        const defaultMaxWaitTimeMins = fileSizeInGiB;
+        const maxWaitTimeMs = this.maxFinalizeMs ?? defaultMaxWaitTimeMins * 60 * 1000;
+        const minimumWaitPerStepMs =
+        // Per step, files smaller than 100MB will wait 2 seconds,
+        dataItemByteCount < 1024 * 1024 * 100
+            ? 2000
+            : // files smaller than 3 GiB will wait 3 seconds,
+                dataItemByteCount < 1024 * 1024 * 1024 * 3
+                    ? 3000
+                    : // and larger files will wait 1 second per GiB with max of 10 seconds
+                        Math.max(1000 * fileSizeInGiB, 10000);
+        const paidByHeader = {};
+        if (paidBy !== undefined) {
+            paidByHeader['x-paid-by'] = Array.isArray(paidBy)
+                ? paidBy.join(',')
+                : paidBy;
+        }
+        await this.http.post({
+            endpoint: `/chunks/${this.token}/${uploadId}/finalize`,
+            data: Buffer.alloc(0),
+            headers: {
+                'Content-Type': 'application/octet-stream',
+                ...paidByHeader,
+                ...chunkingHeader,
+            },
+            signal,
+        });
+        this.logger.debug(`Confirming upload to Turbo with uploadId ${uploadId} for up to ${defaultMaxWaitTimeMins} minutes.`);
+        const startTime = Date.now();
+        const cutoffTime = startTime + maxWaitTimeMs;
+        let attempts = 0;
+        while (Date.now() < cutoffTime) {
+            // Wait for 3/4 of the time remaining per attempt or minimum step
+            const waitTimeMs = Math.min(Math.floor((cutoffTime - Date.now()) * (3 / 4)), minimumWaitPerStepMs);
+            await sleep(waitTimeMs);
+            if (signal?.aborted) {
+                this.logger.warn(`Upload finalization aborted by signal.`);
+                throw new CanceledError();
+            }
+            const response = await this.http.get({
+                endpoint: `/chunks/${this.token}/${uploadId}/status`,
+                signal,
+            });
+            this.logger.debug(`Upload status found: ${response.status}`, {
+                status: response.status,
+                attempts: attempts++,
+                maxWaitTimeMs,
+                minimumWaitPerStepMs,
+                waitTimeMs,
+                elapsedMs: Date.now() - startTime,
+            });
+            if (response.status === 'FINALIZED') {
+                this.logger.debug(`Upload finalized successfully.`);
+                return response.receipt;
+            }
+            if (response.status === 'UNDERFUNDED') {
+                throw new FailedRequestError(`Insufficient balance`, 402);
+            }
+        }
+        throw new Error(`Upload multi-part finalization has timed out for Upload ID ${uploadId}`);
+    }
+}
+/**
+ * Yield Buffers of up to `chunkByteCount`, coalescing whatever small pieces
+ * the source produces into proper slices.
+ */
+export async function* splitIntoChunks(source, chunkByteCount) {
+    if (isReadableStream(source)) {
+        yield* splitReadableStreamIntoChunks(source, chunkByteCount);
+    }
+    else {
+        yield* splitReadableIntoChunks(source, chunkByteCount);
+    }
+}
+export async function* splitReadableIntoChunks(source, chunkByteCount) {
+    const queue = [];
+    let total = 0;
+    let encoder;
+    for await (const piece of source) {
+        const u8 = piece instanceof Uint8Array
+            ? new Uint8Array(piece.buffer, piece.byteOffset, piece.byteLength)
+            : (encoder ??= new TextEncoder()).encode(String(piece));
+        queue.push(u8);
+        total += u8.length;
+        // Emit full chunks
+        while (total >= chunkByteCount) {
+            const out = new Uint8Array(chunkByteCount);
+            let remaining = out.length;
+            let off = 0;
+            while (remaining > 0) {
+                const head = queue[0];
+                const take = Math.min(remaining, head.length);
+                out.set(head.subarray(0, take), off);
+                off += take;
+                remaining -= take;
+                if (take === head.length) {
+                    queue.shift();
+                }
+                else {
+                    queue[0] = head.subarray(take);
+                }
+            }
+            total -= chunkByteCount;
+            // Yield a Buffer view (no copy)
+            yield Buffer.from(out.buffer, out.byteOffset, out.byteLength);
+        }
+    }
+    // Remainder
+    if (total > 0) {
+        const out = new Uint8Array(total);
+        let off = 0;
+        while (queue.length > 0) {
+            const head = queue.shift(); // safe due to loop condition
+            out.set(head, off);
+            off += head.length;
+        }
+        yield Buffer.from(out.buffer, out.byteOffset, out.byteLength);
+    }
+}
+export async function* splitReadableStreamIntoChunks(source, chunkByteCount) {
+    const reader = source.getReader();
+    const queue = [];
+    let total = 0;
+    try {
+        while (true) {
+            const { value, done } = await reader.read();
+            if (done)
+                break;
+            // Ensure we keep a plain view (avoids surprises if the producer reuses buffers)
+            const u8 = new Uint8Array(value.buffer, value.byteOffset, value.byteLength);
+            queue.push(u8);
+            total += u8.length;
+            while (total >= chunkByteCount) {
+                const out = new Uint8Array(chunkByteCount);
+                let remaining = out.length;
+                let off = 0;
+                while (remaining > 0) {
+                    const head = queue[0];
+                    const take = Math.min(remaining, head.length);
+                    out.set(head.subarray(0, take), off);
+                    off += take;
+                    remaining -= take;
+                    if (take === head.length) {
+                        queue.shift();
+                    }
+                    else {
+                        queue[0] = head.subarray(take);
+                    }
+                }
+                total -= chunkByteCount;
+                yield Buffer.from(out.buffer, out.byteOffset, out.byteLength);
+            }
+        }
+        if (total > 0) {
+            const out = new Uint8Array(total);
+            let off = 0;
+            while (queue.length > 0) {
+                const head = queue.shift(); // safe due to loop condition
+                out.set(head, off);
+                off += head.length;
+            }
+            yield Buffer.from(out.buffer, out.byteOffset, out.byteLength);
+        }
+    }
+    finally {
+        reader.releaseLock();
+    }
+}
+function isReadableStream(source) {
+    // Prefer instanceof if available, otherwise use a safe duck-typing check
+    if (typeof ReadableStream !== 'undefined' &&
+        source instanceof ReadableStream) {
+        return true;
+    }
+    return (source !== null &&
+        typeof source === 'object' &&
+        'getReader' in source &&
+        typeof source.getReader === 'function');
+}
+function combineAbortSignals(signals) {
+    const real = signals.filter(Boolean);
+    if (real.length === 0)
+        return undefined;
+    const anyFn = AbortSignal.any;
+    if (typeof anyFn === 'function') {
+        return anyFn(real);
+    }
+    const controller = new AbortController();
+    for (const s of real) {
+        const sig = s;
+        if (sig.aborted) {
+            controller.abort(sig.reason);
+            break;
+        }
+        const onAbort = () => controller.abort(sig.reason);
+        s.addEventListener('abort', onAbort, { once: true });
+    }
+    return controller.signal;
+}
```
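The splitter generators are exported from the module, so the coalescing behavior can be exercised on its own. A sketch (the deep import path is an assumption about how the published build exposes the file):

```typescript
import { Readable } from 'node:stream';
// Assumed deep import; adjust to however the package exposes the module.
import { splitIntoChunks } from '@ardrive/turbo-sdk/lib/esm/common/chunked.js';

const source = Readable.from([
  Buffer.alloc(3 * 1024 * 1024), // 3 MiB piece
  Buffer.alloc(4 * 1024 * 1024), // 4 MiB piece
]);

// Pieces are coalesced into fixed 5 MiB chunks plus a remainder:
// logs 5242880, then 2097152.
for await (const chunk of splitIntoChunks(source, 5 * 1024 * 1024)) {
  console.log(chunk.length);
}
```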
package/lib/esm/common/http.js
CHANGED
```diff
@@ -42,6 +42,7 @@ export class TurboHTTPService {
         // See: https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API#body
         const { body, duplex } = await toFetchBody(data);
         try {
+            this.logger.debug('Posting data via fetch', { endpoint, headers });
             const res = await fetch(this.axios.defaults.baseURL + endpoint, {
                 method: 'POST',
                 headers,
```
package/lib/esm/common/turbo.js
CHANGED
```diff
@@ -156,8 +156,16 @@ export class TurboAuthenticatedClient extends TurboUnauthenticatedClient {
     /**
      * Signs and uploads raw data to the Turbo Upload Service.
      */
-    upload({ data, dataItemOpts, signal, events, }) {
-        return this.uploadService.upload({
+    upload({ data, dataItemOpts, signal, events, chunkByteCount, chunkingMode, maxChunkConcurrency, }) {
+        return this.uploadService.upload({
+            data,
+            dataItemOpts,
+            signal,
+            events,
+            chunkByteCount,
+            chunkingMode,
+            maxChunkConcurrency,
+        });
     }
     uploadFile(params) {
         return this.uploadService.uploadFile(params);
```
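`TurboAuthenticatedClient.upload` now forwards the chunking parameters to the upload service. A hedged sketch of the widened signature in use (wallet bootstrap assumed; parameter names come from the diff):

```typescript
import { TurboFactory } from '@ardrive/turbo-sdk';

const turbo = TurboFactory.authenticated({
  privateKey: JSON.parse(process.env.ARWEAVE_WALLET ?? '{}'), // assumed wallet source
});

const receipt = await turbo.upload({
  data: Buffer.from('hello, chunked uploads'),
  dataItemOpts: { tags: [{ name: 'Content-Type', value: 'text/plain' }] },
  chunkingMode: 'force',           // chunk even below the auto threshold
  chunkByteCount: 5 * 1024 * 1024, // minimum allowed chunk size
  maxChunkConcurrency: 3,
});
console.log(JSON.stringify(receipt, null, 2));
```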