@ardrive/turbo-sdk 1.30.0 → 1.31.0-alpha.2

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (44)
  1. package/README.md +36 -2
  2. package/bundles/web.bundle.min.js +490 -17
  3. package/lib/cjs/cli/commands/uploadFile.js +1 -0
  4. package/lib/cjs/cli/commands/uploadFolder.js +5 -1
  5. package/lib/cjs/cli/options.js +22 -1
  6. package/lib/cjs/cli/types.js +0 -15
  7. package/lib/cjs/cli/utils.js +14 -0
  8. package/lib/cjs/common/chunked.js +414 -0
  9. package/lib/cjs/common/http.js +1 -0
  10. package/lib/cjs/common/turbo.js +10 -2
  11. package/lib/cjs/common/upload.js +42 -10
  12. package/lib/cjs/types.js +14 -1
  13. package/lib/cjs/version.js +1 -1
  14. package/lib/esm/cli/commands/uploadFile.js +2 -1
  15. package/lib/esm/cli/commands/uploadFolder.js +5 -1
  16. package/lib/esm/cli/options.js +22 -1
  17. package/lib/esm/cli/types.js +0 -15
  18. package/lib/esm/cli/utils.js +13 -0
  19. package/lib/esm/common/chunked.js +407 -0
  20. package/lib/esm/common/http.js +1 -0
  21. package/lib/esm/common/turbo.js +10 -2
  22. package/lib/esm/common/upload.js +42 -10
  23. package/lib/esm/types.js +13 -0
  24. package/lib/esm/version.js +1 -1
  25. package/lib/types/cli/commands/uploadFile.d.ts.map +1 -1
  26. package/lib/types/cli/commands/uploadFolder.d.ts.map +1 -1
  27. package/lib/types/cli/options.d.ts +58 -2
  28. package/lib/types/cli/options.d.ts.map +1 -1
  29. package/lib/types/cli/types.d.ts +5 -0
  30. package/lib/types/cli/types.d.ts.map +1 -1
  31. package/lib/types/cli/utils.d.ts +3 -2
  32. package/lib/types/cli/utils.d.ts.map +1 -1
  33. package/lib/types/common/chunked.d.ts +48 -0
  34. package/lib/types/common/chunked.d.ts.map +1 -0
  35. package/lib/types/common/http.d.ts +1 -1
  36. package/lib/types/common/http.d.ts.map +1 -1
  37. package/lib/types/common/turbo.d.ts +2 -2
  38. package/lib/types/common/turbo.d.ts.map +1 -1
  39. package/lib/types/common/upload.d.ts +3 -3
  40. package/lib/types/common/upload.d.ts.map +1 -1
  41. package/lib/types/types.d.ts +48 -4
  42. package/lib/types/types.d.ts.map +1 -1
  43. package/lib/types/version.d.ts +1 -1
  44. package/package.json +9 -5
package/lib/cjs/cli/commands/uploadFolder.js
@@ -21,7 +21,7 @@ const utils_js_1 = require("../utils.js");
  async function uploadFolder(options) {
  const turbo = await (0, utils_js_1.turboFromOptions)(options);
  const paidBy = await (0, utils_js_1.paidByFromOptions)(options, turbo);
- const { disableManifest, fallbackFile, folderPath, indexFile, maxConcurrentUploads, } = (0, utils_js_1.getUploadFolderOptions)(options);
+ const { disableManifest, fallbackFile, folderPath, indexFile, maxConcurrentUploads, chunkByteCount, chunkingMode, maxChunkConcurrency, maxFinalizeMs, } = (0, utils_js_1.getUploadFolderOptions)(options);
  const customTags = (0, utils_js_1.getTagsFromOptions)(options);
  const result = await turbo.uploadFolder({
  folderPath: folderPath,
@@ -32,6 +32,10 @@ async function uploadFolder(options) {
  fallbackFile,
  },
  maxConcurrentUploads,
+ chunkByteCount,
+ chunkingMode,
+ maxChunkConcurrency,
+ maxFinalizeMs,
  });
  console.log('Uploaded folder:', JSON.stringify(result, null, 2));
  }
package/lib/cjs/cli/options.js
@@ -134,7 +134,7 @@ exports.optionMap = {
  },
  maxConcurrency: {
  alias: '--max-concurrency <maxConcurrency>',
- description: 'Maximum number of concurrent uploads',
+ description: 'Maximum number of concurrent file uploads',
  },
  paidBy: {
  alias: '--paid-by <paidBy...>',
@@ -159,6 +159,23 @@ exports.optionMap = {
  alias: '--byte-count <byteCount>',
  description: 'Number of bytes to use for the action',
  },
+ maxChunkConcurrency: {
+ alias: '--max-chunk-concurrency <maxChunkConcurrency>',
+ description: 'Maximum number of concurrent chunks to upload per file',
+ },
+ maxFinalizeMs: {
+ alias: '--max-finalize-ms <maxFinalizeMs>',
+ description: 'Maximum time in milliseconds to wait for the finalization of all chunks after the last chunk is uploaded. Defaults to 1 minute per GiB of the total file size.',
+ },
+ chunkByteCount: {
+ alias: '--chunk-byte-count <chunkByteCount>',
+ description: 'Size of each chunk in bytes',
+ },
+ chunkingMode: {
+ alias: '--chunking-mode <chunkingMode>',
+ description: 'Chunking mode to use for the upload. Can be "auto", "force" or "disabled". Defaults to "auto".',
+ default: 'auto',
+ },
  };
  exports.walletOptions = [
  exports.optionMap.walletFile,
@@ -182,6 +199,10 @@ exports.uploadOptions = [
  exports.optionMap.ignoreApprovals,
  exports.optionMap.useSignerBalanceFirst,
  exports.optionMap.tags,
+ exports.optionMap.maxChunkConcurrency,
+ exports.optionMap.maxFinalizeMs,
+ exports.optionMap.chunkByteCount,
+ exports.optionMap.chunkingMode,
  ];
  exports.uploadFolderOptions = [
  ...exports.uploadOptions,
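Taken together with the uploadFolder.js change above, these flags map one-to-one onto SDK parameters. A minimal TypeScript sketch of the equivalent programmatic call (the wallet and folder paths are hypothetical; the factory setup follows the SDK's README):

import { TurboFactory } from '@ardrive/turbo-sdk/node';
import fs from 'node:fs';

const jwk = JSON.parse(fs.readFileSync('./wallet.json', 'utf-8')); // hypothetical wallet path
const turbo = TurboFactory.authenticated({ privateKey: jwk });

const result = await turbo.uploadFolder({
  folderPath: './public',           // hypothetical folder
  maxConcurrentUploads: 3,          // --max-concurrency
  chunkingMode: 'auto',             // --chunking-mode ('auto' | 'force' | 'disabled')
  chunkByteCount: 10 * 1024 * 1024, // --chunk-byte-count (5 MiB to 500 MiB)
  maxChunkConcurrency: 5,           // --max-chunk-concurrency
  maxFinalizeMs: 120_000,           // --max-finalize-ms
});
console.log('Uploaded folder:', JSON.stringify(result, null, 2));

With chunkingMode 'auto', the chunked path only engages when a signed data item is larger than two chunks' worth of bytes (see shouldChunkUpload in chunked.js below).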
package/lib/cjs/cli/types.js
@@ -1,17 +1,2 @@
  "use strict";
- /**
- * Copyright (C) 2022-2024 Permanent Data Solutions, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
  Object.defineProperty(exports, "__esModule", { value: true });
package/lib/cjs/cli/utils.js
@@ -20,6 +20,7 @@ exports.parseTags = parseTags;
  exports.getTagsFromOptions = getTagsFromOptions;
  exports.currencyFromOptions = currencyFromOptions;
  exports.requiredByteCountFromOptions = requiredByteCountFromOptions;
+ exports.getChunkingOptions = getChunkingOptions;
  /**
  * Copyright (C) 2022-2024 Permanent Data Solutions, Inc.
  *
@@ -236,6 +237,7 @@ function getUploadFolderOptions(options) {
  fallbackFile: options.fallbackFile,
  disableManifest: !options.manifest,
  maxConcurrentUploads: +(options.maxConcurrency ?? 1),
+ ...getChunkingOptions(options),
  };
  }
  /**
@@ -283,3 +285,15 @@ function requiredByteCountFromOptions({ byteCount, }) {
  }
  return byteCountValue;
  }
+ function getChunkingOptions(options) {
+ return {
+ chunkingMode: options.chunkingMode,
+ chunkByteCount: options.chunkByteCount !== undefined
+ ? +options.chunkByteCount
+ : undefined,
+ maxChunkConcurrency: options.maxChunkConcurrency !== undefined
+ ? +options.maxChunkConcurrency
+ : undefined,
+ maxFinalizeMs: options.maxFinalizeMs !== undefined ? +options.maxFinalizeMs : undefined,
+ };
+ }
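Commander hands every option value over as a string, so getChunkingOptions leans on unary + for numeric coercion. A quick sketch of what reaches the SDK (inputs hypothetical):

// Hypothetical parsed CLI options: commander delivers every value as a string.
const options: { [key: string]: string | undefined } = {
  chunkingMode: 'force',      // --chunking-mode force
  chunkByteCount: '10485760', // --chunk-byte-count 10485760
  maxChunkConcurrency: '8',   // --max-chunk-concurrency 8
  maxFinalizeMs: undefined,   // flag omitted
};

const chunking = {
  chunkingMode: options.chunkingMode,
  chunkByteCount:
    options.chunkByteCount !== undefined ? +options.chunkByteCount : undefined,
  maxChunkConcurrency:
    options.maxChunkConcurrency !== undefined
      ? +options.maxChunkConcurrency
      : undefined,
  maxFinalizeMs:
    options.maxFinalizeMs !== undefined ? +options.maxFinalizeMs : undefined,
};
console.log(chunking);
// => { chunkingMode: 'force', chunkByteCount: 10485760, maxChunkConcurrency: 8, maxFinalizeMs: undefined }
// A non-numeric string coerces to NaN here; ChunkedUploader.assertChunkParams rejects NaN downstream.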
package/lib/cjs/common/chunked.js (new file)
@@ -0,0 +1,414 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ChunkedUploader = exports.defaultChunkByteCount = exports.minChunkByteCount = exports.maxChunkByteCount = exports.defaultMaxChunkConcurrency = void 0;
+ exports.splitIntoChunks = splitIntoChunks;
+ exports.splitReadableIntoChunks = splitReadableIntoChunks;
+ exports.splitReadableStreamIntoChunks = splitReadableStreamIntoChunks;
+ /**
+ * Copyright (C) 2022-2024 Permanent Data Solutions, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ const axios_1 = require("axios");
+ const plimit_lit_1 = require("plimit-lit");
+ const types_js_1 = require("../types.js");
+ const common_js_1 = require("../utils/common.js");
+ const errors_js_1 = require("../utils/errors.js");
+ const events_js_1 = require("./events.js");
+ const logger_js_1 = require("./logger.js");
+ const fiveMiB = 5 * 1024 * 1024; // 5 MiB
+ const fiveHundredMiB = fiveMiB * 100; // 500 MiB
+ exports.defaultMaxChunkConcurrency = 5;
+ exports.maxChunkByteCount = fiveHundredMiB;
+ exports.minChunkByteCount = fiveMiB;
+ exports.defaultChunkByteCount = exports.minChunkByteCount;
+ const backlogQueueFactor = 2;
+ const chunkingHeader = { 'x-chunking-version': '2' };
+ /**
+ * Performs a chunked upload by splitting the stream into fixed-size buffers,
+ * uploading them in parallel, and emitting progress/error events.
+ */
+ class ChunkedUploader {
+ constructor({ http, token, maxChunkConcurrency = exports.defaultMaxChunkConcurrency, maxFinalizeMs, chunkByteCount = exports.defaultChunkByteCount, logger = logger_js_1.TurboWinstonLogger.default, chunkingMode = 'auto', dataItemByteCount, }) {
+ this.assertChunkParams({
+ chunkByteCount,
+ chunkingMode,
+ maxChunkConcurrency,
+ maxFinalizeMs,
+ });
+ this.chunkByteCount = chunkByteCount;
+ this.maxChunkConcurrency = maxChunkConcurrency;
+ this.maxFinalizeMs = maxFinalizeMs;
+ this.http = http;
+ this.token = token;
+ this.logger = logger;
+ this.shouldUseChunkUploader = this.shouldChunkUpload({
+ chunkByteCount,
+ chunkingMode,
+ dataItemByteCount,
+ });
+ this.maxBacklogQueue = this.maxChunkConcurrency * backlogQueueFactor;
+ }
+ shouldChunkUpload({ chunkByteCount, chunkingMode, dataItemByteCount, }) {
+ if (chunkingMode === 'disabled') {
+ return false;
+ }
+ if (chunkingMode === 'force') {
+ return true;
+ }
+ const isMoreThanTwoChunksOfData = dataItemByteCount > chunkByteCount * 2;
+ return isMoreThanTwoChunksOfData;
+ }
+ assertChunkParams({ chunkByteCount, chunkingMode, maxChunkConcurrency, maxFinalizeMs, }) {
+ if (maxFinalizeMs !== undefined &&
+ (Number.isNaN(maxFinalizeMs) ||
+ !Number.isInteger(maxFinalizeMs) ||
+ maxFinalizeMs < 0)) {
+ throw new Error('Invalid max finalization wait time. Must be a non-negative integer.');
+ }
+ if (Number.isNaN(maxChunkConcurrency) ||
+ !Number.isInteger(maxChunkConcurrency) ||
+ maxChunkConcurrency < 1) {
+ throw new Error('Invalid max chunk concurrency. Must be an integer of at least 1.');
+ }
+ if (Number.isNaN(chunkByteCount) ||
+ !Number.isInteger(chunkByteCount) ||
+ chunkByteCount < fiveMiB ||
+ chunkByteCount > fiveHundredMiB) {
+ throw new Error('Invalid chunk size. Must be an integer between 5 MiB and 500 MiB.');
+ }
+ if (typeof chunkingMode !== 'string' ||
+ !types_js_1.validChunkingModes.includes(chunkingMode)) {
+ throw new Error(`Invalid chunking mode. Must be one of: ${types_js_1.validChunkingModes.join(', ')}`);
+ }
+ }
+ /**
+ * Initialize or resume an upload session, returning the upload ID.
+ */
+ async initUpload() {
+ const res = await this.http.get({
+ endpoint: `/chunks/${this.token}/-1/-1?chunkSize=${this.chunkByteCount}`,
+ headers: chunkingHeader,
+ });
+ if (res.chunkSize !== this.chunkByteCount) {
+ this.logger.warn('Chunk size mismatch! Overriding with server value.', {
+ clientExpected: this.chunkByteCount,
+ serverReturned: res.chunkSize,
+ });
+ this.chunkByteCount = res.chunkSize;
+ }
+ return res.id;
+ }
+ async upload({ dataItemSizeFactory, dataItemStreamFactory, dataItemOpts, signal, events, }) {
+ const uploadId = await this.initUpload();
+ const dataItemByteCount = dataItemSizeFactory();
+ const emitter = new events_js_1.TurboEventEmitter(events);
+ const { stream, resume } = (0, events_js_1.createStreamWithUploadEvents)({
+ data: dataItemStreamFactory(),
+ dataSize: dataItemByteCount,
+ emitter,
+ });
+ this.logger.debug(`Starting chunked upload`, {
+ token: this.token,
+ uploadId,
+ totalSize: dataItemByteCount,
+ chunkByteCount: this.chunkByteCount,
+ maxChunkConcurrency: this.maxChunkConcurrency,
+ inputStreamType: isReadableStream(stream) ? 'ReadableStream' : 'Readable',
+ });
+ const inFlight = new Set();
+ const internalAbort = new AbortController();
+ const combinedSignal = combineAbortSignals([internalAbort.signal, signal]);
+ const limit = (0, plimit_lit_1.pLimit)(this.maxChunkConcurrency);
+ let currentOffset = 0;
+ let currentChunkPartNumber = 0;
+ let firstError;
+ let uploadedBytes = 0;
+ const chunks = splitIntoChunks(stream, this.chunkByteCount);
+ resume();
+ for await (const chunk of chunks) {
+ if (combinedSignal?.aborted) {
+ internalAbort.abort();
+ await Promise.allSettled(inFlight);
+ firstError ??= new axios_1.CanceledError();
+ break;
+ }
+ const chunkPartNumber = ++currentChunkPartNumber;
+ const chunkByteCount = chunk.length;
+ const chunkOffset = currentOffset;
+ currentOffset += chunkByteCount;
+ const promise = limit(async () => {
+ if (firstError !== undefined) {
+ return;
+ }
+ this.logger.debug('Uploading chunk', {
+ chunkPartNumber,
+ chunkOffset,
+ chunkByteCount,
+ });
+ await this.http.post({
+ endpoint: `/chunks/${this.token}/${uploadId}/${chunkOffset}`,
+ data: chunk,
+ headers: {
+ 'Content-Type': 'application/octet-stream',
+ ...chunkingHeader,
+ },
+ signal: combinedSignal,
+ });
+ uploadedBytes += chunkByteCount;
+ this.logger.debug('Chunk uploaded', {
+ chunkPartNumber,
+ chunkOffset,
+ chunkByteCount,
+ });
+ emitter.emit('upload-progress', {
+ processedBytes: uploadedBytes,
+ totalBytes: dataItemByteCount,
+ });
+ }).catch((err) => {
+ this.logger.error('Chunk upload failed', {
+ id: chunkPartNumber,
+ offset: chunkOffset,
+ size: chunkByteCount,
+ err,
+ });
+ emitter.emit('upload-error', err);
+ internalAbort.abort(err);
+ firstError = firstError ?? err;
+ });
+ inFlight.add(promise);
+ promise.finally(() => inFlight.delete(promise));
+ if (inFlight.size >= this.maxBacklogQueue) {
+ await Promise.race(inFlight);
+ if (combinedSignal?.aborted) {
+ internalAbort.abort();
+ await Promise.allSettled(inFlight);
+ firstError ??= new axios_1.CanceledError();
+ break;
+ }
+ }
+ }
+ await Promise.all(inFlight);
+ if (firstError !== undefined) {
+ throw firstError;
+ }
+ const finalizeResponse = await this.finalizeUpload(uploadId, dataItemByteCount, dataItemOpts?.paidBy, combinedSignal);
+ emitter.emit('upload-success');
+ return finalizeResponse;
+ }
+ toGiB(bytes) {
+ return bytes / 1024 ** 3;
+ }
+ async finalizeUpload(uploadId, dataItemByteCount, paidBy, signal) {
+ // Wait up to 1 minute per GiB of data for the upload to finalize
+ const fileSizeInGiB = Math.ceil(this.toGiB(dataItemByteCount));
+ const defaultMaxWaitTimeMins = fileSizeInGiB;
+ const maxWaitTimeMs = this.maxFinalizeMs ?? defaultMaxWaitTimeMins * 60 * 1000;
+ const minimumWaitPerStepMs =
+ // Per step, files smaller than 100MB will wait 2 second,
+ dataItemByteCount < 1024 * 1024 * 100
+ ? 2000
+ : // files smaller than 3 GiB will wait 3 seconds,
+ dataItemByteCount < 1024 * 1024 * 1024 * 3
+ ? 3000
+ : // and larger files will wait 1 second per GiB with max of 10 seconds
+ Math.max(1000 * fileSizeInGiB, 10000);
+ const paidByHeader = {};
+ if (paidBy !== undefined) {
+ paidByHeader['x-paid-by'] = Array.isArray(paidBy)
+ ? paidBy.join(',')
+ : paidBy;
+ }
+ await this.http.post({
+ endpoint: `/chunks/${this.token}/${uploadId}/finalize`,
+ data: Buffer.alloc(0),
+ headers: {
+ 'Content-Type': 'application/octet-stream',
+ ...paidByHeader,
+ ...chunkingHeader,
+ },
+ signal,
+ });
+ this.logger.debug(`Confirming upload to Turbo with uploadId ${uploadId} for up to ${defaultMaxWaitTimeMins} minutes.`);
+ const startTime = Date.now();
+ const cutoffTime = startTime + maxWaitTimeMs;
+ let attempts = 0;
+ while (Date.now() < cutoffTime) {
+ // Wait for 3/4 of the time remaining per attempt or minimum step
+ const waitTimeMs = Math.min(Math.floor((cutoffTime - Date.now()) * (3 / 4)), minimumWaitPerStepMs);
+ await (0, common_js_1.sleep)(waitTimeMs);
+ if (signal?.aborted) {
+ this.logger.warn(`Upload finalization aborted by signal.`);
+ throw new axios_1.CanceledError();
+ }
+ const response = await this.http.get({
+ endpoint: `/chunks/${this.token}/${uploadId}/status`,
+ signal,
+ });
+ this.logger.debug(`Upload status found: ${response.status}`, {
+ status: response.status,
+ attempts: attempts++,
+ maxWaitTimeMs,
+ minimumWaitPerStepMs,
+ waitTimeMs,
+ elapsedMs: Date.now() - startTime,
+ });
+ if (response.status === 'FINALIZED') {
+ this.logger.debug(`Upload finalized successfully.`);
+ return response.receipt;
+ }
+ if (response.status === 'UNDERFUNDED') {
+ throw new errors_js_1.FailedRequestError(`Insufficient balance`, 402);
+ }
+ }
+ throw new Error(`Upload multi-part finalization has timed out for Upload ID ${uploadId}`);
+ }
+ }
+ exports.ChunkedUploader = ChunkedUploader;
+ /**
+ * Yield Buffers of up to `chunkByteCount`, coalescing whatever small pieces
+ * the source produces into proper slices.
+ */
+ async function* splitIntoChunks(source, chunkByteCount) {
+ if (isReadableStream(source)) {
+ yield* splitReadableStreamIntoChunks(source, chunkByteCount);
+ }
+ else {
+ yield* splitReadableIntoChunks(source, chunkByteCount);
+ }
+ }
+ async function* splitReadableIntoChunks(source, chunkByteCount) {
+ const queue = [];
+ let total = 0;
+ let encoder;
+ for await (const piece of source) {
+ const u8 = piece instanceof Uint8Array
+ ? new Uint8Array(piece.buffer, piece.byteOffset, piece.byteLength)
+ : (encoder ??= new TextEncoder()).encode(String(piece));
+ queue.push(u8);
+ total += u8.length;
+ // Emit full chunks
+ while (total >= chunkByteCount) {
+ const out = new Uint8Array(chunkByteCount);
+ let remaining = out.length;
+ let off = 0;
+ while (remaining > 0) {
+ const head = queue[0];
+ const take = Math.min(remaining, head.length);
+ out.set(head.subarray(0, take), off);
+ off += take;
+ remaining -= take;
+ if (take === head.length) {
+ queue.shift();
+ }
+ else {
+ queue[0] = head.subarray(take);
+ }
+ }
+ total -= chunkByteCount;
+ // Yield a Buffer view (no copy)
+ yield Buffer.from(out.buffer, out.byteOffset, out.byteLength);
+ }
+ }
+ // Remainder
+ if (total > 0) {
+ const out = new Uint8Array(total);
+ let off = 0;
+ while (queue.length > 0) {
+ const head = queue.shift(); // safe due to loop condition
+ out.set(head, off);
+ off += head.length;
+ }
+ yield Buffer.from(out.buffer, out.byteOffset, out.byteLength);
+ }
+ }
+ async function* splitReadableStreamIntoChunks(source, chunkByteCount) {
+ const reader = source.getReader();
+ const queue = [];
+ let total = 0;
+ try {
+ while (true) {
+ const { value, done } = await reader.read();
+ if (done)
+ break;
+ // Ensure we keep a plain view (avoids surprises if the producer reuses buffers)
+ const u8 = new Uint8Array(value.buffer, value.byteOffset, value.byteLength);
+ queue.push(u8);
+ total += u8.length;
+ while (total >= chunkByteCount) {
+ const out = new Uint8Array(chunkByteCount);
+ let remaining = out.length;
+ let off = 0;
+ while (remaining > 0) {
+ const head = queue[0];
+ const take = Math.min(remaining, head.length);
+ out.set(head.subarray(0, take), off);
+ off += take;
+ remaining -= take;
+ if (take === head.length) {
+ queue.shift();
+ }
+ else {
+ queue[0] = head.subarray(take);
+ }
+ }
+ total -= chunkByteCount;
+ yield Buffer.from(out.buffer, out.byteOffset, out.byteLength);
+ }
+ }
+ if (total > 0) {
+ const out = new Uint8Array(total);
+ let off = 0;
+ while (queue.length > 0) {
+ const head = queue.shift(); // safe due to loop condition
+ out.set(head, off);
+ off += head.length;
+ }
+ yield Buffer.from(out.buffer, out.byteOffset, out.byteLength);
+ }
+ }
+ finally {
+ reader.releaseLock();
+ }
+ }
+ function isReadableStream(source) {
+ // Prefer instanceof if available, otherwise use a safe duck-typing check
+ if (typeof ReadableStream !== 'undefined' &&
+ source instanceof ReadableStream) {
+ return true;
+ }
+ return (source !== null &&
+ typeof source === 'object' &&
+ 'getReader' in source &&
+ typeof source.getReader === 'function');
+ }
+ function combineAbortSignals(signals) {
+ const real = signals.filter(Boolean);
+ if (real.length === 0)
+ return undefined;
+ const anyFn = AbortSignal.any;
+ if (typeof anyFn === 'function') {
+ return anyFn(real);
+ }
+ const controller = new AbortController();
+ for (const s of real) {
+ const sig = s;
+ if (sig.aborted) {
+ controller.abort(sig.reason);
+ break;
+ }
+ const onAbort = () => controller.abort(sig.reason);
+ s.addEventListener('abort', onAbort, { once: true });
+ }
+ return controller.signal;
+ }
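Both splitters enforce the same invariant: every yielded chunk except possibly the last is exactly chunkByteCount bytes, no matter how the source fragments its reads. A standalone TypeScript sketch of that coalescing loop, mirroring the code above rather than importing it from the package:

import { Buffer } from 'node:buffer';

async function* coalesce(
  source: AsyncIterable<Uint8Array>,
  chunkByteCount: number,
): AsyncGenerator<Buffer> {
  const queue: Uint8Array[] = [];
  let total = 0;
  for await (const piece of source) {
    queue.push(piece);
    total += piece.length;
    // Drain full chunks as soon as enough bytes are buffered.
    while (total >= chunkByteCount) {
      const out = new Uint8Array(chunkByteCount);
      let off = 0;
      while (off < chunkByteCount) {
        const head = queue[0];
        const take = Math.min(chunkByteCount - off, head.length);
        out.set(head.subarray(0, take), off);
        off += take;
        if (take === head.length) queue.shift();
        else queue[0] = head.subarray(take); // keep the unconsumed tail
      }
      total -= chunkByteCount;
      yield Buffer.from(out.buffer, out.byteOffset, out.byteLength);
    }
  }
  if (total > 0) {
    // Remainder: the only chunk allowed to be shorter than chunkByteCount.
    const out = new Uint8Array(total);
    let off = 0;
    for (const head of queue) {
      out.set(head, off);
      off += head.length;
    }
    yield Buffer.from(out.buffer, out.byteOffset, out.byteLength);
  }
}

// Seven 3-byte pieces with chunkByteCount = 5 yield chunk sizes [5, 5, 5, 5, 1].
async function demo(): Promise<void> {
  async function* pieces() {
    for (let i = 0; i < 7; i++) yield new Uint8Array(3).fill(i);
  }
  for await (const chunk of coalesce(pieces(), 5)) console.log(chunk.length);
}
demo();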
package/lib/cjs/common/http.js
@@ -45,6 +45,7 @@ class TurboHTTPService {
  // See: https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API#body
  const { body, duplex } = await toFetchBody(data);
  try {
+ this.logger.debug('Posting data via fetch', { endpoint, headers });
  const res = await fetch(this.axios.defaults.baseURL + endpoint, {
  method: 'POST',
  headers,
package/lib/cjs/common/turbo.js
@@ -160,8 +160,16 @@ class TurboAuthenticatedClient extends TurboUnauthenticatedClient {
  /**
  * Signs and uploads raw data to the Turbo Upload Service.
  */
- upload({ data, dataItemOpts, signal, events, }) {
- return this.uploadService.upload({ data, dataItemOpts, signal, events });
+ upload({ data, dataItemOpts, signal, events, chunkByteCount, chunkingMode, maxChunkConcurrency, }) {
+ return this.uploadService.upload({
+ data,
+ dataItemOpts,
+ signal,
+ events,
+ chunkByteCount,
+ chunkingMode,
+ maxChunkConcurrency,
+ });
  }
  uploadFile(params) {
  return this.uploadService.uploadFile(params);
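The same three knobs surface on the authenticated client's upload facade; note that maxFinalizeMs is not forwarded here, unlike uploadFolder. A hedged usage sketch (file and wallet paths are hypothetical; factory setup follows the SDK's README):

import { TurboFactory } from '@ardrive/turbo-sdk/node';
import fs from 'node:fs';

const turbo = TurboFactory.authenticated({
  privateKey: JSON.parse(fs.readFileSync('./wallet.json', 'utf-8')), // hypothetical path
});

const receipt = await turbo.upload({
  data: fs.readFileSync('./large-video.mp4'), // hypothetical file
  chunkingMode: 'force',                      // always take the chunked path
  chunkByteCount: 25 * 1024 * 1024,           // 25 MiB chunks
  maxChunkConcurrency: 4,
  // maxFinalizeMs is not accepted on this facade in this version; the
  // finalize wait defaults to roughly one minute per GiB (see chunked.js above).
});
console.log(receipt);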
package/lib/cjs/common/upload.js
@@ -21,6 +21,7 @@ const plimit_lit_1 = require("plimit-lit");
  const axiosClient_js_1 = require("../utils/axiosClient.js");
  const common_js_1 = require("../utils/common.js");
  const errors_js_1 = require("../utils/errors.js");
+ const chunked_js_1 = require("./chunked.js");
  const events_js_1 = require("./events.js");
  const http_js_1 = require("./http.js");
  const logger_js_1 = require("./logger.js");
@@ -95,7 +96,7 @@ class TurboAuthenticatedBaseUploadService extends TurboUnauthenticatedUploadServ
  /**
  * Signs and uploads raw data to the Turbo Upload Service.
  */
- upload({ data, dataItemOpts, signal, events, }) {
+ upload({ data, dataItemOpts, signal, events, chunkByteCount, chunkingMode, maxChunkConcurrency, }) {
  // This function is intended to be usable in both Node and browser environments.
  if ((0, common_js_1.isBlob)(data)) {
  const streamFactory = () => data.stream();
@@ -123,6 +124,9 @@ class TurboAuthenticatedBaseUploadService extends TurboUnauthenticatedUploadServ
  signal,
  dataItemOpts,
  events,
+ chunkByteCount,
+ chunkingMode,
+ maxChunkConcurrency,
  });
  }
  resolveUploadFileConfig(params) {
@@ -163,22 +167,43 @@ class TurboAuthenticatedBaseUploadService extends TurboUnauthenticatedUploadServ
  let lastStatusCode = undefined; // Store the last status code for throwing
  const emitter = new events_js_1.TurboEventEmitter(events);
  // avoid duplicating signing on failures here - these errors will immediately be thrown
- // TODO: create a SigningError class and throw that instead of the generic Error
- const { dataItemStreamFactory, dataItemSizeFactory } = await this.signer.signDataItem({
- fileStreamFactory,
- fileSizeFactory,
- dataItemOpts,
- emitter,
- });
  // TODO: move the retry implementation to the http class, and avoid awaiting here. This will standardize the retry logic across all upload methods.
  while (retries < maxRetries) {
  if (signal?.aborted) {
  throw new axios_1.CanceledError();
  }
+ // TODO: create a SigningError class and throw that instead of the generic Error
+ const { dataItemStreamFactory, dataItemSizeFactory } = await this.signer.signDataItem({
+ fileStreamFactory,
+ fileSizeFactory,
+ dataItemOpts,
+ emitter,
+ });
  // Now that we have the signed data item, we can upload it using the uploadSignedDataItem method
  // which will create a new emitter with upload events. We await
  // this result due to the wrapped retry logic of this method.
  try {
+ const { chunkByteCount, maxChunkConcurrency } = params;
+ const chunkedUploader = new chunked_js_1.ChunkedUploader({
+ http: this.httpService,
+ token: this.token,
+ maxChunkConcurrency,
+ chunkByteCount,
+ logger: this.logger,
+ dataItemByteCount: dataItemSizeFactory(),
+ chunkingMode: params.chunkingMode,
+ maxFinalizeMs: params.maxFinalizeMs,
+ });
+ if (chunkedUploader.shouldUseChunkUploader) {
+ const response = await chunkedUploader.upload({
+ dataItemStreamFactory,
+ dataItemSizeFactory,
+ dataItemOpts,
+ signal,
+ events,
+ });
+ return response;
+ }
  const response = await this.uploadSignedDataItem({
  dataItemStreamFactory,
  dataItemSizeFactory,
@@ -216,7 +241,7 @@ class TurboAuthenticatedBaseUploadService extends TurboUnauthenticatedUploadServ
  ]);
  }
  }
- const msg = `Failed to upload file after ${maxRetries + 1} attempts\n${lastError instanceof Error ? lastError.message : lastError}`;
+ const msg = `Failed to upload file after ${retries + 1} attempts\n${lastError instanceof Error ? lastError.message : lastError}`;
  // After all retries, throw the last error for catching
  if (lastError instanceof errors_js_1.FailedRequestError) {
  lastError.message = msg;
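Two behavioral changes ride along in this hunk: signing now happens inside the retry loop, so every attempt re-signs and gets fresh stream factories (a retry would otherwise replay an already-consumed stream), and the failure message reports the attempts actually made (retries + 1) rather than always maxRetries + 1. A simplified sketch of the pattern, not the SDK's exact control flow:

async function uploadWithRetries<T>(
  sign: () => Promise<{ stream: () => NodeJS.ReadableStream }>,
  send: (stream: NodeJS.ReadableStream) => Promise<T>,
  maxRetries = 3,
): Promise<T> {
  let lastError: unknown;
  let retries = 0;
  while (retries < maxRetries) {
    // Re-sign per attempt: the previous attempt consumed its stream,
    // so a fresh factory is required for the retry to send real bytes.
    const { stream } = await sign();
    try {
      return await send(stream());
    } catch (err) {
      lastError = err;
      retries++;
    }
  }
  // Message format mirrors the SDK's updated string above.
  throw new Error(
    `Failed to upload file after ${retries + 1} attempts\n` +
      (lastError instanceof Error ? lastError.message : String(lastError)),
  );
}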
package/lib/cjs/common/upload.js (continued)
@@ -267,7 +292,7 @@ class TurboAuthenticatedBaseUploadService extends TurboUnauthenticatedUploadServ
  */
  async uploadFolder(params) {
  this.logger.debug('Uploading folder...', { params });
- const { dataItemOpts, signal, manifestOptions = {}, maxConcurrentUploads = 1, throwOnFailure = true, } = params;
+ const { dataItemOpts, signal, manifestOptions = {}, maxConcurrentUploads = 1, throwOnFailure = true, maxChunkConcurrency, chunkByteCount, chunkingMode, maxFinalizeMs, } = params;
  const { disableManifest, indexFile, fallbackFile } = manifestOptions;
  const paths = {};
  const response = {
@@ -291,6 +316,9 @@ class TurboAuthenticatedBaseUploadService extends TurboUnauthenticatedUploadServ
  fileSizeFactory: () => this.getFileSize(file),
  signal,
  dataItemOpts: dataItemOptsWithContentType,
+ chunkByteCount,
+ maxChunkConcurrency,
+ chunkingMode,
  });
  const relativePath = this.getRelativePath(file, params);
  paths[relativePath] = { id: result.id };
@@ -336,6 +364,10 @@ class TurboAuthenticatedBaseUploadService extends TurboUnauthenticatedUploadServ
  fileSizeFactory: () => manifestBuffer.byteLength,
  signal,
  dataItemOpts: { ...dataItemOpts, tags: tagsWithManifestContentType },
+ chunkByteCount,
+ maxChunkConcurrency,
+ maxFinalizeMs,
+ chunkingMode,
  });
  return {
  ...response,