@od-oneapp/storage 2026.1.1301
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +854 -0
- package/dist/client-next.d.mts +61 -0
- package/dist/client-next.d.mts.map +1 -0
- package/dist/client-next.mjs +111 -0
- package/dist/client-next.mjs.map +1 -0
- package/dist/client-utils-Dx6W25iz.d.mts +43 -0
- package/dist/client-utils-Dx6W25iz.d.mts.map +1 -0
- package/dist/client.d.mts +28 -0
- package/dist/client.d.mts.map +1 -0
- package/dist/client.mjs +183 -0
- package/dist/client.mjs.map +1 -0
- package/dist/env-BVHLmQdh.mjs +128 -0
- package/dist/env-BVHLmQdh.mjs.map +1 -0
- package/dist/env.mjs +3 -0
- package/dist/health-check-D7LnnDec.mjs +746 -0
- package/dist/health-check-D7LnnDec.mjs.map +1 -0
- package/dist/health-check-im_huJ59.d.mts +116 -0
- package/dist/health-check-im_huJ59.d.mts.map +1 -0
- package/dist/index.d.mts +60 -0
- package/dist/index.d.mts.map +1 -0
- package/dist/index.mjs +3 -0
- package/dist/keys.d.mts +37 -0
- package/dist/keys.d.mts.map +1 -0
- package/dist/keys.mjs +253 -0
- package/dist/keys.mjs.map +1 -0
- package/dist/server-edge.d.mts +28 -0
- package/dist/server-edge.d.mts.map +1 -0
- package/dist/server-edge.mjs +88 -0
- package/dist/server-edge.mjs.map +1 -0
- package/dist/server-next.d.mts +183 -0
- package/dist/server-next.d.mts.map +1 -0
- package/dist/server-next.mjs +1353 -0
- package/dist/server-next.mjs.map +1 -0
- package/dist/server.d.mts +70 -0
- package/dist/server.d.mts.map +1 -0
- package/dist/server.mjs +384 -0
- package/dist/server.mjs.map +1 -0
- package/dist/types.d.mts +321 -0
- package/dist/types.d.mts.map +1 -0
- package/dist/types.mjs +3 -0
- package/dist/validation.d.mts +101 -0
- package/dist/validation.d.mts.map +1 -0
- package/dist/validation.mjs +590 -0
- package/dist/validation.mjs.map +1 -0
- package/dist/vercel-blob-07Sx0Akn.d.mts +31 -0
- package/dist/vercel-blob-07Sx0Akn.d.mts.map +1 -0
- package/dist/vercel-blob-DA8HaYuw.mjs +158 -0
- package/dist/vercel-blob-DA8HaYuw.mjs.map +1 -0
- package/package.json +111 -0
- package/src/actions/blob-upload.ts +171 -0
- package/src/actions/index.ts +23 -0
- package/src/actions/mediaActions.ts +1071 -0
- package/src/actions/productMediaActions.ts +538 -0
- package/src/auth-helpers.ts +386 -0
- package/src/capabilities.ts +225 -0
- package/src/client-next.ts +184 -0
- package/src/client-utils.ts +292 -0
- package/src/client.ts +102 -0
- package/src/constants.ts +88 -0
- package/src/health-check.ts +81 -0
- package/src/multi-storage.ts +230 -0
- package/src/multipart.ts +497 -0
- package/src/retry-utils.test.ts +118 -0
- package/src/retry-utils.ts +59 -0
- package/src/server-edge.ts +129 -0
- package/src/server-next.ts +14 -0
- package/src/server.ts +666 -0
- package/src/validation.test.ts +312 -0
- package/src/validation.ts +827 -0
package/src/multipart.ts
ADDED
|
@@ -0,0 +1,497 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @fileoverview Unified Multipart Upload Manager
|
|
3
|
+
*
|
|
4
|
+
* Provides a consistent interface for multipart uploads across all storage providers.
|
|
5
|
+
* Handles provider-specific differences and provides retry logic, progress tracking,
|
|
6
|
+
* and error recovery.
|
|
7
|
+
*
|
|
8
|
+
* Features:
|
|
9
|
+
* - Automatic part size calculation
|
|
10
|
+
* - Concurrent part uploads
|
|
11
|
+
* - Progress tracking
|
|
12
|
+
* - Resume support
|
|
13
|
+
* - Error recovery
|
|
14
|
+
*
|
|
15
|
+
* @module @repo/storage/multipart
|
|
16
|
+
*/
|
|
17
|
+
|
|
18
|
+
import { logWarn } from '@repo/shared/logs';
|
|
19
|
+
|
|
20
|
+
import {
|
|
21
|
+
type MultipartUploadResult as BaseMultipartUploadResult,
|
|
22
|
+
type MultipartUploadOptions,
|
|
23
|
+
type StorageProvider,
|
|
24
|
+
type UploadOptions,
|
|
25
|
+
type UploadProgress,
|
|
26
|
+
} from '../types';
|
|
27
|
+
|
|
28
|
+
import { MULTIPART_THRESHOLDS, PART_SIZES, STORAGE_CONSTANTS } from './constants';
|
|
29
|
+
|
|
30
|
+
/**
 * Serializable bookkeeping for one in-flight multipart upload.
 * Returned by createUpload/getState and accepted by resumeUpload for recovery.
 */
export interface MultipartUploadState {
  // Provider-issued identifier for the multipart upload session
  uploadId: string;
  // Storage key (object path) the upload targets
  key: string;
  // Provider implementation name (constructor name — see getProviderName)
  provider: string;
  // Per-part bookkeeping; partNumber is 1-based
  parts: Array<{
    partNumber: number;
    // ETag returned by the provider once the part has been uploaded
    etag?: string;
    // Size of this part in bytes (the final part may be smaller)
    size: number;
    // True once the part was uploaded successfully
    uploaded: boolean;
  }>;
  // Total upload size in bytes
  totalSize: number;
  // Bytes confirmed uploaded so far
  uploadedSize: number;
  // True once completeUpload succeeded
  completed: boolean;
  // True once abortUpload was invoked
  aborted: boolean;
  // Message of the last error encountered, if any
  error?: string;
}
|
|
46
|
+
|
|
47
|
+
/**
 * Extended multipart upload result with detailed part information.
 * Extends the base MultipartUploadResult from types.ts
 */
export interface MultipartUploadResult extends BaseMultipartUploadResult {
  // Per-part results; etag is '' when the provider did not return one
  parts: Array<{ partNumber: number; etag: string; size: number }>;
  // Total number of parts the upload was split into
  totalParts: number;
}
|
|
55
|
+
|
|
56
|
+
export class MultipartUploadManager {
|
|
57
|
+
private provider: StorageProvider;
|
|
58
|
+
private state: MultipartUploadState | null = null;
|
|
59
|
+
private abortController: AbortController | null = null;
|
|
60
|
+
|
|
61
|
+
constructor(provider: StorageProvider) {
|
|
62
|
+
this.provider = provider;
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
/**
|
|
66
|
+
* Check if provider supports multipart uploads
|
|
67
|
+
*/
|
|
68
|
+
supportsMultipart(): boolean {
|
|
69
|
+
const capabilities = this.provider.getCapabilities?.();
|
|
70
|
+
return capabilities?.multipart ?? false;
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
/**
|
|
74
|
+
* Create a new multipart upload
|
|
75
|
+
*
|
|
76
|
+
* @param key - Storage key
|
|
77
|
+
* @param totalSize - Total file size in bytes
|
|
78
|
+
* @param options - Upload options
|
|
79
|
+
* @returns Upload state
|
|
80
|
+
*/
|
|
81
|
+
async createUpload(
|
|
82
|
+
key: string,
|
|
83
|
+
totalSize: number,
|
|
84
|
+
options: MultipartUploadOptions = {},
|
|
85
|
+
): Promise<MultipartUploadState> {
|
|
86
|
+
if (!this.supportsMultipart()) {
|
|
87
|
+
throw new Error('Provider does not support multipart uploads');
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
if (this.state && !this.state.completed && !this.state.aborted) {
|
|
91
|
+
throw new Error('Upload already in progress. Abort current upload first.');
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
// Calculate optimal part size
|
|
95
|
+
const partSize = this.calculatePartSize(totalSize, options.partSize);
|
|
96
|
+
const totalParts = Math.ceil(totalSize / partSize);
|
|
97
|
+
|
|
98
|
+
// Create multipart upload
|
|
99
|
+
const uploadOptions: UploadOptions = {
|
|
100
|
+
onProgress: options.onProgress
|
|
101
|
+
? (progress: UploadProgress) => {
|
|
102
|
+
options.onProgress?.({
|
|
103
|
+
key: progress.key,
|
|
104
|
+
loaded: progress.loaded,
|
|
105
|
+
total: progress.total,
|
|
106
|
+
part: progress.part ?? 0,
|
|
107
|
+
percentage: progress.percentage ?? 0,
|
|
108
|
+
});
|
|
109
|
+
}
|
|
110
|
+
: undefined,
|
|
111
|
+
};
|
|
112
|
+
const result = await this.provider.createMultipartUpload?.(key, uploadOptions);
|
|
113
|
+
|
|
114
|
+
if (!result) {
|
|
115
|
+
throw new Error('Failed to create multipart upload');
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
// Initialize state
|
|
119
|
+
this.state = {
|
|
120
|
+
uploadId: result.uploadId,
|
|
121
|
+
key: result.key,
|
|
122
|
+
provider: this.getProviderName(),
|
|
123
|
+
parts: Array.from({ length: totalParts }, (_, i) => ({
|
|
124
|
+
partNumber: i + 1,
|
|
125
|
+
size: i === totalParts - 1 ? totalSize - i * partSize : partSize,
|
|
126
|
+
uploaded: false,
|
|
127
|
+
})),
|
|
128
|
+
totalSize,
|
|
129
|
+
uploadedSize: 0,
|
|
130
|
+
completed: false,
|
|
131
|
+
aborted: false,
|
|
132
|
+
};
|
|
133
|
+
|
|
134
|
+
this.abortController = new AbortController();
|
|
135
|
+
|
|
136
|
+
return this.state;
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
/**
|
|
140
|
+
* Upload a part
|
|
141
|
+
*
|
|
142
|
+
* @param partNumber - Part number (1-based)
|
|
143
|
+
* @param data - Part data
|
|
144
|
+
* @param options - Upload options
|
|
145
|
+
* @returns Upload result
|
|
146
|
+
*/
|
|
147
|
+
async uploadPart(
|
|
148
|
+
partNumber: number,
|
|
149
|
+
data: ArrayBuffer | Blob | Buffer,
|
|
150
|
+
options: UploadOptions = {},
|
|
151
|
+
): Promise<{ etag: string; partNumber: number; size: number }> {
|
|
152
|
+
if (!this.state) {
|
|
153
|
+
throw new Error('No active upload. Call createUpload first.');
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
if (this.state.completed || this.state.aborted) {
|
|
157
|
+
throw new Error('Upload is completed or aborted');
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
const part = this.state.parts.find(p => p.partNumber === partNumber);
|
|
161
|
+
if (!part) {
|
|
162
|
+
throw new Error(`Part ${partNumber} not found`);
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
if (part.uploaded) {
|
|
166
|
+
throw new Error(`Part ${partNumber} already uploaded`);
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
// Check abort signal
|
|
170
|
+
if (this.abortController?.signal.aborted) {
|
|
171
|
+
throw new Error('Upload aborted');
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
try {
|
|
175
|
+
// Upload part with retry logic
|
|
176
|
+
const result = await this.uploadPartWithRetry(partNumber, data, options);
|
|
177
|
+
|
|
178
|
+
// Update state
|
|
179
|
+
part.etag = result.etag;
|
|
180
|
+
part.uploaded = true;
|
|
181
|
+
this.state.uploadedSize += part.size;
|
|
182
|
+
|
|
183
|
+
// Report progress
|
|
184
|
+
if (options.onProgress) {
|
|
185
|
+
options.onProgress({
|
|
186
|
+
key: this.state.key,
|
|
187
|
+
loaded: this.state.uploadedSize,
|
|
188
|
+
total: this.state.totalSize,
|
|
189
|
+
part: partNumber,
|
|
190
|
+
percentage: (this.state.uploadedSize / this.state.totalSize) * 100,
|
|
191
|
+
});
|
|
192
|
+
}
|
|
193
|
+
|
|
194
|
+
return result;
|
|
195
|
+
} catch (error) {
|
|
196
|
+
this.state.error = error instanceof Error ? error.message : String(error);
|
|
197
|
+
throw error;
|
|
198
|
+
}
|
|
199
|
+
}
|
|
200
|
+
|
|
201
|
+
/**
|
|
202
|
+
* Complete the multipart upload
|
|
203
|
+
*
|
|
204
|
+
* @returns Final upload result
|
|
205
|
+
*/
|
|
206
|
+
async completeUpload(): Promise<MultipartUploadResult> {
|
|
207
|
+
if (!this.state) {
|
|
208
|
+
throw new Error('No active upload. Call createUpload first.');
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
if (this.state.completed) {
|
|
212
|
+
throw new Error('Upload already completed');
|
|
213
|
+
}
|
|
214
|
+
|
|
215
|
+
if (this.state.aborted) {
|
|
216
|
+
throw new Error('Upload was aborted');
|
|
217
|
+
}
|
|
218
|
+
|
|
219
|
+
// Check if all parts are uploaded
|
|
220
|
+
const unuploadedParts = this.state.parts.filter(p => !p.uploaded);
|
|
221
|
+
if (unuploadedParts.length > 0) {
|
|
222
|
+
throw new Error(`Upload incomplete: ${unuploadedParts.length} parts not uploaded`);
|
|
223
|
+
}
|
|
224
|
+
|
|
225
|
+
try {
|
|
226
|
+
// Complete multipart upload
|
|
227
|
+
const parts = this.state.parts
|
|
228
|
+
.filter(p => p.etag)
|
|
229
|
+
.map(p => ({ partNumber: p.partNumber, etag: p.etag ?? '' }))
|
|
230
|
+
.filter(p => p.etag !== '');
|
|
231
|
+
|
|
232
|
+
const result = await this.provider.completeMultipartUpload?.(this.state.uploadId, parts);
|
|
233
|
+
|
|
234
|
+
if (!result) {
|
|
235
|
+
throw new Error('Failed to complete multipart upload');
|
|
236
|
+
}
|
|
237
|
+
|
|
238
|
+
// Update state
|
|
239
|
+
this.state.completed = true;
|
|
240
|
+
|
|
241
|
+
return {
|
|
242
|
+
...result,
|
|
243
|
+
key: result.key ?? this.state.key,
|
|
244
|
+
uploadId: this.state.uploadId,
|
|
245
|
+
parts: this.state.parts
|
|
246
|
+
.filter(p => p.etag)
|
|
247
|
+
.map(p => ({
|
|
248
|
+
partNumber: p.partNumber,
|
|
249
|
+
etag: p.etag ?? '',
|
|
250
|
+
size: p.size,
|
|
251
|
+
})),
|
|
252
|
+
totalParts: this.state.parts.length,
|
|
253
|
+
};
|
|
254
|
+
} catch (error) {
|
|
255
|
+
this.state.error = error instanceof Error ? error.message : String(error);
|
|
256
|
+
throw error;
|
|
257
|
+
}
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
/**
|
|
261
|
+
* Abort the multipart upload
|
|
262
|
+
*/
|
|
263
|
+
async abortUpload(): Promise<void> {
|
|
264
|
+
if (!this.state) {
|
|
265
|
+
return;
|
|
266
|
+
}
|
|
267
|
+
|
|
268
|
+
if (this.state.completed) {
|
|
269
|
+
return;
|
|
270
|
+
}
|
|
271
|
+
|
|
272
|
+
// Signal abort
|
|
273
|
+
this.abortController?.abort();
|
|
274
|
+
this.state.aborted = true;
|
|
275
|
+
|
|
276
|
+
try {
|
|
277
|
+
// Abort on provider
|
|
278
|
+
await this.provider.abortMultipartUpload?.(this.state.uploadId);
|
|
279
|
+
} catch (error) {
|
|
280
|
+
// Log error but don't throw - we want to clean up state
|
|
281
|
+
|
|
282
|
+
// Use structured logging instead of console.warn
|
|
283
|
+
logWarn('Failed to abort upload on provider', {
|
|
284
|
+
error: error instanceof Error ? error.message : String(error),
|
|
285
|
+
uploadId: this.state.uploadId,
|
|
286
|
+
key: this.state.key,
|
|
287
|
+
});
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
// Reset state
|
|
291
|
+
this.state = null;
|
|
292
|
+
this.abortController = null;
|
|
293
|
+
}
|
|
294
|
+
|
|
295
|
+
/**
|
|
296
|
+
* Get current upload state
|
|
297
|
+
*/
|
|
298
|
+
getState(): MultipartUploadState | null {
|
|
299
|
+
return this.state;
|
|
300
|
+
}
|
|
301
|
+
|
|
302
|
+
/**
|
|
303
|
+
* Resume upload from state (for recovery)
|
|
304
|
+
*
|
|
305
|
+
* @param state - Previous upload state
|
|
306
|
+
*/
|
|
307
|
+
async resumeUpload(state: MultipartUploadState): Promise<void> {
|
|
308
|
+
if (this.state && !this.state.completed && !this.state.aborted) {
|
|
309
|
+
throw new Error('Upload already in progress');
|
|
310
|
+
}
|
|
311
|
+
|
|
312
|
+
this.state = { ...state };
|
|
313
|
+
this.abortController = new AbortController();
|
|
314
|
+
}
|
|
315
|
+
|
|
316
|
+
/**
|
|
317
|
+
* Upload file in chunks automatically
|
|
318
|
+
*
|
|
319
|
+
* @param key - Storage key
|
|
320
|
+
* @param data - File data
|
|
321
|
+
* @param options - Upload options
|
|
322
|
+
* @returns Upload result
|
|
323
|
+
*/
|
|
324
|
+
async uploadFile(
|
|
325
|
+
key: string,
|
|
326
|
+
data: ArrayBuffer | Blob | Buffer,
|
|
327
|
+
options: MultipartUploadOptions = {},
|
|
328
|
+
): Promise<MultipartUploadResult> {
|
|
329
|
+
const totalSize =
|
|
330
|
+
data instanceof ArrayBuffer
|
|
331
|
+
? data.byteLength
|
|
332
|
+
: data instanceof Buffer
|
|
333
|
+
? data.length
|
|
334
|
+
: (data as Blob).size;
|
|
335
|
+
|
|
336
|
+
// Create upload
|
|
337
|
+
await this.createUpload(key, totalSize, options);
|
|
338
|
+
|
|
339
|
+
const partSize = this.calculatePartSize(totalSize, options.partSize);
|
|
340
|
+
const totalParts = Math.ceil(totalSize / partSize);
|
|
341
|
+
|
|
342
|
+
// Upload parts
|
|
343
|
+
for (let i = 0; i < totalParts; i++) {
|
|
344
|
+
const start = i * partSize;
|
|
345
|
+
const end = Math.min(start + partSize, totalSize);
|
|
346
|
+
const partData = data.slice(start, end);
|
|
347
|
+
|
|
348
|
+
await this.uploadPart(i + 1, partData, {
|
|
349
|
+
onProgress: options.onProgress
|
|
350
|
+
? (progress: UploadProgress) => {
|
|
351
|
+
options.onProgress?.({
|
|
352
|
+
key: progress.key,
|
|
353
|
+
loaded: progress.loaded,
|
|
354
|
+
total: progress.total,
|
|
355
|
+
part: progress.part ?? 0,
|
|
356
|
+
percentage: progress.percentage ?? 0,
|
|
357
|
+
});
|
|
358
|
+
}
|
|
359
|
+
: undefined,
|
|
360
|
+
});
|
|
361
|
+
}
|
|
362
|
+
|
|
363
|
+
// Complete upload
|
|
364
|
+
return await this.completeUpload();
|
|
365
|
+
}
|
|
366
|
+
|
|
367
|
+
private async uploadPartWithRetry(
|
|
368
|
+
partNumber: number,
|
|
369
|
+
data: ArrayBuffer | Blob | Buffer,
|
|
370
|
+
options: UploadOptions,
|
|
371
|
+
maxRetries: number = 3,
|
|
372
|
+
): Promise<{ etag: string; partNumber: number; size: number }> {
|
|
373
|
+
let lastError: Error | null = null;
|
|
374
|
+
|
|
375
|
+
for (let attempt = 1; attempt <= maxRetries; attempt++) {
|
|
376
|
+
try {
|
|
377
|
+
if (!this.state?.uploadId) {
|
|
378
|
+
throw new Error('Upload ID not initialized');
|
|
379
|
+
}
|
|
380
|
+
|
|
381
|
+
const result = await this.provider.uploadPart?.(this.state.uploadId, partNumber, data, {
|
|
382
|
+
...options,
|
|
383
|
+
abortSignal: this.abortController?.signal,
|
|
384
|
+
});
|
|
385
|
+
|
|
386
|
+
if (!result) {
|
|
387
|
+
throw new Error('Failed to upload part');
|
|
388
|
+
}
|
|
389
|
+
|
|
390
|
+
return {
|
|
391
|
+
etag: result.etag ?? '',
|
|
392
|
+
partNumber: result.partNumber ?? partNumber,
|
|
393
|
+
size:
|
|
394
|
+
data instanceof ArrayBuffer
|
|
395
|
+
? data.byteLength
|
|
396
|
+
: data instanceof Buffer
|
|
397
|
+
? data.length
|
|
398
|
+
: (data as Blob).size,
|
|
399
|
+
};
|
|
400
|
+
} catch (error) {
|
|
401
|
+
lastError = error instanceof Error ? error : new Error(String(error));
|
|
402
|
+
|
|
403
|
+
// Don't retry on abort
|
|
404
|
+
if (this.abortController?.signal.aborted) {
|
|
405
|
+
throw lastError;
|
|
406
|
+
}
|
|
407
|
+
|
|
408
|
+
// Don't retry on validation errors
|
|
409
|
+
if (lastError.message.includes('validation') || lastError.message.includes('invalid')) {
|
|
410
|
+
throw lastError;
|
|
411
|
+
}
|
|
412
|
+
|
|
413
|
+
// Wait before retry (exponential backoff)
|
|
414
|
+
if (attempt < maxRetries) {
|
|
415
|
+
await new Promise(resolve =>
|
|
416
|
+
setTimeout(resolve, Math.pow(2, attempt) * STORAGE_CONSTANTS.RETRY_BASE_DELAY_MS),
|
|
417
|
+
);
|
|
418
|
+
}
|
|
419
|
+
}
|
|
420
|
+
}
|
|
421
|
+
|
|
422
|
+
throw lastError ?? new Error('Upload failed after retries');
|
|
423
|
+
}
|
|
424
|
+
|
|
425
|
+
private calculatePartSize(totalSize: number, userPartSize?: number): number {
|
|
426
|
+
if (userPartSize) {
|
|
427
|
+
return Math.min(userPartSize, totalSize);
|
|
428
|
+
}
|
|
429
|
+
|
|
430
|
+
// Use constants for part size thresholds
|
|
431
|
+
if (totalSize < MULTIPART_THRESHOLDS.SMALL_FILE) {
|
|
432
|
+
return PART_SIZES.SMALL;
|
|
433
|
+
} else if (totalSize < MULTIPART_THRESHOLDS.MEDIUM_FILE) {
|
|
434
|
+
return PART_SIZES.MEDIUM;
|
|
435
|
+
} else {
|
|
436
|
+
// Scale part size to ensure we never exceed 10,000 parts (S3/R2 limit)
|
|
437
|
+
const maxParts = 9999; // Stay safely under 10,000 limit
|
|
438
|
+
const minPartSize = Math.ceil(totalSize / maxParts);
|
|
439
|
+
return Math.max(PART_SIZES.LARGE, minPartSize);
|
|
440
|
+
}
|
|
441
|
+
}
|
|
442
|
+
|
|
443
|
+
private getProviderName(): string {
|
|
444
|
+
return this.provider.constructor.name;
|
|
445
|
+
}
|
|
446
|
+
}
|
|
447
|
+
|
|
448
|
+
/**
|
|
449
|
+
* Create a multipart upload manager for a provider
|
|
450
|
+
*
|
|
451
|
+
* @param provider - Storage provider
|
|
452
|
+
* @returns Multipart upload manager
|
|
453
|
+
*/
|
|
454
|
+
export function createMultipartUploadManager(provider: StorageProvider): MultipartUploadManager {
|
|
455
|
+
if (!hasMultipartSupport(provider)) {
|
|
456
|
+
throw new Error('The provided storage provider does not support multipart uploads');
|
|
457
|
+
}
|
|
458
|
+
return new MultipartUploadManager(provider);
|
|
459
|
+
}
|
|
460
|
+
|
|
461
|
+
/**
|
|
462
|
+
* Determines whether the given storage provider supports multipart uploads.
|
|
463
|
+
*
|
|
464
|
+
* @param provider - The storage provider to check
|
|
465
|
+
* @returns `true` if the provider supports multipart uploads, `false` otherwise.
|
|
466
|
+
*/
|
|
467
|
+
export function hasMultipartSupport(provider: StorageProvider): boolean {
|
|
468
|
+
const capabilities = provider.getCapabilities?.();
|
|
469
|
+
return capabilities?.multipart ?? false;
|
|
470
|
+
}
|
|
471
|
+
|
|
472
|
+
/**
|
|
473
|
+
* Selects an appropriate multipart upload part size for a file.
|
|
474
|
+
*
|
|
475
|
+
* If `userPartSize` is provided, it will be capped at the file size; otherwise the part size
|
|
476
|
+
* is chosen from configured thresholds for small, medium, or large files.
|
|
477
|
+
*
|
|
478
|
+
* @param fileSize - File size in bytes
|
|
479
|
+
* @param userPartSize - Optional user-specified part size in bytes; capped to `fileSize` when provided
|
|
480
|
+
* @returns The selected part size in bytes
|
|
481
|
+
*/
|
|
482
|
+
export function getOptimalPartSize(fileSize: number, userPartSize?: number): number {
|
|
483
|
+
if (userPartSize) {
|
|
484
|
+
return Math.min(userPartSize, fileSize);
|
|
485
|
+
}
|
|
486
|
+
|
|
487
|
+
if (fileSize < MULTIPART_THRESHOLDS.SMALL_FILE) {
|
|
488
|
+
return PART_SIZES.SMALL;
|
|
489
|
+
} else if (fileSize < MULTIPART_THRESHOLDS.MEDIUM_FILE) {
|
|
490
|
+
return PART_SIZES.MEDIUM;
|
|
491
|
+
} else {
|
|
492
|
+
// Scale part size to ensure we never exceed 10,000 parts (S3/R2 limit)
|
|
493
|
+
const maxParts = 9999; // Stay safely under 10,000 limit
|
|
494
|
+
const minPartSize = Math.ceil(fileSize / maxParts);
|
|
495
|
+
return Math.max(PART_SIZES.LARGE, minPartSize);
|
|
496
|
+
}
|
|
497
|
+
}
|
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @fileoverview retry-utils.test.ts
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
|
|
6
|
+
|
|
7
|
+
import { STORAGE_CONSTANTS } from './constants';
|
|
8
|
+
import { withRetry } from './retry-utils';
|
|
9
|
+
import { StorageError, StorageErrorCode } from './validation';
|
|
10
|
+
|
|
11
|
+
// Unit tests for withRetry. Fake timers are installed per-test so the
// exponential backoff delays can be advanced deterministically instead of
// actually sleeping.
describe('withRetry', () => {
  beforeEach(() => {
    vi.useFakeTimers();
  });

  afterEach(() => {
    vi.useRealTimers();
    vi.restoreAllMocks();
  });

  it('returns immediately when the operation succeeds on the first attempt', async () => {
    const operation = vi.fn().mockResolvedValue('success');

    // No timers need to run: a first-attempt success never schedules backoff.
    await expect(withRetry(operation)).resolves.toBe('success');
    expect(operation).toHaveBeenCalledTimes(1);
  });

  it('retries when the operation throws a retryable error and eventually succeeds', async () => {
    // StorageError constructed with retryable=true so isRetryableError accepts it
    const retryableError = new StorageError(
      'temporary timeout',
      StorageErrorCode.TIMEOUT,
      undefined,
      true,
    );
    const operation = vi
      .fn<() => Promise<string>>()
      .mockRejectedValueOnce(retryableError)
      .mockResolvedValueOnce('eventual-success');

    const resultPromise = withRetry(operation, 3);

    // Flush the backoff timer between attempt 1 (fails) and attempt 2 (succeeds)
    await vi.runAllTimersAsync();

    await expect(resultPromise).resolves.toBe('eventual-success');
    expect(operation).toHaveBeenCalledTimes(2);
  });

  it('waits using exponential backoff between retry attempts', async () => {
    const retryableError = new StorageError(
      'temporary failure',
      StorageErrorCode.NETWORK_ERROR,
      undefined,
      true,
    );
    // Fail twice, succeed on the third attempt — produces exactly two backoff waits
    const operation = vi
      .fn<() => Promise<string>>()
      .mockRejectedValueOnce(retryableError)
      .mockRejectedValueOnce(retryableError)
      .mockResolvedValueOnce('done');

    const setTimeoutSpy = vi.spyOn(global, 'setTimeout');

    const resultPromise = withRetry(operation, 3);

    // Execute pending timers sequentially to inspect delay values
    // (base delay after attempt 1, doubled after attempt 2)
    await vi.advanceTimersByTimeAsync(STORAGE_CONSTANTS.RETRY_BASE_DELAY_MS);
    await vi.advanceTimersByTimeAsync(STORAGE_CONSTANTS.RETRY_BASE_DELAY_MS * 2);

    await expect(resultPromise).resolves.toBe('done');
    expect(operation).toHaveBeenCalledTimes(3);
    expect(setTimeoutSpy).toHaveBeenNthCalledWith(
      1,
      expect.any(Function),
      STORAGE_CONSTANTS.RETRY_BASE_DELAY_MS,
    );
    expect(setTimeoutSpy).toHaveBeenNthCalledWith(
      2,
      expect.any(Function),
      STORAGE_CONSTANTS.RETRY_BASE_DELAY_MS * 2,
    );
  });

  it('throws immediately for non-retryable errors', async () => {
    // A plain Error is not recognized by isRetryableError, so no retry happens
    const fatalError = new Error('fatal error');
    const operation = vi.fn().mockRejectedValue(fatalError);

    await expect(withRetry(operation, 5)).rejects.toBe(fatalError);
    expect(operation).toHaveBeenCalledTimes(1);
  });

  it('throws when all retry attempts fail', async () => {
    const retryableError = new StorageError(
      'still failing',
      StorageErrorCode.UPLOAD_FAILED,
      undefined,
      true,
    );
    const operation = vi.fn().mockRejectedValue(retryableError);

    // Start the retry operation and attach the catch handler immediately
    // (avoids an unhandled-rejection warning while fake timers advance)
    const promise = withRetry(operation, 2).catch(e => e);

    // Run timers to complete all retry attempts
    await vi.runAllTimersAsync();

    // The promise should resolve to the error (since we caught it above)
    const error = await promise;
    expect(error).toStrictEqual(retryableError);
    expect(operation).toHaveBeenCalledTimes(2);
  });

  it('validates the maxRetries parameter', async () => {
    const operation = vi.fn().mockResolvedValue('anything');

    // Validation rejects before the operation is ever invoked
    await expect(withRetry(operation, 0)).rejects.toThrowError('maxRetries must be at least 1');
    expect(operation).not.toHaveBeenCalled();
  });
});
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @fileoverview Retry utility for AWS SDK operations
|
|
3
|
+
*
|
|
4
|
+
* Wraps AWS SDK calls with retry logic for transient failures.
|
|
5
|
+
* Provides exponential backoff and retryable error detection.
|
|
6
|
+
*
|
|
7
|
+
* @module @repo/storage/retry-utils
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
import { STORAGE_CONSTANTS } from './constants';
|
|
11
|
+
import { isRetryableError } from './validation';
|
|
12
|
+
|
|
13
|
+
/**
|
|
14
|
+
* Run an operation with retry and exponential backoff for retryable errors.
|
|
15
|
+
*
|
|
16
|
+
* Retries the provided operation up to `maxRetries` attempts. Between attempts it waits using
|
|
17
|
+
* exponential backoff: delay = 2^attempt * RETRY_BASE_DELAY_MS. If a non-retryable error is
|
|
18
|
+
* encountered or all attempts are exhausted, the last error is thrown.
|
|
19
|
+
*
|
|
20
|
+
* @param operation - Function that performs the operation and returns a promise of the result
|
|
21
|
+
* @param maxRetries - Maximum number of attempts (initial attempt counts as one)
|
|
22
|
+
* @returns The resolved value from a successful operation
|
|
23
|
+
* @throws The last encountered error when a non-retryable error occurs or all retries fail
|
|
24
|
+
*/
|
|
25
|
+
export async function withRetry<T>(
|
|
26
|
+
operation: () => Promise<T>,
|
|
27
|
+
maxRetries: number = STORAGE_CONSTANTS.DEFAULT_MAX_RETRIES,
|
|
28
|
+
): Promise<T> {
|
|
29
|
+
// Validate maxRetries up-front
|
|
30
|
+
if (maxRetries < 1) {
|
|
31
|
+
throw new Error('maxRetries must be at least 1');
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
let lastError: Error | null = null;
|
|
35
|
+
|
|
36
|
+
for (let attempt = 1; attempt <= maxRetries; attempt++) {
|
|
37
|
+
try {
|
|
38
|
+
return await operation();
|
|
39
|
+
} catch (error) {
|
|
40
|
+
lastError = error instanceof Error ? error : new Error(String(error));
|
|
41
|
+
|
|
42
|
+
// Don't retry if error is not retryable
|
|
43
|
+
if (!isRetryableError(error)) {
|
|
44
|
+
throw lastError;
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
// Don't retry on last attempt
|
|
48
|
+
if (attempt === maxRetries) {
|
|
49
|
+
throw lastError;
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
// Exponential backoff: first retry uses base delay (2^0 * base)
|
|
53
|
+
const delay = Math.pow(2, attempt - 1) * STORAGE_CONSTANTS.RETRY_BASE_DELAY_MS;
|
|
54
|
+
await new Promise(resolve => setTimeout(resolve, delay));
|
|
55
|
+
}
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
throw lastError ?? new Error('Operation failed after retries');
|
|
59
|
+
}
|