@seanmozeik/s3up 0.3.1

@@ -0,0 +1,507 @@
+ // src/lib/multipart.ts
+ // S3 Multipart Upload orchestrator
+
+ import {
+   createProgressState,
+   finishProgress,
+   type ProgressState,
+   updateProgress,
+   writeProgress
+ } from './progress-bar';
+ import type { S3Config } from './providers';
+ import { getEndpoint } from './providers';
+ import { type AwsCredentials, getRegionForSigning, signRequest } from './signing';
+ import {
+   addCompletedPart,
+   type CompletedPart,
+   createInitialState,
+   deleteState,
+   hasFileChanged,
+   loadState,
+   saveState,
+   type UploadState
+ } from './state';
+
+ export interface MultipartOptions {
+   chunkSize: number;
+   connections: number;
+ }
+
+ interface MultipartUploadResult {
+   success: true;
+   publicUrl: string;
+ }
+
+ interface MultipartUploadError {
+   success: false;
+   error: string;
+ }
+
+ export type MultipartOutcome = MultipartUploadResult | MultipartUploadError;
+
+ // Track abort state for graceful shutdown
+ let abortController: AbortController | null = null;
+ let isAborting = false;
+
+ /**
+  * Get AWS credentials from S3Config
+  */
+ function getCredentials(config: S3Config): AwsCredentials {
+   return {
+     accessKeyId: config.accessKeyId,
+     region: getRegionForSigning(config.provider, config.region),
+     secretAccessKey: config.secretAccessKey
+   };
+ }
+
+ /**
+  * Initiate a multipart upload
+  */
+ async function initiateMultipartUpload(config: S3Config, key: string): Promise<string> {
+   const endpoint = getEndpoint(config);
+   const url = `${endpoint}/${config.bucket}/${encodeURIComponent(key)}?uploads`;
+   const credentials = getCredentials(config);
+
+   const signed = signRequest(
+     'POST',
+     url,
+     {
+       'content-type': 'application/octet-stream'
+     },
+     '',
+     credentials
+   );
+
+   const response = await fetch(signed.url, {
+     headers: signed.headers,
+     method: signed.method
+   });
+
+   if (!response.ok) {
+     const text = await response.text();
+     throw new Error(`Failed to initiate multipart upload: ${response.status} ${text}`);
+   }
+
+   const xml = await response.text();
+   const match = xml.match(/<UploadId>([^<]+)<\/UploadId>/);
+   if (!match) {
+     throw new Error('No UploadId in response');
+   }
+
+   return match[1];
+ }
+
+ /**
+  * Upload a single part
+  */
+ async function uploadPart(
+   config: S3Config,
+   key: string,
+   uploadId: string,
+   partNumber: number,
+   body: Uint8Array,
+   signal?: AbortSignal
+ ): Promise<CompletedPart> {
+   const endpoint = getEndpoint(config);
+   const url = `${endpoint}/${config.bucket}/${encodeURIComponent(key)}?partNumber=${partNumber}&uploadId=${encodeURIComponent(uploadId)}`;
+   const credentials = getCredentials(config);
+
+   // Include content-type for R2 compatibility
+   const signed = signRequest(
+     'PUT',
+     url,
+     { 'content-type': 'application/octet-stream' },
+     body,
+     credentials
+   );
+
+   const response = await fetch(signed.url, {
+     body: Buffer.from(body),
+     headers: signed.headers,
+     method: signed.method,
+     signal
+   });
+
+   if (!response.ok) {
+     const text = await response.text();
+     throw new Error(`Failed to upload part ${partNumber}: ${response.status} ${text}`);
+   }
+
+   const etag = response.headers.get('etag');
+   if (!etag) {
+     throw new Error(`No ETag for part ${partNumber}`);
+   }
+
+   return { etag, partNumber };
+ }
+
+ /**
+  * Complete the multipart upload
+  */
+ async function completeMultipartUpload(
+   config: S3Config,
+   key: string,
+   uploadId: string,
+   parts: CompletedPart[]
+ ): Promise<void> {
+   const endpoint = getEndpoint(config);
+   const url = `${endpoint}/${config.bucket}/${encodeURIComponent(key)}?uploadId=${encodeURIComponent(uploadId)}`;
+   const credentials = getCredentials(config);
+
+   // Build completion XML
+   const partsXml = parts
+     .sort((a, b) => a.partNumber - b.partNumber)
+     .map((p) => `<Part><PartNumber>${p.partNumber}</PartNumber><ETag>${p.etag}</ETag></Part>`)
+     .join('');
+   const body = `<?xml version="1.0" encoding="UTF-8"?><CompleteMultipartUpload>${partsXml}</CompleteMultipartUpload>`;
+
+   const signed = signRequest(
+     'POST',
+     url,
+     {
+       'content-type': 'application/xml'
+     },
+     body,
+     credentials
+   );
+
+   const response = await fetch(signed.url, {
+     body,
+     headers: signed.headers,
+     method: signed.method
+   });
+
+   if (!response.ok) {
+     const text = await response.text();
+     throw new Error(`Failed to complete multipart upload: ${response.status} ${text}`);
+   }
+ }
+
+ /**
+  * Abort a multipart upload
+  */
+ async function abortMultipartUpload(
+   config: S3Config,
+   key: string,
+   uploadId: string
+ ): Promise<void> {
+   const endpoint = getEndpoint(config);
+   const url = `${endpoint}/${config.bucket}/${encodeURIComponent(key)}?uploadId=${encodeURIComponent(uploadId)}`;
+   const credentials = getCredentials(config);
+
+   const signed = signRequest('DELETE', url, {}, null, credentials);
+
+   try {
+     const response = await fetch(signed.url, {
+       headers: signed.headers,
+       method: signed.method
+     });
+
+     // 204 or 404 are both acceptable
+     if (!response.ok && response.status !== 404) {
+       console.warn(`Warning: Failed to abort upload: ${response.status}`);
+     }
+   } catch {
+     // Ignore abort errors
+   }
+ }
+
+ /**
+  * List parts for an existing upload (to verify state)
+  */
+ async function listParts(
+   config: S3Config,
+   key: string,
+   uploadId: string
+ ): Promise<CompletedPart[]> {
+   const endpoint = getEndpoint(config);
+   const url = `${endpoint}/${config.bucket}/${encodeURIComponent(key)}?uploadId=${encodeURIComponent(uploadId)}`;
+   const credentials = getCredentials(config);
+
+   const signed = signRequest('GET', url, {}, null, credentials);
+
+   const response = await fetch(signed.url, {
+     headers: signed.headers,
+     method: signed.method
+   });
+
+   if (!response.ok) {
+     if (response.status === 404) {
+       return []; // Upload expired or doesn't exist
+     }
+     throw new Error(`Failed to list parts: ${response.status}`);
+   }
+
+   const xml = await response.text();
+   const parts: CompletedPart[] = [];
+
+   // Parse parts from XML
+   const partMatches = xml.matchAll(
+     /<Part>[\s\S]*?<PartNumber>(\d+)<\/PartNumber>[\s\S]*?<ETag>([^<]+)<\/ETag>[\s\S]*?<\/Part>/g
+   );
+   for (const match of partMatches) {
+     parts.push({
+       etag: match[2],
+       partNumber: parseInt(match[1], 10)
+     });
+   }
+
+   return parts;
+ }
+
+ /**
+  * Run parallel part uploads with concurrency limit
+  */
+ async function uploadPartsInParallel(
+   config: S3Config,
+   filePath: string,
+   state: UploadState,
+   progress: ProgressState,
+   connections: number
+ ): Promise<CompletedPart[]> {
+   const file = Bun.file(filePath);
+   const completedPartNumbers = new Set(state.completedParts.map((p) => p.partNumber));
+   const pendingParts: number[] = [];
+
+   // Build list of parts to upload
+   for (let i = 1; i <= state.totalParts; i++) {
+     if (!completedPartNumbers.has(i)) {
+       pendingParts.push(i);
+     }
+   }
+
+   // Update progress with already completed parts
+   progress.completedParts = state.completedParts.length;
+   progress.bytesUploaded = state.completedParts.reduce((sum, p) => {
+     const partSize =
+       p.partNumber < state.totalParts
+         ? state.chunkSize
+         : state.fileSize - (state.totalParts - 1) * state.chunkSize;
+     return sum + partSize;
+   }, 0);
+
+   const allParts = [...state.completedParts];
+   const executing = new Set<Promise<void>>();
+
+   abortController = new AbortController();
+
+   for (const partNumber of pendingParts) {
+     if (isAborting) break;
+
+     const start = (partNumber - 1) * state.chunkSize;
+     const end = Math.min(start + state.chunkSize, state.fileSize);
+     const partSize = end - start;
+
+     const uploadPromise = (async () => {
+       try {
+         // Read chunk from file
+         const chunk = await file.slice(start, end).arrayBuffer();
+         const body = new Uint8Array(chunk);
+
+         // Upload part
+         const part = await uploadPart(
+           config,
+           state.key,
+           state.uploadId,
+           partNumber,
+           body,
+           abortController?.signal
+         );
+
+         // Save to state immediately
+         await addCompletedPart(filePath, state, part);
+         allParts.push(part);
+
+         // Update progress
+         updateProgress(progress, partSize);
+         writeProgress(progress);
+       } catch (err) {
+         if (!isAborting) {
+           throw err;
+         }
+       }
+     })();
+
+     executing.add(uploadPromise);
+     uploadPromise.finally(() => executing.delete(uploadPromise));
+
+     // Limit concurrency
+     if (executing.size >= connections) {
+       await Promise.race(executing);
+     }
+   }
+
+   // Wait for remaining uploads
+   await Promise.all(executing);
+
+   return allParts;
+ }
+
+ /**
+  * Setup SIGINT handler for graceful abort
+  */
+ export function setupAbortHandler(_filePath: string, state: UploadState | null): void {
+   const handler = async () => {
+     if (isAborting) {
+       process.exit(1);
+     }
+
+     isAborting = true;
+     console.log('\n\nInterrupted! Saving progress...');
+
+     if (abortController) {
+       abortController.abort();
+     }
+
+     // Wait a moment for in-flight requests
+     await new Promise((r) => setTimeout(r, 1000));
+
+     if (state) {
+       console.log(`Upload paused at ${state.completedParts.length}/${state.totalParts} parts.`);
+       console.log('Run the same command to resume.');
+     }
+
+     process.exit(0);
+   };
+
+   process.on('SIGINT', handler);
+ }
+
+ /**
+  * Main multipart upload function
+  */
+ export async function uploadMultipart(
+   filePath: string,
+   config: S3Config,
+   key: string,
+   options: MultipartOptions
+ ): Promise<MultipartOutcome> {
+   const file = Bun.file(filePath);
+   const stat = await file.stat();
+   const fileSize = file.size;
+   const fileModified = stat.mtime.getTime();
+   const endpoint = getEndpoint(config);
+
+   let state = await loadState(filePath);
+
+   // Check if we can resume
+   if (state) {
+     // Check if file changed
+     if (await hasFileChanged(filePath, state)) {
+       state = null; // Force fresh start
+     }
+   }
+
+   // Initialize progress
+   const totalParts = Math.ceil(fileSize / options.chunkSize);
+   const progress = createProgressState(key, totalParts, fileSize);
+
+   try {
+     if (!state) {
+       // Start new upload
+       const uploadId = await initiateMultipartUpload(config, key);
+       state = createInitialState(
+         uploadId,
+         config.bucket,
+         key,
+         fileSize,
+         fileModified,
+         options.chunkSize,
+         config.provider,
+         endpoint
+       );
+       await saveState(filePath, state);
+     } else {
+       // Resuming - verify parts still exist on S3
+       const remoteParts = await listParts(config, key, state.uploadId);
+       if (remoteParts.length === 0 && state.completedParts.length > 0) {
+         // Upload expired, start fresh
+         await deleteState(filePath);
+         const uploadId = await initiateMultipartUpload(config, key);
+         state = createInitialState(
+           uploadId,
+           config.bucket,
+           key,
+           fileSize,
+           fileModified,
+           options.chunkSize,
+           config.provider,
+           endpoint
+         );
+         await saveState(filePath, state);
+       }
+     }
+
+     // Setup abort handler
+     setupAbortHandler(filePath, state);
+
+     // Show initial progress bar
+     writeProgress(progress);
+
+     // Upload all parts
+     const allParts = await uploadPartsInParallel(
+       config,
+       filePath,
+       state,
+       progress,
+       options.connections
+     );
+
+     if (isAborting) {
+       return { error: 'Upload interrupted', success: false };
+     }
+
+     finishProgress();
+
+     // Complete the upload
+     await completeMultipartUpload(config, key, state.uploadId, allParts);
+
+     // Clean up state file
+     await deleteState(filePath);
+
+     const publicUrl = `${config.publicUrlBase}/${key}`;
+     return { publicUrl, success: true };
+   } catch (err) {
+     finishProgress();
+     return {
+       error: err instanceof Error ? err.message : String(err),
+       success: false
+     };
+   }
+ }
+
+ /**
+  * Check if there's a resumable upload and prompt user
+  */
+ export async function checkResumableUpload(filePath: string): Promise<{
+   canResume: boolean;
+   state: UploadState | null;
+   percentComplete: number;
+ }> {
+   const state = await loadState(filePath);
+
+   if (!state) {
+     return { canResume: false, percentComplete: 0, state: null };
+   }
+
+   // Check if file has changed
+   if (await hasFileChanged(filePath, state)) {
+     return { canResume: false, percentComplete: 0, state };
+   }
+
+   const percentComplete = Math.round((state.completedParts.length / state.totalParts) * 100);
+
+   return { canResume: true, percentComplete, state };
+ }
+
+ /**
+  * Abort and clean up an existing upload
+  */
+ export async function cleanupExistingUpload(filePath: string, config: S3Config): Promise<void> {
+   const state = await loadState(filePath);
+
+   if (state) {
+     await abortMultipartUpload(config, state.key, state.uploadId);
+     await deleteState(filePath);
+   }
+ }
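For context, this is how a caller might wire the module's exports together. A minimal sketch, not code from the package: the S3Config field names are inferred from what this module actually reads (accessKeyId, secretAccessKey, region, provider, bucket, publicUrlBase), and the file path, key, and config values are placeholders. One real constraint worth noting: S3-compatible services require every part except the last to be at least 5 MiB, which bounds chunkSize from below.

```ts
// Hypothetical caller — not part of the package; values are placeholders.
import { checkResumableUpload, uploadMultipart } from './lib/multipart';
import type { S3Config } from './lib/providers';

const config = {
  accessKeyId: process.env.AWS_ACCESS_KEY_ID ?? '',
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY ?? '',
  region: 'us-east-1',
  provider: 'aws',
  bucket: 'my-bucket',
  publicUrlBase: 'https://cdn.example.com'
} as S3Config; // cast: the exact S3Config shape lives in providers.ts

const filePath = './backup.tar.gz';

// Offer to resume when a state file exists and the source file is unchanged
const resumable = await checkResumableUpload(filePath);
if (resumable.canResume) {
  console.log(`Resuming upload at ${resumable.percentComplete}%`);
}

const outcome = await uploadMultipart(filePath, config, 'backups/backup.tar.gz', {
  chunkSize: 8 * 1024 * 1024, // 8 MiB; S3 requires >= 5 MiB for all but the last part
  connections: 4
});

console.log(outcome.success ? outcome.publicUrl : `Failed: ${outcome.error}`);
```

The MultipartOutcome discriminated union makes this ergonomic: narrowing on `outcome.success` gives the caller either `publicUrl` or `error` with full type safety.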
@@ -0,0 +1,74 @@
+ // src/lib/output.test.ts
+ import { describe, expect, test } from 'bun:test';
+ import { formatBytes, formatDeleteSummary, formatListItem, formatUploadSuccess } from './output';
+
+ describe('formatBytes', () => {
+   test('formats bytes', () => {
+     expect(formatBytes(500)).toBe('500 B');
+   });
+
+   test('formats kilobytes', () => {
+     expect(formatBytes(1024)).toBe('1.0 KB');
+     expect(formatBytes(1536)).toBe('1.5 KB');
+   });
+
+   test('formats megabytes', () => {
+     expect(formatBytes(1024 * 1024)).toBe('1.0 MB');
+     expect(formatBytes(2.5 * 1024 * 1024)).toBe('2.5 MB');
+   });
+
+   test('formats gigabytes', () => {
+     expect(formatBytes(1024 * 1024 * 1024)).toBe('1.0 GB');
+   });
+ });
+
+ describe('formatUploadSuccess (quiet mode)', () => {
+   test('formats single file upload', () => {
+     const result = formatUploadSuccess(
+       'backup.tar.gz',
+       'https://cdn.example.com/backup.tar.gz',
+       2500000,
+       true
+     );
+     expect(result).toBe('backup.tar.gz → https://cdn.example.com/backup.tar.gz (2.4 MB)');
+   });
+ });
+
+ describe('formatListItem (quiet mode)', () => {
+   test('formats list item with tabs', () => {
+     const result = formatListItem(
+       'backups/file.tar.gz',
+       47395635,
+       new Date('2026-01-28T03:00:05Z'),
+       true
+     );
+     expect(result).toContain('backups/file.tar.gz');
+     expect(result).toContain('45.2 MB');
+     expect(result).toContain('2026-01-28');
+   });
+
+   test('formats list item as JSON', () => {
+     const result = formatListItem(
+       'backups/file.tar.gz',
+       47395635,
+       new Date('2026-01-28T03:00:05Z'),
+       true,
+       true
+     );
+     const parsed = JSON.parse(result);
+     expect(parsed.key).toBe('backups/file.tar.gz');
+     expect(parsed.size).toBe(47395635);
+   });
+ });
+
+ describe('formatDeleteSummary', () => {
+   test('formats deletion summary', () => {
+     const result = formatDeleteSummary(3, 138200000, false, true);
+     expect(result).toBe('Deleted 3 objects (131.8 MB)');
+   });
+
+   test('formats dry-run summary', () => {
+     const result = formatDeleteSummary(3, 138200000, true, true);
+     expect(result).toContain('Would delete 3 objects');
+   });
+ });
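The suite sticks to round power-of-two inputs. Two boundary cases are worth knowing about; this is a sketch in the same bun:test style, not tests shipped with the package. Byte counts stay unscaled through 1023, and a value one byte under the 1 MB cutoff formats as '1024.0 KB', because 1048575 / 1024 = 1023.999… and toFixed(1) rounds it up.

```ts
// Hypothetical extra cases — same style as the suite above, not part of the package.
import { expect, test } from 'bun:test';
import { formatBytes } from './output';

test('formatBytes boundaries', () => {
  expect(formatBytes(1023)).toBe('1023 B'); // last unscaled value
  // 1048575 / 1024 = 1023.999..., which toFixed(1) rounds up
  expect(formatBytes(1024 * 1024 - 1)).toBe('1024.0 KB');
});
```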
@@ -0,0 +1,63 @@
+ export function formatBytes(bytes: number): string {
+   if (bytes < 1024) return `${bytes} B`;
+   if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(1)} KB`;
+   if (bytes < 1024 * 1024 * 1024) return `${(bytes / (1024 * 1024)).toFixed(1)} MB`;
+   return `${(bytes / (1024 * 1024 * 1024)).toFixed(1)} GB`;
+ }
+
+ export function formatUploadSuccess(
+   filename: string,
+   url: string,
+   size: number,
+   quiet: boolean
+ ): string {
+   if (quiet) {
+     return `${filename} → ${url} (${formatBytes(size)})`;
+   }
+   // Normal mode formatting handled by UI layer
+   return `${filename} → ${url} (${formatBytes(size)})`;
+ }
+
+ export function formatUploadError(filename: string, error: string): string {
+   return `Error: ${filename} - ${error}`;
+ }
+
+ export function formatListItem(
+   key: string,
+   size: number,
+   lastModified: Date,
+   quiet: boolean,
+   json = false
+ ): string {
+   if (json) {
+     return JSON.stringify({
+       key,
+       lastModified: lastModified.toISOString(),
+       size
+     });
+   }
+
+   if (quiet) {
+     const sizeStr = formatBytes(size).padStart(10);
+     const dateStr = lastModified.toISOString().split('T')[0];
+     return `${key}\t${sizeStr}\t${dateStr}`;
+   }
+
+   // Normal mode with colors handled by UI layer
+   return `${key}\t${formatBytes(size)}\t${lastModified.toISOString()}`;
+ }
+
+ export function formatDeleteSummary(
+   count: number,
+   totalSize: number,
+   dryRun: boolean,
+   _quiet: boolean
+ ): string {
+   const action = dryRun ? 'Would delete' : 'Deleted';
+   return `${action} ${count} objects (${formatBytes(totalSize)})`;
+ }
+
+ export function formatDryRunList(objects: Array<{ key: string; size: number }>): string {
+   const lines = objects.map((o) => ` ${o.key} (${formatBytes(o.size)})`);
+   return lines.join('\n');
+ }