@uploadista/data-store-s3 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. package/.turbo/turbo-build.log +5 -0
  2. package/.turbo/turbo-check.log +5 -0
  3. package/LICENSE +21 -0
  4. package/README.md +588 -0
  5. package/dist/index.d.ts +2 -0
  6. package/dist/index.d.ts.map +1 -0
  7. package/dist/index.js +1 -0
  8. package/dist/observability.d.ts +45 -0
  9. package/dist/observability.d.ts.map +1 -0
  10. package/dist/observability.js +155 -0
  11. package/dist/s3-store-old.d.ts +51 -0
  12. package/dist/s3-store-old.d.ts.map +1 -0
  13. package/dist/s3-store-old.js +765 -0
  14. package/dist/s3-store.d.ts +9 -0
  15. package/dist/s3-store.d.ts.map +1 -0
  16. package/dist/s3-store.js +666 -0
  17. package/dist/services/__mocks__/s3-client-mock.service.d.ts +44 -0
  18. package/dist/services/__mocks__/s3-client-mock.service.d.ts.map +1 -0
  19. package/dist/services/__mocks__/s3-client-mock.service.js +379 -0
  20. package/dist/services/index.d.ts +2 -0
  21. package/dist/services/index.d.ts.map +1 -0
  22. package/dist/services/index.js +1 -0
  23. package/dist/services/s3-client.service.d.ts +68 -0
  24. package/dist/services/s3-client.service.d.ts.map +1 -0
  25. package/dist/services/s3-client.service.js +209 -0
  26. package/dist/test-observability.d.ts +6 -0
  27. package/dist/test-observability.d.ts.map +1 -0
  28. package/dist/test-observability.js +62 -0
  29. package/dist/types.d.ts +81 -0
  30. package/dist/types.d.ts.map +1 -0
  31. package/dist/types.js +1 -0
  32. package/dist/utils/calculations.d.ts +7 -0
  33. package/dist/utils/calculations.d.ts.map +1 -0
  34. package/dist/utils/calculations.js +41 -0
  35. package/dist/utils/error-handling.d.ts +7 -0
  36. package/dist/utils/error-handling.d.ts.map +1 -0
  37. package/dist/utils/error-handling.js +29 -0
  38. package/dist/utils/index.d.ts +4 -0
  39. package/dist/utils/index.d.ts.map +1 -0
  40. package/dist/utils/index.js +3 -0
  41. package/dist/utils/stream-adapter.d.ts +14 -0
  42. package/dist/utils/stream-adapter.d.ts.map +1 -0
  43. package/dist/utils/stream-adapter.js +41 -0
  44. package/package.json +36 -0
  45. package/src/__tests__/integration/s3-store.integration.test.ts +548 -0
  46. package/src/__tests__/multipart-logic.test.ts +395 -0
  47. package/src/__tests__/s3-store.edge-cases.test.ts +681 -0
  48. package/src/__tests__/s3-store.performance.test.ts +622 -0
  49. package/src/__tests__/s3-store.test.ts +662 -0
  50. package/src/__tests__/utils/performance-helpers.ts +459 -0
  51. package/src/__tests__/utils/test-data-generator.ts +331 -0
  52. package/src/__tests__/utils/test-setup.ts +256 -0
  53. package/src/index.ts +1 -0
  54. package/src/s3-store.ts +1059 -0
  55. package/src/services/__mocks__/s3-client-mock.service.ts +604 -0
  56. package/src/services/index.ts +1 -0
  57. package/src/services/s3-client.service.ts +359 -0
  58. package/src/types.ts +96 -0
  59. package/src/utils/calculations.ts +61 -0
  60. package/src/utils/error-handling.ts +52 -0
  61. package/src/utils/index.ts +3 -0
  62. package/src/utils/stream-adapter.ts +50 -0
  63. package/tsconfig.json +19 -0
  64. package/tsconfig.tsbuildinfo +1 -0
  65. package/vitest.config.ts +15 -0
--- /dev/null
+++ package/src/__tests__/s3-store.edge-cases.test.ts
@@ -0,0 +1,681 @@
+ import { UploadistaError } from "@uploadista/core/errors";
+ import { UploadFileKVStore } from "@uploadista/core/types";
+ import { Effect, Option, Stream } from "effect";
+ import { afterEach, beforeEach, describe, expect, it } from "vitest";
+ import { createS3StoreImplementation } from "../s3-store";
+ import type { S3ClientService } from "../services";
+ import type { S3Store } from "../types";
+ import {
+   createTestDataStream,
+   TEST_FILE_SIZES,
+ } from "./utils/test-data-generator";
+ import {
+   assertFileUploaded,
+   createTestS3StoreConfig,
+   createTestUploadFile,
+   DEFAULT_TEST_CONFIG,
+   type MockS3TestMethods,
+   runTestWithTimeout,
+   setupTestEnvironment,
+   TestLayersWithMockS3,
+ } from "./utils/test-setup";
+
+ describe("S3Store - Edge Cases and Error Handling", () => {
+   let s3Store: S3Store;
+   let mockService: S3ClientService["Type"] & MockS3TestMethods;
+
+   beforeEach(async () => {
+     await runTestWithTimeout(
+       Effect.gen(function* () {
+         mockService = yield* setupTestEnvironment();
+
+         const kvStore = yield* UploadFileKVStore;
+         const config = createTestS3StoreConfig();
+
+         s3Store = yield* createS3StoreImplementation({
+           ...config,
+           kvStore,
+         });
+       }).pipe(Effect.provide(TestLayersWithMockS3())),
+     );
+   });
+
+   afterEach(async () => {
+     await runTestWithTimeout(
+       Effect.gen(function* () {
+         // Clear both S3 mock storage and KV store
+         yield* mockService.clearStorage();
+
+         // Clear all entries from KV store
+         const kvStore = yield* UploadFileKVStore;
+         if (!kvStore.list) {
+           return;
+         }
+         const keys = yield* kvStore.list();
+         if (keys.length > 0) {
+           yield* Effect.all(
+             keys.map((key) => kvStore.delete(key)),
+             { concurrency: "unbounded" },
+           );
+         }
+       }).pipe(Effect.provide(TestLayersWithMockS3())),
+     );
+   });
+
+   describe("Network Failures", () => {
+     it("should handle createMultipartUpload failures", async () => {
+       const testFile = createTestUploadFile(
+         "create-failure",
+         TEST_FILE_SIZES.MEDIUM.size,
+       );
+
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           // Inject error for createMultipartUpload
+           yield* mockService.injectError(
+             "createMultipartUpload",
+             new Error("Network timeout during multipart upload creation"),
+           );
+
+           // Attempt to create upload should fail
+           const result = yield* Effect.either(s3Store.create(testFile));
+
+           expect(result._tag).toBe("Left");
+           if (result._tag === "Left") {
+             expect(result.left).toBeInstanceOf(UploadistaError);
+           }
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+       );
+     });
+
+     it("should handle uploadPart failures", async () => {
+       const testFile = createTestUploadFile(
+         "upload-failure",
+         TEST_FILE_SIZES.MEDIUM.size,
+       );
+       const testData = createTestDataStream(testFile.size ?? 0);
+
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           yield* s3Store.create(testFile);
+
+           // Inject error for uploadPart
+           yield* mockService.injectError(
+             "uploadPart",
+             new Error("Part upload failed due to network error"),
+           );
+
+           // Attempt to write should fail
+           const result = yield* Effect.either(
+             s3Store.write(
+               {
+                 file_id: testFile.id,
+                 stream: testData,
+                 offset: 0,
+               },
+               { onProgress: undefined },
+             ),
+           );
+
+           expect(result._tag).toBe("Left");
+           if (result._tag === "Left") {
+             expect(result.left).toBeInstanceOf(UploadistaError);
+           }
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+         15000, // Increased timeout for retry logic with persistent error injection
+       );
+     }, 15000);
+
+     it("should handle completeMultipartUpload failures", async () => {
+       const testFile = createTestUploadFile(
+         "complete-failure",
+         TEST_FILE_SIZES.MEDIUM.size,
+       );
+       const testData = createTestDataStream(testFile.size ?? 0);
+
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           yield* s3Store.create(testFile);
+
+           // Inject error for completeMultipartUpload
+           yield* mockService.injectError(
+             "completeMultipartUpload",
+             new Error("Failed to complete multipart upload"),
+           );
+
+           // Write should fail at completion stage
+           const result = yield* Effect.either(
+             s3Store.write(
+               {
+                 file_id: testFile.id,
+                 stream: testData,
+                 offset: 0,
+               },
+               { onProgress: undefined },
+             ),
+           );
+
+           expect(result._tag).toBe("Left");
+           if (result._tag === "Left") {
+             expect(result.left).toBeInstanceOf(UploadistaError);
+           }
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+         20000,
+       );
+     });
+
+     it("should handle intermittent network failures with retry", async () => {
+       const testFile = createTestUploadFile(
+         "retry-test",
+         TEST_FILE_SIZES.SMALL_BASIC.size,
+       );
+       const testData = createTestDataStream(testFile.size ?? 0);
+
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           // Set up mock to fail randomly but not too often
+           yield* mockService.setConfig({
+             ...DEFAULT_TEST_CONFIG,
+             errorRate: 0.2, // 20% failure rate to test retry logic
+           });
+
+           yield* s3Store.create(testFile);
+
+           // This might fail due to random errors, but should eventually succeed with retries
+           const result = yield* Effect.either(
+             s3Store.write(
+               {
+                 file_id: testFile.id,
+                 stream: testData,
+                 offset: 0,
+               },
+               { onProgress: undefined },
+             ),
+           );
+
+           // Note: The current implementation has retry logic, so this might succeed
+           // If it fails, it should be due to UploadistaError
+           if (result._tag === "Left") {
+             expect(result.left).toBeInstanceOf(UploadistaError);
+           } else {
+             expect(result.right).toBe(testFile.size);
+           }
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+         15000,
+       );
+     });
+   });
+
+   describe("Invalid Input Handling", () => {
+     it("should handle empty streams", async () => {
+       const testFile = createTestUploadFile("empty-stream", 0);
+       const emptyStream = Stream.empty;
+
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           yield* s3Store.create(testFile);
+
+           const result = yield* s3Store.write(
+             {
+               file_id: testFile.id,
+               stream: emptyStream,
+               offset: 0,
+             },
+             { onProgress: undefined },
+           );
+
+           expect(result).toBe(0);
+
+           // Should handle empty file correctly
+           const storage = yield* mockService.getStorage();
+           const uploadedFile = storage.objects.get(testFile.id);
+           expect(uploadedFile).toBeDefined();
+           expect(uploadedFile?.length).toBe(0);
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+       );
+     });
+
+     it("should handle non-existent upload IDs", async () => {
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           const result = yield* Effect.either(
+             s3Store.getUpload("non-existent-id"),
+           );
+
+           expect(result._tag).toBe("Left");
+           if (result._tag === "Left") {
+             expect(result.left).toBeInstanceOf(UploadistaError);
+           }
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+       );
+     });
+
+     it("should handle invalid file sizes", async () => {
+       const testFile = createTestUploadFile("invalid-size", -1);
+
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           const result = yield* Effect.either(s3Store.create(testFile));
+
+           // The store should handle this gracefully
+           // Current implementation might allow this, so we test the behavior
+           if (result._tag === "Right") {
+             expect(result.right.size).toBe(-1);
+           }
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+       );
+     });
+
+     it("should handle corrupted streams", async () => {
+       const testFile = createTestUploadFile(
+         "corrupted-stream",
+         TEST_FILE_SIZES.SMALL_BASIC.size,
+       );
+
+       // Create a stream that fails halfway through
+       const corruptedStream = Stream.unfoldEffect(0, (n) => {
+         const size = testFile.size ?? 0;
+         if (n >= size) return Effect.succeed(Option.none());
+
+         if (n > size / 2) {
+           // Simulate stream corruption using Effect.fail
+           return Effect.fail(
+             UploadistaError.fromCode(
+               "FILE_WRITE_ERROR",
+               new Error("Stream corrupted"),
+             ),
+           );
+         }
+
+         const chunk = new Uint8Array(Math.min(1024, size - n)).fill(n % 256);
+         return Effect.succeed(Option.some([chunk, n + chunk.length]));
+       });
+
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           yield* s3Store.create(testFile);
+
+           const result = yield* Effect.either(
+             s3Store.write(
+               {
+                 file_id: testFile.id,
+                 stream: corruptedStream,
+                 offset: 0,
+               },
+               { onProgress: undefined },
+             ),
+           );
+
+           expect(result._tag).toBe("Left");
+           if (result._tag === "Left") {
+             expect(result.left).toBeInstanceOf(Error);
+           }
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+       );
+     });
+   });
+
+   describe("Concurrent Access", () => {
+     it("should handle concurrent writes to the same file", async () => {
+       const testFile = createTestUploadFile(
+         "concurrent-write",
+         TEST_FILE_SIZES.MEDIUM.size,
+       );
+
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           yield* s3Store.create(testFile);
+
+           const size = testFile.size ?? 0;
+           const stream1 = createTestDataStream(size, {
+             type: "random",
+             seed: 1,
+           });
+           const stream2 = createTestDataStream(size, {
+             type: "random",
+             seed: 2,
+           });
+
+           // Start two concurrent writes to the same file
+           const write1 = s3Store.write(
+             {
+               file_id: testFile.id,
+               stream: stream1,
+               offset: 0,
+             },
+             { onProgress: undefined },
+           );
+
+           const write2 = s3Store.write(
+             {
+               file_id: testFile.id,
+               stream: stream2,
+               offset: 0,
+             },
+             { onProgress: undefined },
+           );
+
+           // Both writes might succeed or one might fail due to race conditions
+           const results = yield* Effect.all([
+             Effect.either(write1),
+             Effect.either(write2),
+           ]);
+
+           // At least one should succeed or both should fail with proper error handling
+           const successCount = results.filter((r) => r._tag === "Right").length;
+           const failureCount = results.filter((r) => r._tag === "Left").length;
+
+           expect(successCount + failureCount).toBe(2);
+
+           // If there are failures, they should be UploadistaErrors
+           results.forEach((result) => {
+             if (result._tag === "Left") {
+               expect(result.left).toBeInstanceOf(Error);
+             }
+           });
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+         30000,
+       );
+     });
+
+     it("should handle concurrent creates for different files", async () => {
+       const fileCount = 5;
+       const creates = Array.from({ length: fileCount }, (_, i) => {
+         const testFile = createTestUploadFile(
+           `concurrent-create-${i}`,
+           TEST_FILE_SIZES.SMALL_BASIC.size,
+         );
+         return s3Store.create(testFile);
+       });
+
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           const results = yield* Effect.all(creates.map(Effect.either));
+
+           // All creates should succeed
+           results.forEach((result, i) => {
+             expect(result._tag).toBe("Right");
+             if (result._tag === "Right") {
+               expect(result.right.id).toBe(`concurrent-create-${i}`);
+             }
+           });
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+       );
+     });
+   });
+
+   describe("Resource Limits", () => {
+     it("should handle files exceeding configured limits", async () => {
+       const largeSize = 10 * 1024 * 1024; // 10MB
+       const testFile = createTestUploadFile("size-limit", largeSize);
+
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           // Set a smaller max object size
+           yield* mockService.setConfig({
+             ...DEFAULT_TEST_CONFIG,
+             maxObjectSize: 5 * 1024 * 1024, // 5MB limit
+           });
+
+           yield* s3Store.create(testFile);
+
+           const testData = createTestDataStream(largeSize);
+
+           const result = yield* Effect.either(
+             s3Store.write(
+               {
+                 file_id: testFile.id,
+                 stream: testData,
+                 offset: 0,
+               },
+               { onProgress: undefined },
+             ),
+           );
+
+           expect(result._tag).toBe("Left");
+           if (result._tag === "Left") {
+             expect(result.left).toBeInstanceOf(UploadistaError);
+           }
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+         25000, // Increased timeout for retry logic
+       );
+     });
+
+     it("should handle part count limits", async () => {
+       const testFile = createTestUploadFile(
+         "part-limit",
+         TEST_FILE_SIZES.LARGE.size,
+       );
+
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           // Create store with very small part size to exceed part limits
+           const kvStore = yield* UploadFileKVStore;
+           const config = createTestS3StoreConfig({
+             partSize: 1024 * 1024, // 1MB parts
+             maxMultipartParts: 5, // Very low limit
+           });
+
+           const limitedStore = yield* createS3StoreImplementation({
+             ...config,
+             kvStore,
+           });
+
+           yield* limitedStore.create(testFile);
+
+           const testData = createTestDataStream(testFile.size ?? 0);
+
+           const result = yield* Effect.either(
+             limitedStore.write(
+               {
+                 file_id: testFile.id,
+                 stream: testData,
+                 offset: 0,
+               },
+               { onProgress: undefined },
+             ),
+           );
+
+           // The system should automatically adjust part size to stay within limits
+           // So this might actually succeed with larger parts
+           if (result._tag === "Right") {
+             expect(result.right).toBe(testFile.size);
+           }
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+         45000,
+       );
+     });
+   });
+
+   describe("Cleanup and Cancellation", () => {
+     it("should clean up incomplete uploads on abort", async () => {
+       const testFile = createTestUploadFile(
+         "abort-test",
+         TEST_FILE_SIZES.MEDIUM.size,
+       );
+       const testData = createTestDataStream(testFile.size ?? 0);
+
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           yield* s3Store.create(testFile);
+
+           // Start upload but don't complete it
+           yield* mockService.injectError(
+             "completeMultipartUpload",
+             new Error("Simulated completion failure"),
+           );
+
+           const result = yield* Effect.either(
+             s3Store.write(
+               {
+                 file_id: testFile.id,
+                 stream: testData,
+                 offset: 0,
+               },
+               { onProgress: undefined },
+             ),
+           );
+
+           expect(result._tag).toBe("Left");
+
+           // Remove the upload (this should clean up)
+           yield* s3Store.remove(testFile.id);
+
+           // Verify cleanup
+           const storage = yield* mockService.getStorage();
+           expect(storage.objects.has(testFile.id)).toBe(false);
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+         20000,
+       );
+     });
+
+     it("should handle expired upload cleanup", async () => {
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           // Create some multipart uploads that would be "expired"
+           const testFile1 = createTestUploadFile(
+             "expired-1",
+             TEST_FILE_SIZES.MEDIUM.size,
+           );
+           const testFile2 = createTestUploadFile(
+             "expired-2",
+             TEST_FILE_SIZES.MEDIUM.size,
+           );
+
+           yield* s3Store.create(testFile1);
+           yield* s3Store.create(testFile2);
+
+           // Run cleanup
+           const deletedCount = yield* s3Store.deleteExpired;
+
+           // The mock doesn't simulate actual expiration, so this tests the mechanism
+           expect(deletedCount).toBeGreaterThanOrEqual(0);
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+       );
+     });
+   });
+
+   describe("Data Integrity", () => {
+     it("should detect mismatched part ETags", async () => {
+       const testFile = createTestUploadFile(
+         "etag-test",
+         TEST_FILE_SIZES.MEDIUM.size,
+       );
+       const testData = createTestDataStream(testFile.size ?? 0);
+
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           yield* s3Store.create(testFile);
+
+           // Normal upload should work
+           const result = yield* s3Store.write(
+             {
+               file_id: testFile.id,
+               stream: testData,
+               offset: 0,
+             },
+             { onProgress: undefined },
+           );
+
+           expect(result).toBe(testFile.size);
+
+           // Verify the file was uploaded and has correct size
+           yield* assertFileUploaded(
+             mockService,
+             testFile.id,
+             testFile.size ?? 0,
+           );
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+         20000,
+       );
+     });
+
+     it("should handle partial upload recovery", async () => {
+       const testFile = createTestUploadFile(
+         "partial-recovery",
+         TEST_FILE_SIZES.MEDIUM.size,
+       );
+       const partialData = createTestDataStream((testFile.size ?? 0) / 2);
+
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           yield* s3Store.create(testFile);
+
+           // Upload first half
+           const partialResult = yield* s3Store.write(
+             {
+               file_id: testFile.id,
+               stream: partialData,
+               offset: 0,
+             },
+             { onProgress: undefined },
+           );
+
+           expect(partialResult).toBe((testFile.size ?? 0) / 2);
+
+           // Check upload status
+           const uploadInfo = yield* s3Store.getUpload(testFile.id);
+           expect(uploadInfo.offset).toBe((testFile.size ?? 0) / 2);
+           expect(uploadInfo.size).toBe(testFile.size ?? 0);
+
+           // Upload should be incomplete
+           const storage = yield* mockService.getStorage();
+           expect(storage.objects.has(testFile.id)).toBe(false); // Not completed yet
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+         20000,
+       );
+     });
+   });
+
+   describe("Error Recovery", () => {
+     it("should recover from temporary storage issues", async () => {
+       const testFile = createTestUploadFile(
+         "recovery-test",
+         TEST_FILE_SIZES.SMALL_BASIC.size,
+       );
+       const testData = createTestDataStream(testFile.size ?? 0);
+
+       await runTestWithTimeout(
+         Effect.gen(function* () {
+           yield* s3Store.create(testFile);
+
+           // Set high error rate initially
+           yield* mockService.setConfig({
+             ...DEFAULT_TEST_CONFIG,
+             uploadFailureRate: 0.8, // 80% failure rate
+           });
+
+           // First attempt likely to fail
+           yield* Effect.either(
+             s3Store.write(
+               {
+                 file_id: testFile.id,
+                 stream: testData,
+                 offset: 0,
+               },
+               { onProgress: undefined },
+             ),
+           );
+
+           // Reduce error rate and try again
+           yield* mockService.setConfig({
+             ...DEFAULT_TEST_CONFIG,
+             uploadFailureRate: 0, // No failures
+           });
+
+           const result2 = yield* s3Store.write(
+             {
+               file_id: testFile.id,
+               stream: createTestDataStream(testFile.size ?? 0),
+               offset: 0,
+             },
+             { onProgress: undefined },
+           );
+
+           expect(result2).toBe(testFile.size ?? 0);
+         }).pipe(Effect.provide(TestLayersWithMockS3())),
+         20000,
+       );
+     });
+   });
+ });