@uploadista/data-store-s3 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65) hide show
  1. package/.turbo/turbo-build.log +5 -0
  2. package/.turbo/turbo-check.log +5 -0
  3. package/LICENSE +21 -0
  4. package/README.md +588 -0
  5. package/dist/index.d.ts +2 -0
  6. package/dist/index.d.ts.map +1 -0
  7. package/dist/index.js +1 -0
  8. package/dist/observability.d.ts +45 -0
  9. package/dist/observability.d.ts.map +1 -0
  10. package/dist/observability.js +155 -0
  11. package/dist/s3-store-old.d.ts +51 -0
  12. package/dist/s3-store-old.d.ts.map +1 -0
  13. package/dist/s3-store-old.js +765 -0
  14. package/dist/s3-store.d.ts +9 -0
  15. package/dist/s3-store.d.ts.map +1 -0
  16. package/dist/s3-store.js +666 -0
  17. package/dist/services/__mocks__/s3-client-mock.service.d.ts +44 -0
  18. package/dist/services/__mocks__/s3-client-mock.service.d.ts.map +1 -0
  19. package/dist/services/__mocks__/s3-client-mock.service.js +379 -0
  20. package/dist/services/index.d.ts +2 -0
  21. package/dist/services/index.d.ts.map +1 -0
  22. package/dist/services/index.js +1 -0
  23. package/dist/services/s3-client.service.d.ts +68 -0
  24. package/dist/services/s3-client.service.d.ts.map +1 -0
  25. package/dist/services/s3-client.service.js +209 -0
  26. package/dist/test-observability.d.ts +6 -0
  27. package/dist/test-observability.d.ts.map +1 -0
  28. package/dist/test-observability.js +62 -0
  29. package/dist/types.d.ts +81 -0
  30. package/dist/types.d.ts.map +1 -0
  31. package/dist/types.js +1 -0
  32. package/dist/utils/calculations.d.ts +7 -0
  33. package/dist/utils/calculations.d.ts.map +1 -0
  34. package/dist/utils/calculations.js +41 -0
  35. package/dist/utils/error-handling.d.ts +7 -0
  36. package/dist/utils/error-handling.d.ts.map +1 -0
  37. package/dist/utils/error-handling.js +29 -0
  38. package/dist/utils/index.d.ts +4 -0
  39. package/dist/utils/index.d.ts.map +1 -0
  40. package/dist/utils/index.js +3 -0
  41. package/dist/utils/stream-adapter.d.ts +14 -0
  42. package/dist/utils/stream-adapter.d.ts.map +1 -0
  43. package/dist/utils/stream-adapter.js +41 -0
  44. package/package.json +36 -0
  45. package/src/__tests__/integration/s3-store.integration.test.ts +548 -0
  46. package/src/__tests__/multipart-logic.test.ts +395 -0
  47. package/src/__tests__/s3-store.edge-cases.test.ts +681 -0
  48. package/src/__tests__/s3-store.performance.test.ts +622 -0
  49. package/src/__tests__/s3-store.test.ts +662 -0
  50. package/src/__tests__/utils/performance-helpers.ts +459 -0
  51. package/src/__tests__/utils/test-data-generator.ts +331 -0
  52. package/src/__tests__/utils/test-setup.ts +256 -0
  53. package/src/index.ts +1 -0
  54. package/src/s3-store.ts +1059 -0
  55. package/src/services/__mocks__/s3-client-mock.service.ts +604 -0
  56. package/src/services/index.ts +1 -0
  57. package/src/services/s3-client.service.ts +359 -0
  58. package/src/types.ts +96 -0
  59. package/src/utils/calculations.ts +61 -0
  60. package/src/utils/error-handling.ts +52 -0
  61. package/src/utils/index.ts +3 -0
  62. package/src/utils/stream-adapter.ts +50 -0
  63. package/tsconfig.json +19 -0
  64. package/tsconfig.tsbuildinfo +1 -0
  65. package/vitest.config.ts +15 -0
@@ -0,0 +1,359 @@
1
+ import type AWS from "@aws-sdk/client-s3";
2
+ import type { S3ClientConfig } from "@aws-sdk/client-s3";
3
+ import { NoSuchKey, NotFound, S3 } from "@aws-sdk/client-s3";
4
+ import type { UploadistaError } from "@uploadista/core/errors";
5
+ import { withS3ApiMetrics } from "@uploadista/observability";
6
+ import { Context, Effect, Layer } from "effect";
7
+ import type { MultipartUploadInfo, S3OperationContext } from "../types";
8
+ import {
9
+ handleS3Error,
10
+ handleS3NotFoundError,
11
+ partKey,
12
+ toReadableStream,
13
+ } from "../utils";
14
+
15
/**
 * Effect service tag exposing the S3 operations the store needs.
 *
 * The concrete implementation (see makeS3ClientService) wraps the AWS SDK v3
 * client; every method returns an Effect that fails with UploadistaError.
 */
export class S3ClientService extends Context.Tag("S3ClientService")<
  S3ClientService,
  {
    // Name of the bucket all single-key operations target.
    readonly bucket: string;

    // Basic S3 operations
    // Streams an object's body as a Web ReadableStream.
    readonly getObject: (
      key: string,
    ) => Effect.Effect<ReadableStream, UploadistaError>;
    // Resolves with the object's size in bytes, or undefined for a missing key.
    readonly headObject: (
      key: string,
    ) => Effect.Effect<number | undefined, UploadistaError>;
    // Uploads a whole object; resolves with its ETag (may be "").
    readonly putObject: (
      key: string,
      body: Uint8Array,
    ) => Effect.Effect<string, UploadistaError>;
    readonly deleteObject: (
      key: string,
    ) => Effect.Effect<void, UploadistaError>;
    // Batch-deletes the given keys in a single request.
    readonly deleteObjects: (
      keys: string[],
    ) => Effect.Effect<AWS.DeleteObjectsCommandOutput, UploadistaError>;

    // Multipart upload operations
    readonly createMultipartUpload: (
      context: S3OperationContext,
    ) => Effect.Effect<MultipartUploadInfo, UploadistaError>;
    // Uploads one part; resolves with that part's ETag.
    readonly uploadPart: (
      context: S3OperationContext & { partNumber: number; data: Uint8Array },
    ) => Effect.Effect<string, UploadistaError>;
    // Completes the upload; resolves with the object Location, if S3 returns one.
    readonly completeMultipartUpload: (
      context: S3OperationContext,
      parts: Array<AWS.Part>,
    ) => Effect.Effect<string | undefined, UploadistaError>;
    readonly abortMultipartUpload: (
      context: S3OperationContext,
    ) => Effect.Effect<void, UploadistaError>;
    // Lists one page of already-uploaded parts, starting after partNumberMarker.
    readonly listParts: (
      context: S3OperationContext & { partNumberMarker?: string },
    ) => Effect.Effect<
      {
        parts: AWS.Part[];
        isTruncated: boolean;
        nextPartNumberMarker?: string;
      },
      UploadistaError
    >;
    readonly listMultipartUploads: (
      keyMarker?: string,
      uploadIdMarker?: string,
    ) => Effect.Effect<AWS.ListMultipartUploadsCommandOutput, UploadistaError>;

    // Incomplete part operations (buffered bytes stored under `${id}.part`)
    // Streams the buffered incomplete part, or undefined when none exists.
    readonly getIncompletePart: (
      id: string,
    ) => Effect.Effect<ReadableStream | undefined, UploadistaError>;
    readonly getIncompletePartSize: (
      id: string,
    ) => Effect.Effect<number | undefined, UploadistaError>;
    readonly putIncompletePart: (
      id: string,
      data: Uint8Array,
    ) => Effect.Effect<string, UploadistaError>;
    readonly deleteIncompletePart: (
      id: string,
    ) => Effect.Effect<void, UploadistaError>;
  }
>() {}
83
+
84
+ export const makeS3ClientService = (
85
+ s3ClientConfig: S3ClientConfig,
86
+ bucket: string,
87
+ ) => {
88
+ const s3Client = new S3(s3ClientConfig);
89
+ const getObject = (key: string) =>
90
+ Effect.tryPromise({
91
+ try: async () => {
92
+ const data = await s3Client.getObject({
93
+ Bucket: bucket,
94
+ Key: key,
95
+ });
96
+ return toReadableStream(data.Body);
97
+ },
98
+ catch: (error) => handleS3Error("getObject", error, { key, bucket }),
99
+ });
100
+
101
+ const headObject = (key: string) =>
102
+ Effect.tryPromise({
103
+ try: async () => {
104
+ try {
105
+ const data = await s3Client.headObject({
106
+ Bucket: bucket,
107
+ Key: key,
108
+ });
109
+ return data.ContentLength;
110
+ } catch (error) {
111
+ if (error instanceof NotFound) {
112
+ return undefined;
113
+ }
114
+ throw error;
115
+ }
116
+ },
117
+ catch: (error) => handleS3Error("headObject", error, { key, bucket }),
118
+ });
119
+
120
+ const putObject = (key: string, body: Uint8Array) =>
121
+ Effect.tryPromise({
122
+ try: async () => {
123
+ const response = await s3Client.putObject({
124
+ Bucket: bucket,
125
+ Key: key,
126
+ Body: body,
127
+ });
128
+ return response.ETag || "";
129
+ },
130
+ catch: (error) =>
131
+ handleS3Error("putObject", error, { key, bucket, size: body.length }),
132
+ });
133
+
134
+ const deleteObject = (key: string) =>
135
+ Effect.tryPromise({
136
+ try: async () => {
137
+ await s3Client.deleteObject({
138
+ Bucket: bucket,
139
+ Key: key,
140
+ });
141
+ },
142
+ catch: (error) => handleS3Error("deleteObject", error, { key, bucket }),
143
+ });
144
+
145
+ const deleteObjects = (keys: string[]) =>
146
+ Effect.tryPromise({
147
+ try: () =>
148
+ s3Client.deleteObjects({
149
+ Bucket: bucket,
150
+ Delete: {
151
+ Objects: keys.map((key) => ({ Key: key })),
152
+ },
153
+ }),
154
+ catch: (error) =>
155
+ handleS3Error("deleteObjects", error, { keys: keys.length, bucket }),
156
+ });
157
+
158
+ const createMultipartUpload = (context: S3OperationContext) =>
159
+ withS3ApiMetrics(
160
+ "createMultipartUpload",
161
+ Effect.tryPromise({
162
+ try: async () => {
163
+ const request: AWS.CreateMultipartUploadCommandInput = {
164
+ Bucket: context.bucket,
165
+ Key: context.key,
166
+ };
167
+
168
+ if (context.contentType) {
169
+ request.ContentType = context.contentType;
170
+ }
171
+
172
+ if (context.cacheControl) {
173
+ request.CacheControl = context.cacheControl;
174
+ }
175
+
176
+ const res = await s3Client.createMultipartUpload(request);
177
+
178
+ if (!res.UploadId) {
179
+ throw new Error("Upload ID is undefined");
180
+ }
181
+ if (!res.Key) {
182
+ throw new Error("Key is undefined");
183
+ }
184
+
185
+ return {
186
+ uploadId: res.UploadId,
187
+ bucket: context.bucket,
188
+ key: res.Key,
189
+ };
190
+ },
191
+ catch: (error) =>
192
+ handleS3Error("createMultipartUpload", error, context),
193
+ }),
194
+ );
195
+
196
+ const uploadPart = (
197
+ context: S3OperationContext & { partNumber: number; data: Uint8Array },
198
+ ) =>
199
+ withS3ApiMetrics(
200
+ "uploadPart",
201
+ Effect.tryPromise({
202
+ try: () =>
203
+ s3Client.uploadPart({
204
+ Bucket: context.bucket,
205
+ Key: context.key,
206
+ UploadId: context.uploadId,
207
+ PartNumber: context.partNumber,
208
+ Body: context.data,
209
+ }),
210
+ catch: (error) =>
211
+ handleS3Error("uploadPart", error, {
212
+ upload_id: context.key,
213
+ part_number: context.partNumber,
214
+ part_size: context.data.length,
215
+ s3_bucket: context.bucket,
216
+ }),
217
+ }).pipe(Effect.map((response) => response.ETag as string)),
218
+ );
219
+
220
+ const completeMultipartUpload = (
221
+ context: S3OperationContext,
222
+ parts: Array<AWS.Part>,
223
+ ) =>
224
+ withS3ApiMetrics(
225
+ "completeMultipartUpload",
226
+ Effect.tryPromise({
227
+ try: () =>
228
+ s3Client
229
+ .completeMultipartUpload({
230
+ Bucket: context.bucket,
231
+ Key: context.key,
232
+ UploadId: context.uploadId,
233
+ MultipartUpload: {
234
+ Parts: parts.map((part) => ({
235
+ ETag: part.ETag,
236
+ PartNumber: part.PartNumber,
237
+ })),
238
+ },
239
+ })
240
+ .then((response) => response.Location),
241
+ catch: (error) =>
242
+ handleS3Error("completeMultipartUpload", error, {
243
+ upload_id: context.key,
244
+ parts_count: parts.length,
245
+ s3_bucket: context.bucket,
246
+ }),
247
+ }),
248
+ );
249
+
250
+ const abortMultipartUpload = (context: S3OperationContext) =>
251
+ Effect.tryPromise({
252
+ try: async () => {
253
+ await s3Client.abortMultipartUpload({
254
+ Bucket: context.bucket,
255
+ Key: context.key,
256
+ UploadId: context.uploadId,
257
+ });
258
+ },
259
+ catch: (error) =>
260
+ handleS3NotFoundError("abortMultipartUpload", error, {
261
+ upload_id: context.key,
262
+ s3_bucket: context.bucket,
263
+ }),
264
+ });
265
+
266
+ const listParts = (
267
+ context: S3OperationContext & { partNumberMarker?: string },
268
+ ) =>
269
+ Effect.tryPromise({
270
+ try: async () => {
271
+ const params: AWS.ListPartsCommandInput = {
272
+ Bucket: context.bucket,
273
+ Key: context.key,
274
+ UploadId: context.uploadId,
275
+ PartNumberMarker: context.partNumberMarker,
276
+ };
277
+
278
+ const data = await s3Client.listParts(params);
279
+
280
+ return {
281
+ parts: data.Parts ?? [],
282
+ isTruncated: data.IsTruncated ?? false,
283
+ nextPartNumberMarker: data.NextPartNumberMarker,
284
+ };
285
+ },
286
+ catch: (error) =>
287
+ handleS3Error("listParts", error, {
288
+ upload_id: context.key,
289
+ s3_bucket: context.bucket,
290
+ }),
291
+ });
292
+
293
+ const listMultipartUploads = (keyMarker?: string, uploadIdMarker?: string) =>
294
+ Effect.tryPromise({
295
+ try: () =>
296
+ s3Client.listMultipartUploads({
297
+ Bucket: bucket,
298
+ KeyMarker: keyMarker,
299
+ UploadIdMarker: uploadIdMarker,
300
+ }),
301
+ catch: (error) =>
302
+ handleS3Error("listMultipartUploads", error, { bucket }),
303
+ });
304
+
305
+ const getIncompletePart = (id: string) =>
306
+ Effect.tryPromise({
307
+ try: async () => {
308
+ try {
309
+ const data = await s3Client.getObject({
310
+ Bucket: bucket,
311
+ Key: partKey(id),
312
+ });
313
+ return toReadableStream(data.Body);
314
+ } catch (error) {
315
+ if (error instanceof NoSuchKey) {
316
+ return undefined;
317
+ }
318
+ throw error;
319
+ }
320
+ },
321
+ catch: (error) =>
322
+ handleS3Error("getIncompletePart", error, { upload_id: id, bucket }),
323
+ });
324
+
325
+ const getIncompletePartSize = (id: string) => headObject(partKey(id));
326
+
327
+ const putIncompletePart = (id: string, data: Uint8Array) =>
328
+ putObject(partKey(id), data).pipe(
329
+ Effect.tap(() =>
330
+ Effect.logInfo("Incomplete part uploaded").pipe(
331
+ Effect.annotateLogs({ upload_id: id }),
332
+ ),
333
+ ),
334
+ );
335
+
336
+ const deleteIncompletePart = (id: string) => deleteObject(partKey(id));
337
+
338
+ return {
339
+ bucket,
340
+ getObject,
341
+ headObject,
342
+ putObject,
343
+ deleteObject,
344
+ deleteObjects,
345
+ createMultipartUpload,
346
+ uploadPart,
347
+ completeMultipartUpload,
348
+ abortMultipartUpload,
349
+ listParts,
350
+ listMultipartUploads,
351
+ getIncompletePart,
352
+ getIncompletePartSize,
353
+ putIncompletePart,
354
+ deleteIncompletePart,
355
+ };
356
+ };
357
+
358
/**
 * Builds an Effect Layer that provides S3ClientService backed by a real
 * AWS S3 client for the given configuration and bucket.
 */
export const S3ClientLayer = (s3ClientConfig: S3ClientConfig, bucket: string) =>
  Layer.succeed(S3ClientService, makeS3ClientService(s3ClientConfig, bucket));
package/src/types.ts ADDED
@@ -0,0 +1,96 @@
1
+ import type { S3ClientConfig } from "@aws-sdk/client-s3";
2
+ import type { UploadistaError } from "@uploadista/core/errors";
3
+ import type {
4
+ DataStoreCapabilities,
5
+ DataStoreWriteOptions,
6
+ KvStore,
7
+ UploadFile,
8
+ UploadStrategy,
9
+ } from "@uploadista/core/types";
10
+ import type { Effect } from "effect";
11
+
12
/** Configuration accepted by the S3 data store. */
export type S3StoreOptions = {
  // Public base URL used to build delivery links for stored objects.
  deliveryUrl: string;
  /**
   * The preferred part size for parts sent to S3. Cannot be lower than 5MiB or more than 5GiB.
   * The server calculates the optimal part size, which takes this size into account,
   * but may increase it to not exceed the S3 10K parts limit.
   */
  partSize?: number;
  /**
   * The minimal part size for parts.
   * Can be used to ensure that all non-trailing parts are exactly the same size.
   * Cannot be lower than 5MiB or more than 5GiB.
   */
  minPartSize?: number;
  /**
   * The maximum number of parts allowed in a multipart upload. Defaults to 10,000.
   */
  maxMultipartParts?: number;
  // Whether to tag objects with an expiration date (see shouldUseExpirationTags).
  useTags?: boolean;
  maxConcurrentPartUploads?: number;
  // Uploads older than this are considered expired; 0 disables expiration tagging.
  expirationPeriodInMilliseconds?: number;
  // Options to pass to the AWS S3 SDK, plus the target bucket name.
  s3ClientConfig: S3ClientConfig & { bucket: string };
};

/** One chunk of upload data destined for a single S3 part. */
export type ChunkInfo = {
  partNumber: number;
  data: Uint8Array;
  size: number;
  isFinalPart?: boolean;
};

/** Identifies a multipart upload plus optional per-operation metadata. */
export type S3OperationContext = {
  uploadId: string;
  bucket: string;
  key: string;
  partNumber?: number;
  partSize?: number;
  contentType?: string;
  cacheControl?: string;
};

/** Result of uploading a single part. */
export type PartUploadResult = {
  etag: string;
  partNumber: number;
};

/** Identifiers returned when a multipart upload is created. */
export type MultipartUploadInfo = {
  uploadId: string;
  bucket: string;
  key: string;
};

/** Progress snapshot for an in-flight upload. */
export type UploadProgress = {
  bytesUploaded: number;
  totalBytes: number;
  currentOffset: number;
};

/** Public surface of the S3-backed data store. */
export type S3Store = {
  bucket: string;
  create: (upload: UploadFile) => Effect.Effect<UploadFile, UploadistaError>;
  remove: (id: string) => Effect.Effect<void, UploadistaError>;
  write: (
    options: DataStoreWriteOptions,
    dependencies: { onProgress?: (chunkSize: number) => void },
  ) => Effect.Effect<number, UploadistaError>;
  getUpload: (id: string) => Effect.Effect<UploadFile, UploadistaError>;
  read: (id: string) => Effect.Effect<ReadableStream, UploadistaError>;
  deleteExpired: Effect.Effect<number, UploadistaError>;
  getCapabilities: () => DataStoreCapabilities;
  // Constraints the chunker must respect when splitting incoming data.
  getChunkerConstraints: () => {
    minChunkSize: number;
    maxChunkSize: number;
    optimalChunkSize: number;
    requiresOrderedChunks: boolean;
  };
  validateUploadStrategy: (
    strategy: UploadStrategy,
  ) => Effect.Effect<boolean, never>;
};

/** Store options plus the KV store used to persist upload metadata. */
export type S3StoreConfig = S3StoreOptions & {
  kvStore: KvStore<UploadFile>;
};
@@ -0,0 +1,61 @@
1
+ import type AWS from "@aws-sdk/client-s3";
2
+
3
+ export const calcOffsetFromParts = (parts?: Array<AWS.Part>): number => {
4
+ return parts && parts.length > 0
5
+ ? parts.reduce((a, b) => a + (b?.Size ?? 0), 0)
6
+ : 0;
7
+ };
8
+
9
+ export const calcOptimalPartSize = (
10
+ initSize: number | undefined,
11
+ preferredPartSize: number,
12
+ minPartSize: number,
13
+ maxMultipartParts: number,
14
+ maxUploadSize = 5_497_558_138_880, // 5TiB
15
+ ): number => {
16
+ const size = initSize ?? maxUploadSize;
17
+ let optimalPartSize: number;
18
+
19
+ if (size <= preferredPartSize) {
20
+ // For files smaller than preferred part size, use the file size
21
+ // but ensure it meets S3's minimum requirements for multipart uploads
22
+ optimalPartSize = size;
23
+ } else if (size <= preferredPartSize * maxMultipartParts) {
24
+ // File fits within max parts limit using preferred part size
25
+ optimalPartSize = preferredPartSize;
26
+ } else {
27
+ // File is too large for preferred part size, calculate minimum needed
28
+ optimalPartSize = Math.ceil(size / maxMultipartParts);
29
+ }
30
+
31
+ // Ensure we respect minimum part size for multipart uploads
32
+ // Exception: if the file is smaller than minPartSize, use the file size directly
33
+ const finalPartSize =
34
+ initSize && initSize < minPartSize
35
+ ? optimalPartSize // Single part upload for small files
36
+ : Math.max(optimalPartSize, minPartSize); // Enforce minimum for multipart
37
+
38
+ // Round up to ensure consistent part sizes and align to reasonable boundaries
39
+ // This helps ensure all parts except the last one will have exactly the same size
40
+ const alignment = 1024; // 1KB alignment for better consistency
41
+ return Math.ceil(finalPartSize / alignment) * alignment;
42
+ };
43
+
44
+ export const partKey = (id: string): string => {
45
+ return `${id}.part`;
46
+ };
47
+
48
+ export const shouldUseExpirationTags = (
49
+ expirationPeriodInMilliseconds: number,
50
+ useTags: boolean,
51
+ ): boolean => {
52
+ return expirationPeriodInMilliseconds !== 0 && useTags;
53
+ };
54
+
55
+ export const getExpirationDate = (
56
+ createdAt: string,
57
+ expirationPeriodInMilliseconds: number,
58
+ ): Date => {
59
+ const date = new Date(createdAt);
60
+ return new Date(date.getTime() + expirationPeriodInMilliseconds);
61
+ };
@@ -0,0 +1,52 @@
1
+ import { UploadistaError } from "@uploadista/core/errors";
2
+ import { trackS3Error as logS3Error } from "@uploadista/observability";
3
+ import { Effect } from "effect";
4
+
5
/**
 * Maps an arbitrary S3 SDK failure to an UploadistaError.
 *
 * Side effect: synchronously logs the error with the supplied context via
 * the observability layer before converting it.
 *
 * NOTE(review): every failure maps to FILE_WRITE_ERROR, including failures
 * from read operations (getObject/headObject) — confirm this is intentional.
 *
 * @param operation Name of the S3 operation that failed (for logging).
 * @param error Raw error thrown by the AWS SDK.
 * @param context Extra key/value pairs attached to the log entry.
 * @returns The converted UploadistaError.
 */
export const handleS3Error = (
  operation: string,
  error: unknown,
  context: Record<string, unknown> = {},
): UploadistaError => {
  // Log the error with context
  Effect.runSync(logS3Error(operation, error, context));

  return UploadistaError.fromCode("FILE_WRITE_ERROR", error as Error);
};
15
+
16
+ export const handleS3NotFoundError = (
17
+ operation: string,
18
+ error: unknown,
19
+ context: Record<string, unknown> = {},
20
+ ): UploadistaError => {
21
+ if (
22
+ typeof error === "object" &&
23
+ error !== null &&
24
+ "code" in error &&
25
+ typeof error.code === "string" &&
26
+ ["NotFound", "NoSuchKey", "NoSuchUpload"].includes(error.code)
27
+ ) {
28
+ Effect.runSync(
29
+ Effect.logWarning(`File not found during ${operation} operation`).pipe(
30
+ Effect.annotateLogs({
31
+ error_code: error.code,
32
+ ...context,
33
+ }),
34
+ ),
35
+ );
36
+ return UploadistaError.fromCode("FILE_NOT_FOUND");
37
+ }
38
+
39
+ return handleS3Error(operation, error, context);
40
+ };
41
+
42
+ export const isUploadNotFoundError = (
43
+ error: unknown,
44
+ ): error is { code: "NoSuchUpload" | "NoSuchKey" } => {
45
+ return (
46
+ typeof error === "object" &&
47
+ error !== null &&
48
+ "code" in error &&
49
+ typeof error.code === "string" &&
50
+ (error.code === "NoSuchUpload" || error.code === "NoSuchKey")
51
+ );
52
+ };
@@ -0,0 +1,3 @@
1
+ export * from "./calculations";
2
+ export * from "./error-handling";
3
+ export * from "./stream-adapter";
@@ -0,0 +1,50 @@
1
+ /**
2
+ * Stream adapter utility to handle AWS SDK Body responses across different environments.
3
+ *
4
+ * In Node.js environments, AWS SDK returns Node.js Readable streams.
5
+ * In Cloudflare Workers, it returns Web Streams API ReadableStreams.
6
+ * This utility normalizes both to Web Streams API ReadableStreams.
7
+ */
8
+
9
+ /**
10
+ * Converts various stream types to a Web Streams API ReadableStream
11
+ * @param body The body from AWS SDK response (could be Node.js Readable or Web ReadableStream)
12
+ * @returns A Web Streams API ReadableStream
13
+ */
14
+ export function toReadableStream(body: unknown): ReadableStream {
15
+ // If it's already a Web ReadableStream, return as-is
16
+ if (body instanceof ReadableStream) {
17
+ return body;
18
+ }
19
+
20
+ // If it has a getReader method, it's likely already a ReadableStream
21
+ if (body && typeof body === "object" && "getReader" in body) {
22
+ return body as ReadableStream;
23
+ }
24
+
25
+ // Check if it's a Node.js Readable stream
26
+ if (body && typeof body === "object" && "pipe" in body && "on" in body) {
27
+ const nodeStream = body as NodeJS.ReadableStream;
28
+
29
+ return new ReadableStream({
30
+ start(controller) {
31
+ nodeStream.on("data", (chunk) => {
32
+ controller.enqueue(new Uint8Array(chunk));
33
+ });
34
+
35
+ nodeStream.on("end", () => {
36
+ controller.close();
37
+ });
38
+
39
+ nodeStream.on("error", (error) => {
40
+ controller.error(error);
41
+ });
42
+ },
43
+ });
44
+ }
45
+
46
+ // If it's some other type, try to handle it gracefully
47
+ throw new Error(
48
+ `Unsupported body type: ${typeof body}. Expected ReadableStream or Node.js Readable.`,
49
+ );
50
+ }
package/tsconfig.json ADDED
@@ -0,0 +1,19 @@
1
+ {
2
+ "extends": "@uploadista/typescript-config/server.json",
3
+ "compilerOptions": {
4
+ "baseUrl": "./",
5
+ "paths": {
6
+ "@/*": ["./src/*"]
7
+ },
8
+ "outDir": "./dist",
9
+ "rootDir": "./src"
10
+ },
11
+ "include": ["src"],
12
+ "exclude": [
13
+ "src/**/*.test.ts",
14
+ "src/**/*.test.tsx",
15
+ "src/**/*.spec.ts",
16
+ "src/**/*.spec.tsx",
17
+ "src/**/__tests__/**"
18
+ ]
19
+ }
@@ -0,0 +1 @@
1
+ {"root":["./src/index.ts","./src/s3-store.ts","./src/types.ts","./src/services/index.ts","./src/services/s3-client.service.ts","./src/services/__mocks__/s3-client-mock.service.ts","./src/utils/calculations.ts","./src/utils/error-handling.ts","./src/utils/index.ts","./src/utils/stream-adapter.ts"],"version":"5.9.3"}
@@ -0,0 +1,15 @@
1
+ import { defineConfig } from 'vitest/config';
2
+
3
// Vitest configuration for this package's unit/integration tests.
export default defineConfig({
  test: {
    // Expose describe/it/expect as globals so test files need no imports.
    globals: true,
    environment: 'node',
    include: ['src/**/*.{test,spec}.{js,mjs,cjs,ts,mts,cts,jsx,tsx}'],
    exclude: ['node_modules', 'dist'],
    coverage: {
      // V8-native coverage: no instrumentation build step required.
      provider: 'v8',
      reporter: ['text', 'json', 'html'],
      // Keep build output, type declarations, and the tests themselves
      // out of the coverage report.
      exclude: ['node_modules/', 'dist/', '**/*.d.ts', '**/*.test.ts', '**/*.spec.ts']
    }
  }
});