@uploadista/data-store-r2 0.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,288 @@
+ import type {
+   R2Bucket,
+   ReadableStream,
+ } from "@cloudflare/workers-types";
+ import type { UploadistaError } from "@uploadista/core/errors";
+ import { withS3ApiMetrics } from "@uploadista/observability";
+ import { Context, Effect, Layer } from "effect";
+ import type { MultipartUploadInfo, R2OperationContext, R2UploadedPart } from "../types";
+ import { handleR2Error, handleR2NotFoundError, partKey } from "../utils";
+
+ export class R2ClientService extends Context.Tag("R2ClientService")<
+   R2ClientService,
+   {
+     readonly bucket: string;
+
+     // Basic S3 operations
+     readonly getObject: (
+       key: string,
+     ) => Effect.Effect<ReadableStream, UploadistaError>;
+     readonly headObject: (
+       key: string,
+     ) => Effect.Effect<number | undefined, UploadistaError>;
+     readonly putObject: (
+       key: string,
+       body: Uint8Array,
+     ) => Effect.Effect<string, UploadistaError>;
+     readonly deleteObject: (
+       key: string,
+     ) => Effect.Effect<void, UploadistaError>;
+     readonly deleteObjects: (
+       keys: string[],
+     ) => Effect.Effect<void, UploadistaError>;
+
+     // Multipart upload operations
+     readonly createMultipartUpload: (
+       context: R2OperationContext,
+     ) => Effect.Effect<MultipartUploadInfo, UploadistaError>;
+     readonly uploadPart: (
+       context: R2OperationContext & { partNumber: number; data: Uint8Array },
+     ) => Effect.Effect<string, UploadistaError>;
+     readonly completeMultipartUpload: (
+       context: R2OperationContext,
+       parts: Array<R2UploadedPart>,
+     ) => Effect.Effect<string | undefined, UploadistaError>;
+     readonly abortMultipartUpload: (
+       context: R2OperationContext,
+     ) => Effect.Effect<void, UploadistaError>;
+
+     // Incomplete part operations
+     readonly getIncompletePart: (
+       id: string,
+     ) => Effect.Effect<ReadableStream | undefined, UploadistaError>;
+     readonly getIncompletePartSize: (
+       id: string,
+     ) => Effect.Effect<number | undefined, UploadistaError>;
+     readonly putIncompletePart: (
+       id: string,
+       data: Uint8Array,
+     ) => Effect.Effect<string, UploadistaError>;
+     readonly deleteIncompletePart: (
+       id: string,
+     ) => Effect.Effect<void, UploadistaError>;
+   }
+ >() {}
+
+ export const makeR2ClientService = (
+   r2Bucket: R2Bucket,
+   r2BucketName: string,
+ ) => {
+   const getObject = (key: string) =>
+     Effect.gen(function* () {
+       const data = yield* Effect.tryPromise({
+         try: async () => {
+           const result = await r2Bucket.get(key);
+           if (!result) {
+             throw new Error(`Object not found: ${key}`);
+           }
+           return result.body;
+         },
+         catch: (error) =>
+           handleR2Error("getObject", error, { key, bucket: r2BucketName }),
+       });
+       return data;
+     });
+
+   const headObject = (key: string) =>
+     Effect.tryPromise({
+       try: async () => {
+         const data = await r2Bucket.head(key);
+         if (!data) {
+           return undefined;
+         }
+         return data.size;
+       },
+       catch: (error) =>
+         handleR2Error("headObject", error, { key, bucket: r2BucketName }),
+     });
+
+   const putObject = (key: string, body: Uint8Array) =>
+     Effect.tryPromise({
+       try: async () => {
+         const response = await r2Bucket.put(key, body);
+         if (!response) {
+           throw new Error("Failed to put object");
+         }
+         return response.etag;
+       },
+       catch: (error) =>
+         handleR2Error("putObject", error, {
+           key,
+           bucket: r2BucketName,
+           size: body.length,
+         }),
+     });
+
+   const deleteObject = (key: string) =>
+     Effect.tryPromise({
+       try: async () => {
+         await r2Bucket.delete(key);
+       },
+       catch: (error) =>
+         handleR2Error("deleteObject", error, { key, bucket: r2BucketName }),
+     });
+
+   const deleteObjects = (keys: string[]) =>
+     Effect.tryPromise({
+       try: () => r2Bucket.delete(keys),
+       catch: (error) =>
+         handleR2Error("deleteObjects", error, {
+           keys: keys.length,
+           bucket: r2BucketName,
+         }),
+     });
+
+   const createMultipartUpload = (context: R2OperationContext) =>
+     withS3ApiMetrics(
+       "createMultipartUpload",
+       Effect.tryPromise({
+         try: async () => {
+           const multipartUpload = await r2Bucket.createMultipartUpload(
+             context.key,
+           );
+
+           if (!multipartUpload.uploadId) {
+             throw new Error("Upload ID is undefined");
+           }
+           if (!multipartUpload.key) {
+             throw new Error("Key is undefined");
+           }
+
+           return {
+             uploadId: multipartUpload.uploadId,
+             bucket: context.bucket,
+             key: multipartUpload.key,
+           };
+         },
+         catch: (error) =>
+           handleR2Error("createMultipartUpload", error, context),
+       }),
+     );
+
+   const uploadPart = (
+     context: R2OperationContext & { partNumber: number; data: Uint8Array },
+   ) =>
+     withS3ApiMetrics(
+       "uploadPart",
+       Effect.tryPromise({
+         try: async () => {
+           const multipartUpload = await r2Bucket.resumeMultipartUpload(
+             context.key,
+             context.uploadId,
+           );
+           const part = await multipartUpload.uploadPart(
+             context.partNumber,
+             context.data,
+           );
+           if (!part) {
+             throw new Error("Part is undefined");
+           }
+           return part.etag;
+         },
+         catch: (error) =>
+           handleR2Error("uploadPart", error, {
+             upload_id: context.uploadId,
+             part_number: context.partNumber,
+             part_size: context.data.length,
+             s3_bucket: context.bucket,
+           }),
+       }),
+     );
+
+   const completeMultipartUpload = (
+     context: R2OperationContext,
+     parts: Array<R2UploadedPart>,
+   ) =>
+     withS3ApiMetrics(
+       "completeMultipartUpload",
+       Effect.tryPromise({
+         try: async () => {
+           const multipartUpload = await r2Bucket.resumeMultipartUpload(
+             context.key,
+             context.uploadId,
+           );
+           const complete = await multipartUpload.complete(parts);
+           if (!complete) {
+             throw new Error("Complete is undefined");
+           }
+           return complete.key;
+         },
+         catch: (error) =>
+           handleR2Error("completeMultipartUpload", error, {
+             upload_id: context.uploadId,
+             parts_count: parts.length,
+             s3_bucket: context.bucket,
+           }),
+       }),
+     );
+
+   const abortMultipartUpload = (context: R2OperationContext) =>
+     Effect.tryPromise({
+       try: async () => {
+         const multipartUpload = await r2Bucket.resumeMultipartUpload(
+           context.key,
+           context.uploadId,
+         );
+         await multipartUpload.abort();
+       },
+       catch: (error) =>
+         handleR2NotFoundError("abortMultipartUpload", error, {
+           upload_id: context.uploadId,
+           s3_bucket: context.bucket,
+         }),
+     });
+
+   // Note: R2 does not provide a listParts API like S3
+   // Parts are tracked in the KV store instead (see r2-store.ts)
+   // Note: R2 also does not provide listMultipartUploads API
+   // For cleanup, use R2's native expiration policies instead
+
+   const getIncompletePart = (id: string) =>
+     Effect.tryPromise({
+       try: async () => {
+         const data = await r2Bucket.get(partKey(id));
+         if (!data || !data.body) {
+           return undefined;
+         }
+         return data.body;
+       },
+       catch: (error) =>
+         handleR2Error("getIncompletePart", error, {
+           upload_id: id,
+           bucket: r2BucketName,
+         }),
+     });
+
+   const getIncompletePartSize = (id: string) => headObject(partKey(id));
+
+   const putIncompletePart = (id: string, data: Uint8Array) =>
+     putObject(partKey(id), data).pipe(
+       Effect.tap(() =>
+         Effect.logInfo("Incomplete part uploaded").pipe(
+           Effect.annotateLogs({ upload_id: id }),
+         ),
+       ),
+     );
+
+   const deleteIncompletePart = (id: string) => deleteObject(partKey(id));
+
+   return {
+     bucket: r2BucketName,
+     getObject,
+     headObject,
+     putObject,
+     deleteObject,
+     deleteObjects,
+     createMultipartUpload,
+     uploadPart,
+     completeMultipartUpload,
+     abortMultipartUpload,
+     getIncompletePart,
+     getIncompletePartSize,
+     putIncompletePart,
+     deleteIncompletePart,
+   };
+ };
+
+ export const R2ClientLayer = (r2Bucket: R2Bucket, r2BucketName: string) =>
+   Layer.succeed(R2ClientService, makeR2ClientService(r2Bucket, r2BucketName));
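
A minimal usage sketch for the layer above (assumptions: the package index re-exports R2ClientService and R2ClientLayer, and MY_BUCKET is a hypothetical R2 binding configured in wrangler.toml):

import type { R2Bucket } from "@cloudflare/workers-types";
import { Effect } from "effect";
// Assumed: the package index re-exports these symbols.
import { R2ClientLayer, R2ClientService } from "@uploadista/data-store-r2";

// A program that depends on the R2 client service.
const putGreeting = Effect.gen(function* () {
  const client = yield* R2ClientService;
  // putObject resolves to the object's etag on success.
  return yield* client.putObject("greeting.txt", new TextEncoder().encode("hi"));
});

export default {
  async fetch(_req: Request, env: { MY_BUCKET: R2Bucket }): Promise<Response> {
    // Provide the layer built from the Worker's R2 binding, then run the program.
    const etag = await Effect.runPromise(
      putGreeting.pipe(Effect.provide(R2ClientLayer(env.MY_BUCKET, "my-bucket"))),
    );
    return new Response(etag);
  },
};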
package/src/types.ts ADDED
@@ -0,0 +1,106 @@
+ import type {
+   R2Bucket,
+   R2UploadedPart as CloudflareR2UploadedPart,
+   ReadableStream,
+ } from "@cloudflare/workers-types";
+ import type { UploadistaError } from "@uploadista/core/errors";
+ import type {
+   DataStoreCapabilities,
+   DataStoreWriteOptions,
+   UploadFile,
+   UploadStrategy,
+ } from "@uploadista/core/types";
+ import type { Effect } from "effect";
+
+ /**
+  * Extended R2UploadedPart type that includes a size property.
+  * Cloudflare's R2UploadedPart doesn't include size, but we need it for tracking.
+  */
+ export type R2UploadedPart = CloudflareR2UploadedPart & {
+   size: number;
+ };
+
+ export type R2StoreOptions = {
+   deliveryUrl: string;
+   /**
+    * The preferred size for parts sent to R2. Cannot be lower than 5MiB or higher than 5GiB.
+    * The server calculates the optimal part size, which takes this size into account,
+    * but may increase it so the upload does not exceed the 10,000-part limit.
+    */
+   partSize?: number;
+   /**
+    * The minimal part size for parts.
+    * Can be used to ensure that all non-trailing parts are exactly the same size.
+    * Cannot be lower than 5MiB or higher than 5GiB.
+    */
+   minPartSize?: number;
+   /**
+    * The maximum number of parts allowed in a multipart upload. Defaults to 10,000.
+    */
+   maxMultipartParts?: number;
+   useTags?: boolean;
+   maxConcurrentPartUploads?: number;
+   expirationPeriodInMilliseconds?: number;
+   // Options to pass to the Cloudflare R2 SDK.
+   bucket: string;
+   r2Bucket: R2Bucket;
+ };
+
+ export type ChunkInfo = {
+   partNumber: number;
+   data: Uint8Array;
+   size: number;
+   isFinalPart?: boolean;
+ };
+
+ export type R2OperationContext = {
+   uploadId: string;
+   bucket: string;
+   key: string;
+   partNumber?: number;
+   partSize?: number;
+   contentType?: string;
+   cacheControl?: string;
+ };
+
+ export type PartUploadResult = {
+   etag: string;
+   partNumber: number;
+ };
+
+ export type MultipartUploadInfo = {
+   uploadId: string;
+   bucket: string;
+   key: string;
+ };
+
+ export type UploadProgress = {
+   bytesUploaded: number;
+   totalBytes: number;
+   currentOffset: number;
+ };
+
+ export type R2Store = {
+   bucket: string;
+   create: (upload: UploadFile) => Effect.Effect<UploadFile, UploadistaError>;
+   remove: (id: string) => Effect.Effect<void, UploadistaError>;
+   write: (
+     options: DataStoreWriteOptions,
+     dependencies: { onProgress?: (chunkSize: number) => void },
+   ) => Effect.Effect<number, UploadistaError>;
+   getUpload: (id: string) => Effect.Effect<UploadFile, UploadistaError>;
+   read: (id: string) => Effect.Effect<ReadableStream, UploadistaError>;
+   deleteExpired: Effect.Effect<number, UploadistaError>;
+   getCapabilities: () => DataStoreCapabilities;
+   getChunkerConstraints: () => {
+     minChunkSize: number;
+     maxChunkSize: number;
+     optimalChunkSize: number;
+     requiresOrderedChunks: boolean;
+   };
+   validateUploadStrategy: (
+     strategy: UploadStrategy,
+   ) => Effect.Effect<boolean, never>;
+ };
+
+ export type R2StoreConfig = R2StoreOptions;
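
A sketch of how R2StoreOptions might be filled in (illustrative values, not package defaults; the export path and myBucket binding are assumed):

import type { R2Bucket } from "@cloudflare/workers-types";
import type { R2StoreOptions } from "@uploadista/data-store-r2"; // export path assumed

declare const myBucket: R2Bucket; // the Worker's R2 binding

// Illustrative values only; the package's real defaults may differ.
const options: R2StoreOptions = {
  deliveryUrl: "https://cdn.example.com",
  bucket: "uploads",
  r2Bucket: myBucket,
  partSize: 8 * 1024 * 1024,      // prefer 8 MiB parts
  minPartSize: 5 * 1024 * 1024,   // multipart minimum
  maxMultipartParts: 10_000,
  useTags: false,
  expirationPeriodInMilliseconds: 24 * 60 * 60 * 1000, // expire after one day
};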
package/src/utils/calculations.ts ADDED
@@ -0,0 +1,61 @@
+ import type { R2UploadedPart } from "../types";
+
+ export const calcOffsetFromParts = (parts?: Array<R2UploadedPart>): number => {
+   return parts && parts.length > 0
+     ? parts.reduce((a, b) => a + (b?.size ?? 0), 0)
+     : 0;
+ };
+
+ export const calcOptimalPartSize = (
+   initSize: number | undefined,
+   preferredPartSize: number,
+   minPartSize: number,
+   maxMultipartParts: number,
+   maxUploadSize = 5_497_558_138_880, // 5TiB
+ ): number => {
+   const size = initSize ?? maxUploadSize;
+   let optimalPartSize: number;
+
+   if (size <= preferredPartSize) {
+     // For files smaller than preferred part size, use the file size
+     // but ensure it meets S3's minimum requirements for multipart uploads
+     optimalPartSize = size;
+   } else if (size <= preferredPartSize * maxMultipartParts) {
+     // File fits within max parts limit using preferred part size
+     optimalPartSize = preferredPartSize;
+   } else {
+     // File is too large for preferred part size, calculate minimum needed
+     optimalPartSize = Math.ceil(size / maxMultipartParts);
+   }
+
+   // Ensure we respect minimum part size for multipart uploads
+   // Exception: if the file is smaller than minPartSize, use the file size directly
+   const finalPartSize =
+     initSize && initSize < minPartSize
+       ? optimalPartSize // Single part upload for small files
+       : Math.max(optimalPartSize, minPartSize); // Enforce minimum for multipart
+
+   // Round up to ensure consistent part sizes and align to reasonable boundaries
+   // This helps ensure all parts except the last one will have exactly the same size
+   const alignment = 1024; // 1KiB alignment for better consistency
+   return Math.ceil(finalPartSize / alignment) * alignment;
+ };
+
+ export const partKey = (id: string): string => {
+   return `${id}.part`;
+ };
+
+ export const shouldUseExpirationTags = (
+   expirationPeriodInMilliseconds: number,
+   useTags: boolean,
+ ): boolean => {
+   return expirationPeriodInMilliseconds !== 0 && useTags;
+ };
+
+ export const getExpirationDate = (
+   createdAt: string,
+   expirationPeriodInMilliseconds: number,
+ ): Date => {
+   const date = new Date(createdAt);
+   return new Date(date.getTime() + expirationPeriodInMilliseconds);
+ };
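
Worked examples for calcOptimalPartSize, using arbitrary 8 MiB preferred / 5 MiB minimum / 10,000-part inputs (import path assumed); the expected results follow from the arithmetic above:

import { calcOptimalPartSize } from "./calculations";

const MiB = 1024 * 1024;

// 1 MiB file: below minPartSize, so it stays a single 1 MiB part
// (1_048_576 is already a multiple of 1024, so alignment is a no-op).
calcOptimalPartSize(1 * MiB, 8 * MiB, 5 * MiB, 10_000); // => 1_048_576

// 100 GiB file: 8 MiB parts would need more than 10,000 parts, so the part
// size is raised to ceil(107_374_182_400 / 10_000) = 10_737_419 bytes, then
// rounded up to the next 1 KiB boundary.
calcOptimalPartSize(100 * 1024 * MiB, 8 * MiB, 5 * MiB, 10_000); // => 10_738_688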
package/src/utils/error-handling.ts ADDED
@@ -0,0 +1,52 @@
+ import { UploadistaError } from "@uploadista/core/errors";
+ import { trackS3Error as logR2Error } from "@uploadista/observability";
+ import { Effect } from "effect";
+
+ export const handleR2Error = (
+   operation: string,
+   error: unknown,
+   context: Record<string, unknown> = {},
+ ): UploadistaError => {
+   // Log the error with context
+   Effect.runSync(logR2Error(operation, error, context));
+
+   return UploadistaError.fromCode("FILE_WRITE_ERROR", error as Error);
+ };
+
+ export const handleR2NotFoundError = (
+   operation: string,
+   error: unknown,
+   context: Record<string, unknown> = {},
+ ): UploadistaError => {
+   if (
+     typeof error === "object" &&
+     error !== null &&
+     "code" in error &&
+     typeof error.code === "string" &&
+     ["NotFound", "NoSuchKey", "NoSuchUpload"].includes(error.code)
+   ) {
+     Effect.runSync(
+       Effect.logWarning(`File not found during ${operation} operation`).pipe(
+         Effect.annotateLogs({
+           error_code: error.code,
+           ...context,
+         }),
+       ),
+     );
+     return UploadistaError.fromCode("FILE_NOT_FOUND");
+   }
+
+   return handleR2Error(operation, error, context);
+ };
+
+ export const isUploadNotFoundError = (
+   error: unknown,
+ ): error is { code: "NoSuchUpload" | "NoSuchKey" } => {
+   return (
+     typeof error === "object" &&
+     error !== null &&
+     "code" in error &&
+     typeof error.code === "string" &&
+     (error.code === "NoSuchUpload" || error.code === "NoSuchKey")
+   );
+ };
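
A sketch of composing these helpers with Effect.tryPromise (the headOrFail wrapper and the synthetic NoSuchKey error are hypothetical; only handleR2NotFoundError comes from this file):

import type { R2Bucket } from "@cloudflare/workers-types";
import { Effect } from "effect";
import { handleR2NotFoundError } from "./error-handling";

// Hypothetical wrapper: failures surface as UploadistaError, and R2 "not found"
// codes (NotFound/NoSuchKey/NoSuchUpload) are mapped to FILE_NOT_FOUND.
const headOrFail = (bucket: R2Bucket, key: string) =>
  Effect.tryPromise({
    try: async () => {
      const head = await bucket.head(key);
      if (!head) {
        // Synthesize a coded error so the helper's NotFound branch applies.
        throw Object.assign(new Error(`missing: ${key}`), { code: "NoSuchKey" });
      }
      return head.size;
    },
    catch: (error) => handleR2NotFoundError("headObject", error, { key }),
  });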
package/src/utils/index.ts ADDED
@@ -0,0 +1,2 @@
+ export * from "./calculations";
+ export * from "./error-handling";
package/tsconfig.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "extends": "@uploadista/typescript-config/server.json",
+   "compilerOptions": {
+     "baseUrl": "./",
+     "paths": {
+       "@/*": ["./src/*"]
+     },
+     "outDir": "./dist",
+     "rootDir": "./src"
+   },
+   "include": ["src"],
+   "exclude": [
+     "src/**/*.test.ts",
+     "src/**/*.test.tsx",
+     "src/**/*.spec.ts",
+     "src/**/*.spec.tsx",
+     "src/**/__tests__/**"
+   ]
+ }
package/tsdown.config.ts ADDED
@@ -0,0 +1,11 @@
+ import { defineConfig } from "tsdown";
+
+ export default defineConfig({
+   entry: {
+     index: "src/index.ts",
+   },
+   minify: true,
+   format: ["esm", "cjs"],
+   dts: true,
+   outDir: "dist",
+ });
package/vitest.config.ts ADDED
@@ -0,0 +1,15 @@
+ import { defineConfig } from 'vitest/config';
+
+ export default defineConfig({
+   test: {
+     globals: true,
+     environment: 'node',
+     include: ['src/**/*.{test,spec}.{js,mjs,cjs,ts,mts,cts,jsx,tsx}'],
+     exclude: ['node_modules', 'dist'],
+     coverage: {
+       provider: 'v8',
+       reporter: ['text', 'json', 'html'],
+       exclude: ['node_modules/', 'dist/', '**/*.d.ts', '**/*.test.ts', '**/*.spec.ts']
+     }
+   }
+ });