@uploadista/data-store-r2 0.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 uploadista
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/dist/index.cjs ADDED
@@ -0,0 +1 @@
+ let e=require(`@uploadista/core/errors`),t=require(`@uploadista/core/types`),n=require(`@uploadista/observability`),r=require(`effect`);const i=e=>e&&e.length>0?e.reduce((e,t)=>e+(t?.size??0),0):0,a=(e,t,n,r,i=5497558138880)=>{let a=e??i,o;o=a<=t?a:a<=t*r?t:Math.ceil(a/r);let s=e&&e<n?o:Math.max(o,n),c=1024;return Math.ceil(s/c)*c},o=e=>`${e}.part`,s=(t,i,a={})=>(r.Effect.runSync((0,n.trackS3Error)(t,i,a)),e.UploadistaError.fromCode(`FILE_WRITE_ERROR`,i)),c=(t,n,i={})=>typeof n==`object`&&n&&`code`in n&&typeof n.code==`string`&&[`NotFound`,`NoSuchKey`,`NoSuchUpload`].includes(n.code)?(r.Effect.runSync(r.Effect.logWarning(`File not found during ${t} operation`).pipe(r.Effect.annotateLogs({error_code:n.code,...i}))),e.UploadistaError.fromCode(`FILE_NOT_FOUND`)):s(t,n,i);var l=class extends r.Context.Tag(`R2ClientService`)(){};const u=(e,t)=>{let i=n=>r.Effect.gen(function*(){return yield*r.Effect.tryPromise({try:async()=>{let t=await e.get(n);if(!t)throw Error(`Object not found: ${n}`);return t.body},catch:e=>s(`getObject`,e,{key:n,bucket:t})})}),a=n=>r.Effect.tryPromise({try:async()=>{let t=await e.head(n);if(t)return t.size},catch:e=>s(`headObject`,e,{key:n,bucket:t})}),l=(n,i)=>r.Effect.tryPromise({try:async()=>{let t=await e.put(n,i);if(!t)throw Error(`Failed to put object`);return t.etag},catch:e=>s(`putObject`,e,{key:n,bucket:t,size:i.length})}),u=n=>r.Effect.tryPromise({try:async()=>{await e.delete(n)},catch:e=>s(`deleteObject`,e,{key:n,bucket:t})});return{bucket:t,getObject:i,headObject:a,putObject:l,deleteObject:u,deleteObjects:n=>r.Effect.tryPromise({try:()=>e.delete(n),catch:e=>s(`deleteObjects`,e,{keys:n.length,bucket:t})}),createMultipartUpload:t=>(0,n.withS3ApiMetrics)(`createMultipartUpload`,r.Effect.tryPromise({try:async()=>{let n=await e.createMultipartUpload(t.key);if(!n.uploadId)throw Error(`Upload ID is undefined`);if(!n.key)throw Error(`Key is undefined`);return{uploadId:n.uploadId,bucket:t.bucket,key:n.key}},catch:e=>s(`createMultipartUpload`,e,t)})),uploadPart:t=>(0,n.withS3ApiMetrics)(`uploadPart`,r.Effect.tryPromise({try:async()=>{let n=await(await e.resumeMultipartUpload(t.key,t.uploadId)).uploadPart(t.partNumber,t.data);if(!n)throw Error(`Part is undefined`);return n.etag},catch:e=>s(`uploadPart`,e,{upload_id:t.key,part_number:t.partNumber,part_size:t.data.length,s3_bucket:t.bucket})}).pipe(r.Effect.map(e=>e))),completeMultipartUpload:(t,i)=>(0,n.withS3ApiMetrics)(`completeMultipartUpload`,r.Effect.tryPromise({try:async()=>{let n=await(await e.resumeMultipartUpload(t.key,t.uploadId)).complete(i);if(!n)throw Error(`Complete is undefined`);return n.key},catch:e=>s(`completeMultipartUpload`,e,{upload_id:t.key,parts_count:i.length,s3_bucket:t.bucket})})),abortMultipartUpload:t=>r.Effect.tryPromise({try:async()=>{await(await e.resumeMultipartUpload(t.key,t.uploadId)).abort()},catch:e=>c(`abortMultipartUpload`,e,{upload_id:t.key,s3_bucket:t.bucket})}),getIncompletePart:n=>r.Effect.tryPromise({try:async()=>{let t=await e.get(o(n));if(!(!t||!t.body))return t.body},catch:e=>s(`getIncompletePart`,e,{upload_id:n,bucket:t})}),getIncompletePartSize:e=>a(o(e)),putIncompletePart:(e,t)=>l(o(e),t).pipe(r.Effect.tap(()=>r.Effect.logInfo(`Incomplete part uploaded`).pipe(r.Effect.annotateLogs({upload_id:e})))),deleteIncompletePart:e=>u(o(e))}},d=(e,t)=>r.Layer.succeed(l,u(e,t)),f=e=>{let{id:t,metadata:n}=e;if(!n)return t;let r=n.filename||n.fileName||n.name;return typeof r==`string`&&r.includes(`.`)?`${t}${r.substring(r.lastIndexOf(`.`))}`:t};function 
p(o){let{deliveryUrl:s,partSize:c,minPartSize:u=5242880,useTags:d=!0,maxMultipartParts:p=1e4,maxConcurrentPartUploads:m=60,bucket:h}=o;return r.Effect.gen(function*(){let o=yield*l,g=yield*t.UploadFileKVStore,_=c||8*1024*1024,v=t=>{let n=t.storage.uploadId;return n?r.Effect.succeed(n):r.Effect.fail(e.UploadistaError.fromCode(`FILE_WRITE_ERROR`,Error(`Upload ID is undefined`)))},y=(e,t,i)=>{let a=f(e);return(0,n.withS3TimingMetrics)(n.s3PartUploadDurationHistogram,r.Effect.gen(function*(){let s=yield*v(e),c=yield*o.uploadPart({bucket:o.bucket,key:a,uploadId:s,partNumber:i,data:t}).pipe(r.Effect.retry(r.Schedule.exponential(`1 second`,2).pipe(r.Schedule.intersect(r.Schedule.recurs(3)))),r.Effect.tapError(n=>r.Effect.logWarning(`Retrying part upload`).pipe(r.Effect.annotateLogs({upload_id:e.id,part_number:i,error_message:n.message,retry_attempt:`unknown`,part_size:t.length,s3_bucket:o.bucket})))),l=[...e.storage.parts||[],{partNumber:i,etag:c,size:t.length}];return yield*g.set(e.id,{...e,storage:{...e.storage,parts:l}}),yield*(0,n.s3UploadPartsTotal)(r.Effect.succeed(1)),yield*r.Effect.logInfo(`Part uploaded successfully`).pipe(r.Effect.annotateLogs({upload_id:e.id,part_number:i,part_size:t.length,etag:c})),c})).pipe(r.Effect.withSpan(`s3-upload-part-${i}`,{attributes:{"upload.id":e.id,"upload.part_number":i,"upload.part_size":t.length,"s3.bucket":o.bucket,"s3.key":a}}))},b=(e,t)=>o.putIncompletePart(e,t),x=e=>r.Effect.gen(function*(){let t=yield*o.getIncompletePart(e);if(!t)return;let n=t.getReader(),i=[],a=0;try{for(;;){let{done:e,value:t}=yield*r.Effect.promise(()=>n.read());if(e)break;i.push(t),a+=t.length}}finally{n.releaseLock()}let s=r.Stream.fromIterable(i);return{size:a,stream:s}}),S=e=>o.deleteIncompletePart(e),C=e=>o.getIncompletePartSize(e),w=(e,t)=>{let i=f(e);return r.Effect.gen(function*(){let n=yield*v(e);return yield*o.completeMultipartUpload({bucket:o.bucket,key:i,uploadId:n},t)}).pipe(r.Effect.tap(()=>(0,n.s3UploadSuccessTotal)(r.Effect.succeed(1))),r.Effect.withSpan(`s3-complete-multipart-upload`,{attributes:{"upload.id":e.id,"upload.parts_count":t.length,"s3.bucket":o.bucket,"s3.key":i}}))},T=e=>{let t=f(e);return r.Effect.gen(function*(){let n=yield*v(e);yield*o.abortMultipartUpload({bucket:o.bucket,key:t,uploadId:n}),yield*o.deleteObjects([t])})},E=e=>r.Effect.gen(function*(){let t=((yield*g.get(e)).storage.parts||[]).map(e=>({partNumber:e.partNumber,etag:e.etag,size:e.size}));return t.sort((e,t)=>(e.partNumber??0)-(t.partNumber??0)),{uploadFound:!0,parts:t}}),D=(e,t)=>r.Effect.gen(function*(){if(!t)return 0;let n=yield*g.get(e.id),r=n.storage.uploadId;return r&&(yield*g.set(e.id,{...n,storage:{...n.storage,uploadId:r}})),0}),O=e=>r.Effect.gen(function*(){yield*r.Effect.logInfo(`Clearing cache`).pipe(r.Effect.annotateLogs({upload_id:e})),yield*g.delete(e)}),k=e=>{let t=f(e);return r.Effect.gen(function*(){yield*r.Effect.logInfo(`Initializing multipart upload`).pipe(r.Effect.annotateLogs({upload_id:e.id}));let i=yield*o.createMultipartUpload({bucket:o.bucket,key:t,uploadId:``,contentType:e.metadata?.contentType?.toString(),cacheControl:e.metadata?.cacheControl?.toString()}),a={...e,storage:{...e.storage,path:i.key,uploadId:i.uploadId,bucket:i.bucket},url:`${s}/${t}`};return yield*g.set(e.id,a),yield*r.Effect.logInfo(`Multipart upload 
created`).pipe(r.Effect.annotateLogs({upload_id:e.id,s3_upload_id:a.storage.uploadId,s3_key:t})),yield*(0,n.s3UploadRequestsTotal)(r.Effect.succeed(1)),yield*(0,n.s3FileSizeHistogram)(r.Effect.succeed(e.size||0)),a}).pipe(r.Effect.withSpan(`s3-create-upload`,{attributes:{"upload.id":e.id,"upload.size":e.size||0,"s3.bucket":o.bucket,"s3.key":t}}))},A=e=>r.Effect.gen(function*(){yield*r.Effect.logInfo(`Initializing multipart upload`).pipe(r.Effect.annotateLogs({upload_id:e.id}));let t=yield*k(e);return yield*g.set(e.id,t),yield*r.Effect.logInfo(`Multipart upload created`).pipe(r.Effect.annotateLogs({upload_id:e.id,s3_upload_id:t.storage.uploadId})),yield*(0,n.s3UploadRequestsTotal)(r.Effect.succeed(1)),t}).pipe(r.Effect.withSpan(`s3-create-upload`,{attributes:{"upload.id":e.id,"upload.size":e.size||0,"s3.bucket":h}})),j=e=>r.Effect.gen(function*(){yield*T(yield*g.get(e)),yield*O(e)}),M=(e,t)=>(0,n.withS3UploadMetrics)(e.file_id,(0,n.withS3TimingMetrics)(n.s3UploadDurationHistogram,r.Effect.gen(function*(){let{stream:i,file_id:o,offset:s}=e,{onProgress:c}=t,l=Date.now();yield*(0,n.s3ActiveUploadsGauge)(r.Effect.succeed(1));let{uploadFile:d,nextPartNumber:f,offset:h,data:g,existingPartSize:v}=yield*P(o,s,i),y=v||a(d.size,_,u,p);yield*r.Effect.logInfo(`Part size decision`).pipe(r.Effect.annotateLogs({upload_id:o,existing_part_size:v,calculated_part_size:a(d.size,_,u,p),final_part_size:y,next_part_number:f}));let b=h+(yield*z(d,g,f,h,y,u,m,c));return b>m&&d.size===b&&(yield*F(o,d,l)),b}).pipe(r.Effect.ensuring((0,n.s3ActiveUploadsGauge)(r.Effect.succeed(0)))))),N=e=>r.Effect.gen(function*(){let t=yield*g.get(e),{parts:n,uploadFound:r}=yield*E(e);if(!r)return{...t,offset:t.size,size:t.size};let a=i(n),o=yield*C(e);return{...t,offset:a+(o??0),size:t.size,storage:t.storage}}),P=(e,t,n)=>r.Effect.gen(function*(){let i=yield*g.get(e),a=i.storage.parts||[],o=(a.length>0&&a[a.length-1].partNumber?a[a.length-1].partNumber:0)+1,s=a.length>0&&a[0].size?a[0].size:null;if(s&&a.length>1){let t=a.slice(0,-1).find(e=>e.size!==s);t&&(yield*r.Effect.logWarning(`Inconsistent part sizes detected in existing upload`).pipe(r.Effect.annotateLogs({upload_id:e,expected_size:s,inconsistent_part:t.partNumber,inconsistent_size:t.size})))}let c=yield*x(e);if(c){yield*S(e);let a=t-c.size,l=c.stream.pipe(r.Stream.concat(n));return{uploadFile:i,nextPartNumber:o,offset:a,incompletePartSize:c.size,data:l,existingPartSize:s}}else return{uploadFile:i,nextPartNumber:o,offset:t,incompletePartSize:0,data:n,existingPartSize:s}}),F=(e,t,i)=>r.Effect.gen(function*(){let{parts:a}=yield*E(e);yield*r.Effect.logInfo(`Attempting to complete multipart upload`).pipe(r.Effect.annotateLogs({upload_id:e,parts_count:a.length,parts_info:a.map((e,t)=>({part_number:e.partNumber,size:e.size,etag:e.etag,is_final_part:t===a.length-1}))})),yield*w(t,a),yield*D(t,d);let o=Date.now()-i,s=t.size||0,c=o>0?s*1e3/o:0,l=a.length>0?a.reduce((e,t)=>e+(t.size||0),0)/a.length:void 0;yield*(0,n.logS3UploadCompletion)(e,{fileSize:s,totalDurationMs:o,partsCount:a.length,averagePartSize:l,throughputBps:c})}).pipe(r.Effect.tapError(t=>r.Effect.gen(function*(){yield*(0,n.s3UploadErrorsTotal)(r.Effect.succeed(1)),yield*r.Effect.logError(`Failed to finish upload`).pipe(r.Effect.annotateLogs({upload_id:e,error:String(t)}))}))),I=r.Effect.gen(function*(){return yield*r.Effect.logWarning(`R2 does not support automatic expired upload deletion via API. 
Please use R2 lifecycle rules instead.`).pipe(r.Effect.annotateLogs({bucket:o.bucket})),0}),L=e=>t=>r.Stream.async(n=>{let i=new Uint8Array,a=1,o=0,s=(t,i=!1)=>{r.Effect.runSync(r.Effect.logInfo(`Creating chunk`).pipe(r.Effect.annotateLogs({part_number:a,chunk_size:t.length,expected_size:e,is_final_chunk:i,total_bytes_processed:o+t.length}))),n.single({partNumber:a++,data:t,size:t.length})},c=t=>{let n=new Uint8Array(i.length+t.length);for(n.set(i),n.set(t,i.length),i=n,o+=t.length;i.length>=e;){let t=i.slice(0,e);i=i.slice(e),s(t,!1)}};r.Effect.runFork(t.pipe(r.Stream.runForEach(e=>r.Effect.sync(()=>c(e))),r.Effect.andThen(()=>r.Effect.sync(()=>{i.length>0&&s(i,!0),n.end()})),r.Effect.catchAll(e=>r.Effect.sync(()=>n.fail(e)))))}),R=(e,t=0)=>n=>e?r.Effect.gen(function*(){let i=yield*r.Ref.make(t);return n.pipe(r.Stream.tap(t=>r.Effect.gen(function*(){e(yield*r.Ref.updateAndGet(i,e=>e+t.length))})))}).pipe(r.Stream.unwrap):n,z=(t,i,a,o,s,c,l,u)=>r.Effect.gen(function*(){yield*r.Effect.logInfo(`Starting part uploads`).pipe(r.Effect.annotateLogs({upload_id:t.id,init_offset:o,file_size:t.size,part_size:s,min_part_size:c}));let d=i.pipe(R(u,o),L(s)),f=yield*r.Ref.make(o),p=yield*r.Ref.make(0),m=i=>r.Effect.gen(function*(){let o=yield*r.Ref.updateAndGet(f,e=>e+i.size),l=o>=(t.size||0);yield*r.Effect.logDebug(`Processing chunk`).pipe(r.Effect.annotateLogs({upload_id:t.id,cumulative_offset:o,file_size:t.size,chunk_size:i.size,is_final_part:l}));let u=a+i.partNumber-1;i.size>s&&(yield*r.Effect.fail(e.UploadistaError.fromCode(`FILE_WRITE_ERROR`,Error(`Part size ${i.size} exceeds upload part size ${s}`)))),i.size>=c||l?(yield*r.Effect.logDebug(`Uploading multipart chunk`).pipe(r.Effect.annotateLogs({upload_id:t.id,part_number:u,chunk_size:i.size,min_part_size:c,is_final_part:l})),yield*y(t,i.data,u),yield*(0,n.s3PartSizeHistogram)(r.Effect.succeed(i.size))):yield*b(t.id,i.data),yield*r.Ref.update(p,e=>e+i.size)});return yield*d.pipe(r.Stream.runForEach(e=>m(e)),r.Effect.withConcurrency(l)),yield*r.Ref.get(p)}),B=()=>({supportsParallelUploads:!0,supportsConcatenation:!0,supportsDeferredLength:!0,supportsResumableUploads:!0,supportsTransactionalUploads:!0,maxConcurrentUploads:m,minChunkSize:u,maxChunkSize:5368709120,maxParts:p,optimalChunkSize:_,requiresOrderedChunks:!1,requiresMimeTypeValidation:!0,maxValidationSize:void 0}),V=()=>({minChunkSize:u,maxChunkSize:5368709120,optimalChunkSize:_,requiresOrderedChunks:!1}),H=e=>{let t=B(),n=(()=>{switch(e){case`parallel`:return t.supportsParallelUploads;case`single`:return!0;default:return!1}})();return r.Effect.succeed(n)},U=e=>{let t=new Uint8Array(e.reduce((e,t)=>e+t.length,0)),n=0;for(let r of e)t.set(r,n),n+=r.length;return t},W=async e=>{let t=e.getReader(),n=[];for(;;){let{done:e,value:r}=await t.read();if(e)break;n.push(r)}return U(n)};return{bucket:h,create:A,remove:j,write:M,getUpload:N,read:t=>r.Effect.gen(function*(){let n=yield*g.get(t);if(!n.id)return yield*r.Effect.fail(e.UploadistaError.fromCode(`FILE_READ_ERROR`,Error(`Upload Key is undefined`)));let i=f(n),a=yield*o.getObject(i);return yield*r.Effect.promise(()=>W(a))}),deleteExpired:I,getCapabilities:B,getChunkerConstraints:V,validateUploadStrategy:H}})}const m=e=>{let{r2Bucket:t,bucket:n}=e;return p(e).pipe(r.Effect.provide(d(t,n)))};exports.createR2Store=p,exports.r2Store=m;
package/dist/index.d.cts ADDED
@@ -0,0 +1,37 @@
+ import { Effect } from "effect";
+ import { R2Bucket } from "@cloudflare/workers-types";
+
+ //#region src/types.d.ts
+
+ type R2StoreOptions = {
+ deliveryUrl: string;
+ /**
+ * The preferred part size for parts sent to S3. Cannot be lower than 5MiB or more than 5GiB.
+ * The server calculates the optimal part size, which takes this size into account,
+ * but may increase it to not exceed the S3 10K parts limit.
+ */
+ partSize?: number;
+ /**
+ * The minimal part size for parts.
+ * Can be used to ensure that all non-trailing parts are exactly the same size.
+ * Cannot be lower than 5MiB or more than 5GiB.
+ */
+ minPartSize?: number;
+ /**
+ * The maximum number of parts allowed in a multipart upload. Defaults to 10,000.
+ */
+ maxMultipartParts?: number;
+ useTags?: boolean;
+ maxConcurrentPartUploads?: number;
+ expirationPeriodInMilliseconds?: number;
+ bucket: string;
+ r2Bucket: R2Bucket;
+ };
+ type R2StoreConfig = R2StoreOptions;
+ //#endregion
+ //#region src/r2-store.d.ts
+ declare function createR2Store(config: R2StoreConfig): Effect.Effect<DataStore<UploadFile>, unknown, unknown>;
+ declare const r2Store: (options: R2StoreConfig) => Effect.Effect<DataStore<UploadFile>, unknown, unknown>;
+ //#endregion
+ export { createR2Store, r2Store };
+ //# sourceMappingURL=index.d.cts.map
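The declarations above expose createR2Store and r2Store as Effect-returning factories over an R2StoreConfig. A minimal usage sketch for a Cloudflare Worker follows; it is illustrative only, not part of the package: the binding name env.UPLOADS and the delivery URL are placeholders, and the UploadFileKVStore service that the resulting Effect still requires is assumed to be provided elsewhere by the host application.

// Illustrative sketch (not from the package): constructing the store
// from a Worker's R2 binding. env.UPLOADS is a hypothetical binding name.
import type { R2Bucket } from "@cloudflare/workers-types";
import { r2Store } from "@uploadista/data-store-r2";

export function makeStore(env: { UPLOADS: R2Bucket }) {
  // Returns an Effect that still needs the UploadFileKVStore service
  // from the host application before it can be run.
  return r2Store({
    deliveryUrl: "https://cdn.example.com", // placeholder public base URL
    bucket: "uploads",                      // logical bucket name used in logs/metrics
    r2Bucket: env.UPLOADS,                  // Cloudflare R2 binding
    partSize: 8 * 1024 * 1024,              // preferred part size (bundle defaults to 8 MiB)
    minPartSize: 5 * 1024 * 1024,           // 5 MiB minimum, per the option docs above
  });
}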
package/dist/index.d.cts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.cts","names":[],"sources":["../src/types.ts","../src/r2-store.ts"],"sourcesContent":[],"mappings":";;;;;ACqDmD,KD/BvC,cAAA,GC+BuC;EAAA,WAAA,EAAA,MAAA;EAAA;;AAg4BnD;;;EAA8C,QAAA,CAAA,EAAA,MAAA;EAAA;;;;;;;;;;;;;;YDx4BlC;;KA4DA,aAAA,GAAgB;;;iBCpDZ,aAAA,SAAsB,gBAAa,MAAA,CAAA,OAAA,UAAA;cAg4BtC,mBAAoB,kBAAa,MAAA,CAAA,OAAA,UAAA"}
package/dist/index.d.mts ADDED
@@ -0,0 +1,39 @@
+ import { UploadistaError } from "@uploadista/core/errors";
+ import "@uploadista/core/types";
+ import { Effect } from "effect";
+ import { R2Bucket } from "@cloudflare/workers-types";
+
+ //#region src/types.d.ts
+
+ type R2StoreOptions = {
+ deliveryUrl: string;
+ /**
+ * The preferred part size for parts sent to S3. Cannot be lower than 5MiB or more than 5GiB.
+ * The server calculates the optimal part size, which takes this size into account,
+ * but may increase it to not exceed the S3 10K parts limit.
+ */
+ partSize?: number;
+ /**
+ * The minimal part size for parts.
+ * Can be used to ensure that all non-trailing parts are exactly the same size.
+ * Cannot be lower than 5MiB or more than 5GiB.
+ */
+ minPartSize?: number;
+ /**
+ * The maximum number of parts allowed in a multipart upload. Defaults to 10,000.
+ */
+ maxMultipartParts?: number;
+ useTags?: boolean;
+ maxConcurrentPartUploads?: number;
+ expirationPeriodInMilliseconds?: number;
+ bucket: string;
+ r2Bucket: R2Bucket;
+ };
+ type R2StoreConfig = R2StoreOptions;
+ //#endregion
+ //#region src/r2-store.d.ts
+ declare function createR2Store(config: R2StoreConfig): Effect.Effect<DataStore<UploadFile>, unknown, unknown>;
+ declare const r2Store: (options: R2StoreConfig) => Effect.Effect<DataStore<UploadFile>, unknown, unknown>;
+ //#endregion
+ export { createR2Store, r2Store };
+ //# sourceMappingURL=index.d.mts.map
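The partSize, minPartSize, and maxMultipartParts comments above correspond to the calcOptimalPartSize helper that is visible in the bundled sources later in this diff. A standalone sketch of that calculation is reproduced here purely for explanation, with illustrative numbers that are not taken from the package.

// Mirrors the calcOptimalPartSize logic visible in the bundled sources:
// keep the preferred size when it fits, grow it only when the part limit
// would otherwise be exceeded, then align the result up to 1 KiB.
const calcOptimalPartSize = (
  initSize: number | undefined,
  preferredPartSize: number,
  minPartSize: number,
  maxMultipartParts: number,
  maxUploadSize = 5_497_558_138_880, // 5 TiB upper bound used by the bundle
): number => {
  const size = initSize ?? maxUploadSize;
  let optimal: number;
  if (size <= preferredPartSize) {
    optimal = size; // small file: a single part of exactly the file size
  } else if (size <= preferredPartSize * maxMultipartParts) {
    optimal = preferredPartSize; // preferred size fits within the part limit
  } else {
    optimal = Math.ceil(size / maxMultipartParts); // grow to stay under the limit
  }
  const finalSize =
    initSize && initSize < minPartSize ? optimal : Math.max(optimal, minPartSize);
  return Math.ceil(finalSize / 1024) * 1024; // 1 KiB alignment
};

// Example: a 50 GiB upload with the default 8 MiB preference keeps 8 MiB parts,
// since 8 MiB x 10,000 parts (~78 GiB) already covers it.
calcOptimalPartSize(50 * 1024 ** 3, 8 * 1024 * 1024, 5 * 1024 * 1024, 10_000); // 8_388_608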
package/dist/index.d.mts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.mts","names":[],"sources":["../src/types.ts","../src/r2-store.ts"],"sourcesContent":[],"mappings":";;;;;;;ACqDgB,KD/BJ,cAAA,GC+BiB;EAAS,WAAA,EAAA,MAAA;EAAa;;;;AAg4BnD;EAAiC,QAAA,CAAA,EAAA,MAAA;EAAa;;;;;;;;;;;;;;YDx4BlC;;KA4DA,aAAA,GAAgB;;;iBCpDZ,aAAA,SAAsB,gBAAa,MAAA,CAAA,OAAA,UAAA;cAg4BtC,mBAAoB,kBAAa,MAAA,CAAA,OAAA,UAAA"}
package/dist/index.mjs ADDED
@@ -0,0 +1,2 @@
+ import{UploadistaError as e}from"@uploadista/core/errors";import{UploadFileKVStore as t}from"@uploadista/core/types";import{logS3UploadCompletion as n,s3ActiveUploadsGauge as r,s3FileSizeHistogram as i,s3PartSizeHistogram as a,s3PartUploadDurationHistogram as o,s3UploadDurationHistogram as s,s3UploadErrorsTotal as c,s3UploadPartsTotal as l,s3UploadRequestsTotal as u,s3UploadSuccessTotal as d,trackS3Error as f,withS3ApiMetrics as p,withS3TimingMetrics as m,withS3UploadMetrics as h}from"@uploadista/observability";import{Context as g,Effect as _,Layer as v,Ref as y,Schedule as b,Stream as x}from"effect";const ee=e=>e&&e.length>0?e.reduce((e,t)=>e+(t?.size??0),0):0,S=(e,t,n,r,i=5497558138880)=>{let a=e??i,o;o=a<=t?a:a<=t*r?t:Math.ceil(a/r);let s=e&&e<n?o:Math.max(o,n),c=1024;return Math.ceil(s/c)*c},C=e=>`${e}.part`,w=(t,n,r={})=>(_.runSync(f(t,n,r)),e.fromCode(`FILE_WRITE_ERROR`,n)),T=(t,n,r={})=>typeof n==`object`&&n&&`code`in n&&typeof n.code==`string`&&[`NotFound`,`NoSuchKey`,`NoSuchUpload`].includes(n.code)?(_.runSync(_.logWarning(`File not found during ${t} operation`).pipe(_.annotateLogs({error_code:n.code,...r}))),e.fromCode(`FILE_NOT_FOUND`)):w(t,n,r);var E=class extends g.Tag(`R2ClientService`)(){};const D=(e,t)=>{let n=n=>_.gen(function*(){return yield*_.tryPromise({try:async()=>{let t=await e.get(n);if(!t)throw Error(`Object not found: ${n}`);return t.body},catch:e=>w(`getObject`,e,{key:n,bucket:t})})}),r=n=>_.tryPromise({try:async()=>{let t=await e.head(n);if(t)return t.size},catch:e=>w(`headObject`,e,{key:n,bucket:t})}),i=(n,r)=>_.tryPromise({try:async()=>{let t=await e.put(n,r);if(!t)throw Error(`Failed to put object`);return t.etag},catch:e=>w(`putObject`,e,{key:n,bucket:t,size:r.length})}),a=n=>_.tryPromise({try:async()=>{await e.delete(n)},catch:e=>w(`deleteObject`,e,{key:n,bucket:t})});return{bucket:t,getObject:n,headObject:r,putObject:i,deleteObject:a,deleteObjects:n=>_.tryPromise({try:()=>e.delete(n),catch:e=>w(`deleteObjects`,e,{keys:n.length,bucket:t})}),createMultipartUpload:t=>p(`createMultipartUpload`,_.tryPromise({try:async()=>{let n=await e.createMultipartUpload(t.key);if(!n.uploadId)throw Error(`Upload ID is undefined`);if(!n.key)throw Error(`Key is undefined`);return{uploadId:n.uploadId,bucket:t.bucket,key:n.key}},catch:e=>w(`createMultipartUpload`,e,t)})),uploadPart:t=>p(`uploadPart`,_.tryPromise({try:async()=>{let n=await(await e.resumeMultipartUpload(t.key,t.uploadId)).uploadPart(t.partNumber,t.data);if(!n)throw Error(`Part is undefined`);return n.etag},catch:e=>w(`uploadPart`,e,{upload_id:t.key,part_number:t.partNumber,part_size:t.data.length,s3_bucket:t.bucket})}).pipe(_.map(e=>e))),completeMultipartUpload:(t,n)=>p(`completeMultipartUpload`,_.tryPromise({try:async()=>{let r=await(await e.resumeMultipartUpload(t.key,t.uploadId)).complete(n);if(!r)throw Error(`Complete is undefined`);return r.key},catch:e=>w(`completeMultipartUpload`,e,{upload_id:t.key,parts_count:n.length,s3_bucket:t.bucket})})),abortMultipartUpload:t=>_.tryPromise({try:async()=>{await(await e.resumeMultipartUpload(t.key,t.uploadId)).abort()},catch:e=>T(`abortMultipartUpload`,e,{upload_id:t.key,s3_bucket:t.bucket})}),getIncompletePart:n=>_.tryPromise({try:async()=>{let t=await e.get(C(n));if(!(!t||!t.body))return t.body},catch:e=>w(`getIncompletePart`,e,{upload_id:n,bucket:t})}),getIncompletePartSize:e=>r(C(e)),putIncompletePart:(e,t)=>i(C(e),t).pipe(_.tap(()=>_.logInfo(`Incomplete part 
uploaded`).pipe(_.annotateLogs({upload_id:e})))),deleteIncompletePart:e=>a(C(e))}},O=(e,t)=>v.succeed(E,D(e,t)),k=e=>{let{id:t,metadata:n}=e;if(!n)return t;let r=n.filename||n.fileName||n.name;return typeof r==`string`&&r.includes(`.`)?`${t}${r.substring(r.lastIndexOf(`.`))}`:t};function A(f){let{deliveryUrl:p,partSize:g,minPartSize:v=5242880,useTags:C=!0,maxMultipartParts:w=1e4,maxConcurrentPartUploads:T=60,bucket:D}=f;return _.gen(function*(){let f=yield*E,O=yield*t,A=g||8*1024*1024,j=t=>{let n=t.storage.uploadId;return n?_.succeed(n):_.fail(e.fromCode(`FILE_WRITE_ERROR`,Error(`Upload ID is undefined`)))},M=(e,t,n)=>{let r=k(e);return m(o,_.gen(function*(){let i=yield*j(e),a=yield*f.uploadPart({bucket:f.bucket,key:r,uploadId:i,partNumber:n,data:t}).pipe(_.retry(b.exponential(`1 second`,2).pipe(b.intersect(b.recurs(3)))),_.tapError(r=>_.logWarning(`Retrying part upload`).pipe(_.annotateLogs({upload_id:e.id,part_number:n,error_message:r.message,retry_attempt:`unknown`,part_size:t.length,s3_bucket:f.bucket})))),o=[...e.storage.parts||[],{partNumber:n,etag:a,size:t.length}];return yield*O.set(e.id,{...e,storage:{...e.storage,parts:o}}),yield*l(_.succeed(1)),yield*_.logInfo(`Part uploaded successfully`).pipe(_.annotateLogs({upload_id:e.id,part_number:n,part_size:t.length,etag:a})),a})).pipe(_.withSpan(`s3-upload-part-${n}`,{attributes:{"upload.id":e.id,"upload.part_number":n,"upload.part_size":t.length,"s3.bucket":f.bucket,"s3.key":r}}))},N=(e,t)=>f.putIncompletePart(e,t),P=e=>_.gen(function*(){let t=yield*f.getIncompletePart(e);if(!t)return;let n=t.getReader(),r=[],i=0;try{for(;;){let{done:e,value:t}=yield*_.promise(()=>n.read());if(e)break;r.push(t),i+=t.length}}finally{n.releaseLock()}let a=x.fromIterable(r);return{size:i,stream:a}}),F=e=>f.deleteIncompletePart(e),I=e=>f.getIncompletePartSize(e),L=(e,t)=>{let n=k(e);return _.gen(function*(){let r=yield*j(e);return yield*f.completeMultipartUpload({bucket:f.bucket,key:n,uploadId:r},t)}).pipe(_.tap(()=>d(_.succeed(1))),_.withSpan(`s3-complete-multipart-upload`,{attributes:{"upload.id":e.id,"upload.parts_count":t.length,"s3.bucket":f.bucket,"s3.key":n}}))},R=e=>{let t=k(e);return _.gen(function*(){let n=yield*j(e);yield*f.abortMultipartUpload({bucket:f.bucket,key:t,uploadId:n}),yield*f.deleteObjects([t])})},z=e=>_.gen(function*(){let t=((yield*O.get(e)).storage.parts||[]).map(e=>({partNumber:e.partNumber,etag:e.etag,size:e.size}));return t.sort((e,t)=>(e.partNumber??0)-(t.partNumber??0)),{uploadFound:!0,parts:t}}),B=(e,t)=>_.gen(function*(){if(!t)return 0;let n=yield*O.get(e.id),r=n.storage.uploadId;return r&&(yield*O.set(e.id,{...n,storage:{...n.storage,uploadId:r}})),0}),V=e=>_.gen(function*(){yield*_.logInfo(`Clearing cache`).pipe(_.annotateLogs({upload_id:e})),yield*O.delete(e)}),H=e=>{let t=k(e);return _.gen(function*(){yield*_.logInfo(`Initializing multipart upload`).pipe(_.annotateLogs({upload_id:e.id}));let n=yield*f.createMultipartUpload({bucket:f.bucket,key:t,uploadId:``,contentType:e.metadata?.contentType?.toString(),cacheControl:e.metadata?.cacheControl?.toString()}),r={...e,storage:{...e.storage,path:n.key,uploadId:n.uploadId,bucket:n.bucket},url:`${p}/${t}`};return yield*O.set(e.id,r),yield*_.logInfo(`Multipart upload 
created`).pipe(_.annotateLogs({upload_id:e.id,s3_upload_id:r.storage.uploadId,s3_key:t})),yield*u(_.succeed(1)),yield*i(_.succeed(e.size||0)),r}).pipe(_.withSpan(`s3-create-upload`,{attributes:{"upload.id":e.id,"upload.size":e.size||0,"s3.bucket":f.bucket,"s3.key":t}}))},U=e=>_.gen(function*(){yield*_.logInfo(`Initializing multipart upload`).pipe(_.annotateLogs({upload_id:e.id}));let t=yield*H(e);return yield*O.set(e.id,t),yield*_.logInfo(`Multipart upload created`).pipe(_.annotateLogs({upload_id:e.id,s3_upload_id:t.storage.uploadId})),yield*u(_.succeed(1)),t}).pipe(_.withSpan(`s3-create-upload`,{attributes:{"upload.id":e.id,"upload.size":e.size||0,"s3.bucket":D}})),W=e=>_.gen(function*(){yield*R(yield*O.get(e)),yield*V(e)}),G=(e,t)=>h(e.file_id,m(s,_.gen(function*(){let{stream:n,file_id:i,offset:a}=e,{onProgress:o}=t,s=Date.now();yield*r(_.succeed(1));let{uploadFile:c,nextPartNumber:l,offset:u,data:d,existingPartSize:f}=yield*q(i,a,n),p=f||S(c.size,A,v,w);yield*_.logInfo(`Part size decision`).pipe(_.annotateLogs({upload_id:i,existing_part_size:f,calculated_part_size:S(c.size,A,v,w),final_part_size:p,next_part_number:l}));let m=u+(yield*Q(c,d,l,u,p,v,T,o));return m>T&&c.size===m&&(yield*J(i,c,s)),m}).pipe(_.ensuring(r(_.succeed(0)))))),K=e=>_.gen(function*(){let t=yield*O.get(e),{parts:n,uploadFound:r}=yield*z(e);if(!r)return{...t,offset:t.size,size:t.size};let i=ee(n),a=yield*I(e);return{...t,offset:i+(a??0),size:t.size,storage:t.storage}}),q=(e,t,n)=>_.gen(function*(){let r=yield*O.get(e),i=r.storage.parts||[],a=(i.length>0&&i[i.length-1].partNumber?i[i.length-1].partNumber:0)+1,o=i.length>0&&i[0].size?i[0].size:null;if(o&&i.length>1){let t=i.slice(0,-1).find(e=>e.size!==o);t&&(yield*_.logWarning(`Inconsistent part sizes detected in existing upload`).pipe(_.annotateLogs({upload_id:e,expected_size:o,inconsistent_part:t.partNumber,inconsistent_size:t.size})))}let s=yield*P(e);if(s){yield*F(e);let i=t-s.size,c=s.stream.pipe(x.concat(n));return{uploadFile:r,nextPartNumber:a,offset:i,incompletePartSize:s.size,data:c,existingPartSize:o}}else return{uploadFile:r,nextPartNumber:a,offset:t,incompletePartSize:0,data:n,existingPartSize:o}}),J=(e,t,r)=>_.gen(function*(){let{parts:i}=yield*z(e);yield*_.logInfo(`Attempting to complete multipart upload`).pipe(_.annotateLogs({upload_id:e,parts_count:i.length,parts_info:i.map((e,t)=>({part_number:e.partNumber,size:e.size,etag:e.etag,is_final_part:t===i.length-1}))})),yield*L(t,i),yield*B(t,C);let a=Date.now()-r,o=t.size||0,s=a>0?o*1e3/a:0,c=i.length>0?i.reduce((e,t)=>e+(t.size||0),0)/i.length:void 0;yield*n(e,{fileSize:o,totalDurationMs:a,partsCount:i.length,averagePartSize:c,throughputBps:s})}).pipe(_.tapError(t=>_.gen(function*(){yield*c(_.succeed(1)),yield*_.logError(`Failed to finish upload`).pipe(_.annotateLogs({upload_id:e,error:String(t)}))}))),Y=_.gen(function*(){return yield*_.logWarning(`R2 does not support automatic expired upload deletion via API. 
Please use R2 lifecycle rules instead.`).pipe(_.annotateLogs({bucket:f.bucket})),0}),X=e=>t=>x.async(n=>{let r=new Uint8Array,i=1,a=0,o=(t,r=!1)=>{_.runSync(_.logInfo(`Creating chunk`).pipe(_.annotateLogs({part_number:i,chunk_size:t.length,expected_size:e,is_final_chunk:r,total_bytes_processed:a+t.length}))),n.single({partNumber:i++,data:t,size:t.length})},s=t=>{let n=new Uint8Array(r.length+t.length);for(n.set(r),n.set(t,r.length),r=n,a+=t.length;r.length>=e;){let t=r.slice(0,e);r=r.slice(e),o(t,!1)}};_.runFork(t.pipe(x.runForEach(e=>_.sync(()=>s(e))),_.andThen(()=>_.sync(()=>{r.length>0&&o(r,!0),n.end()})),_.catchAll(e=>_.sync(()=>n.fail(e)))))}),Z=(e,t=0)=>n=>e?_.gen(function*(){let r=yield*y.make(t);return n.pipe(x.tap(t=>_.gen(function*(){e(yield*y.updateAndGet(r,e=>e+t.length))})))}).pipe(x.unwrap):n,Q=(t,n,r,i,o,s,c,l)=>_.gen(function*(){yield*_.logInfo(`Starting part uploads`).pipe(_.annotateLogs({upload_id:t.id,init_offset:i,file_size:t.size,part_size:o,min_part_size:s}));let u=n.pipe(Z(l,i),X(o)),d=yield*y.make(i),f=yield*y.make(0),p=n=>_.gen(function*(){let i=yield*y.updateAndGet(d,e=>e+n.size),c=i>=(t.size||0);yield*_.logDebug(`Processing chunk`).pipe(_.annotateLogs({upload_id:t.id,cumulative_offset:i,file_size:t.size,chunk_size:n.size,is_final_part:c}));let l=r+n.partNumber-1;n.size>o&&(yield*_.fail(e.fromCode(`FILE_WRITE_ERROR`,Error(`Part size ${n.size} exceeds upload part size ${o}`)))),n.size>=s||c?(yield*_.logDebug(`Uploading multipart chunk`).pipe(_.annotateLogs({upload_id:t.id,part_number:l,chunk_size:n.size,min_part_size:s,is_final_part:c})),yield*M(t,n.data,l),yield*a(_.succeed(n.size))):yield*N(t.id,n.data),yield*y.update(f,e=>e+n.size)});return yield*u.pipe(x.runForEach(e=>p(e)),_.withConcurrency(c)),yield*y.get(f)}),$=()=>({supportsParallelUploads:!0,supportsConcatenation:!0,supportsDeferredLength:!0,supportsResumableUploads:!0,supportsTransactionalUploads:!0,maxConcurrentUploads:T,minChunkSize:v,maxChunkSize:5368709120,maxParts:w,optimalChunkSize:A,requiresOrderedChunks:!1,requiresMimeTypeValidation:!0,maxValidationSize:void 0}),te=()=>({minChunkSize:v,maxChunkSize:5368709120,optimalChunkSize:A,requiresOrderedChunks:!1}),ne=e=>{let t=$(),n=(()=>{switch(e){case`parallel`:return t.supportsParallelUploads;case`single`:return!0;default:return!1}})();return _.succeed(n)},re=e=>{let t=new Uint8Array(e.reduce((e,t)=>e+t.length,0)),n=0;for(let r of e)t.set(r,n),n+=r.length;return t},ie=async e=>{let t=e.getReader(),n=[];for(;;){let{done:e,value:r}=await t.read();if(e)break;n.push(r)}return re(n)};return{bucket:D,create:U,remove:W,write:G,getUpload:K,read:t=>_.gen(function*(){let n=yield*O.get(t);if(!n.id)return yield*_.fail(e.fromCode(`FILE_READ_ERROR`,Error(`Upload Key is undefined`)));let r=k(n),i=yield*f.getObject(r);return yield*_.promise(()=>ie(i))}),deleteExpired:Y,getCapabilities:$,getChunkerConstraints:te,validateUploadStrategy:ne}})}const j=e=>{let{r2Bucket:t,bucket:n}=e;return A(e).pipe(_.provide(O(t,n)))};export{A as createR2Store,j as r2Store};
+ //# sourceMappingURL=index.mjs.map
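The ESM bundle above derives object keys the same way as the CJS build: the upload id, plus the original file extension when one can be recovered from metadata, with incomplete parts staged under a ".part" suffix. The sketch below restates that naming with made-up ids; logging and the Effect plumbing are omitted.

// Sketch of the key naming visible in the bundle (getS3Key / partKey).
// The upload id and metadata values here are hypothetical.
type UploadLike = { id: string; metadata?: Record<string, unknown> };

const getObjectKey = ({ id, metadata }: UploadLike): string => {
  if (!metadata) return id;
  const filename = metadata.filename || metadata.fileName || metadata.name;
  if (typeof filename === "string" && filename.includes(".")) {
    return `${id}${filename.substring(filename.lastIndexOf("."))}`;
  }
  return id;
};

const partKey = (id: string): string => `${id}.part`;

getObjectKey({ id: "abc123", metadata: { filename: "report.pdf" } }); // "abc123.pdf"
partKey("abc123");                                                    // "abc123.part" (staged incomplete part)
// The public URL becomes `${deliveryUrl}/${getObjectKey(upload)}` once the
// multipart upload is created.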
package/dist/index.mjs.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.mjs","names":["optimalPartSize: number","logR2Error","withTimingMetrics","partUploadDurationHistogram","uploadPartsTotal","chunks: Uint8Array[]","uploadSuccessTotal","r2Parts: R2UploadedPart[]","useTags","uploadRequestsTotal","fileSizeHistogram","withUploadMetrics","uploadDurationHistogram","activeUploadsGauge","uploadErrorsTotal","minPartSize","partSizeHistogram","maxConcurrentPartUploads"],"sources":["../src/utils/calculations.ts","../src/utils/error-handling.ts","../src/services/r2-client.service.ts","../src/r2-store.ts"],"sourcesContent":["import type { R2UploadedPart } from \"../types\";\n\nexport const calcOffsetFromParts = (parts?: Array<R2UploadedPart>): number => {\n return parts && parts.length > 0\n ? parts.reduce((a, b) => a + (b?.size ?? 0), 0)\n : 0;\n};\n\nexport const calcOptimalPartSize = (\n initSize: number | undefined,\n preferredPartSize: number,\n minPartSize: number,\n maxMultipartParts: number,\n maxUploadSize = 5_497_558_138_880, // 5TiB\n): number => {\n const size = initSize ?? maxUploadSize;\n let optimalPartSize: number;\n\n if (size <= preferredPartSize) {\n // For files smaller than preferred part size, use the file size\n // but ensure it meets S3's minimum requirements for multipart uploads\n optimalPartSize = size;\n } else if (size <= preferredPartSize * maxMultipartParts) {\n // File fits within max parts limit using preferred part size\n optimalPartSize = preferredPartSize;\n } else {\n // File is too large for preferred part size, calculate minimum needed\n optimalPartSize = Math.ceil(size / maxMultipartParts);\n }\n\n // Ensure we respect minimum part size for multipart uploads\n // Exception: if the file is smaller than minPartSize, use the file size directly\n const finalPartSize =\n initSize && initSize < minPartSize\n ? 
optimalPartSize // Single part upload for small files\n : Math.max(optimalPartSize, minPartSize); // Enforce minimum for multipart\n\n // Round up to ensure consistent part sizes and align to reasonable boundaries\n // This helps ensure all parts except the last one will have exactly the same size\n const alignment = 1024; // 1KB alignment for better consistency\n return Math.ceil(finalPartSize / alignment) * alignment;\n};\n\nexport const partKey = (id: string): string => {\n return `${id}.part`;\n};\n\nexport const shouldUseExpirationTags = (\n expirationPeriodInMilliseconds: number,\n useTags: boolean,\n): boolean => {\n return expirationPeriodInMilliseconds !== 0 && useTags;\n};\n\nexport const getExpirationDate = (\n createdAt: string,\n expirationPeriodInMilliseconds: number,\n): Date => {\n const date = new Date(createdAt);\n return new Date(date.getTime() + expirationPeriodInMilliseconds);\n};\n","import { UploadistaError } from \"@uploadista/core/errors\";\nimport { trackS3Error as logR2Error } from \"@uploadista/observability\";\nimport { Effect } from \"effect\";\n\nexport const handleR2Error = (\n operation: string,\n error: unknown,\n context: Record<string, unknown> = {},\n): UploadistaError => {\n // Log the error with context\n Effect.runSync(logR2Error(operation, error, context));\n\n return UploadistaError.fromCode(\"FILE_WRITE_ERROR\", error as Error);\n};\n\nexport const handleR2NotFoundError = (\n operation: string,\n error: unknown,\n context: Record<string, unknown> = {},\n): UploadistaError => {\n if (\n typeof error === \"object\" &&\n error !== null &&\n \"code\" in error &&\n typeof error.code === \"string\" &&\n [\"NotFound\", \"NoSuchKey\", \"NoSuchUpload\"].includes(error.code)\n ) {\n Effect.runSync(\n Effect.logWarning(`File not found during ${operation} operation`).pipe(\n Effect.annotateLogs({\n error_code: error.code,\n ...context,\n }),\n ),\n );\n return UploadistaError.fromCode(\"FILE_NOT_FOUND\");\n }\n\n return handleR2Error(operation, error, context);\n};\n\nexport const isUploadNotFoundError = (\n error: unknown,\n): error is { code: \"NoSuchUpload\" | \"NoSuchKey\" } => {\n return (\n typeof error === \"object\" &&\n error !== null &&\n \"code\" in error &&\n typeof error.code === \"string\" &&\n (error.code === \"NoSuchUpload\" || error.code === \"NoSuchKey\")\n );\n};\n","import type {\n R2Bucket,\n ReadableStream,\n} from \"@cloudflare/workers-types\";\nimport type { UploadistaError } from \"@uploadista/core/errors\";\nimport { withS3ApiMetrics } from \"@uploadista/observability\";\nimport { Context, Effect, Layer } from \"effect\";\nimport type { MultipartUploadInfo, R2OperationContext, R2UploadedPart } from \"../types\";\nimport { handleR2Error, handleR2NotFoundError, partKey } from \"../utils\";\n\nexport class R2ClientService extends Context.Tag(\"R2ClientService\")<\n R2ClientService,\n {\n readonly bucket: string;\n\n // Basic S3 operations\n readonly getObject: (\n key: string,\n ) => Effect.Effect<ReadableStream, UploadistaError>;\n readonly headObject: (\n key: string,\n ) => Effect.Effect<number | undefined, UploadistaError>;\n readonly putObject: (\n key: string,\n body: Uint8Array,\n ) => Effect.Effect<string, UploadistaError>;\n readonly deleteObject: (\n key: string,\n ) => Effect.Effect<void, UploadistaError>;\n readonly deleteObjects: (\n keys: string[],\n ) => Effect.Effect<void, UploadistaError>;\n\n // Multipart upload operations\n readonly createMultipartUpload: (\n context: R2OperationContext,\n ) => 
Effect.Effect<MultipartUploadInfo, UploadistaError>;\n readonly uploadPart: (\n context: R2OperationContext & { partNumber: number; data: Uint8Array },\n ) => Effect.Effect<string, UploadistaError>;\n readonly completeMultipartUpload: (\n context: R2OperationContext,\n parts: Array<R2UploadedPart>,\n ) => Effect.Effect<string | undefined, UploadistaError>;\n readonly abortMultipartUpload: (\n context: R2OperationContext,\n ) => Effect.Effect<void, UploadistaError>;\n\n // Incomplete part operations\n readonly getIncompletePart: (\n id: string,\n ) => Effect.Effect<ReadableStream | undefined, UploadistaError>;\n readonly getIncompletePartSize: (\n id: string,\n ) => Effect.Effect<number | undefined, UploadistaError>;\n readonly putIncompletePart: (\n id: string,\n data: Uint8Array,\n ) => Effect.Effect<string, UploadistaError>;\n readonly deleteIncompletePart: (\n id: string,\n ) => Effect.Effect<void, UploadistaError>;\n }\n>() {}\n\nexport const makeR2ClientService = (\n r2Bucket: R2Bucket,\n r2BucketName: string,\n) => {\n const getObject = (key: string) =>\n Effect.gen(function* () {\n const data = yield* Effect.tryPromise({\n try: async () => {\n const result = await r2Bucket.get(key);\n if (!result) {\n throw new Error(`Object not found: ${key}`);\n }\n return result.body;\n },\n catch: (error) =>\n handleR2Error(\"getObject\", error, { key, bucket: r2BucketName }),\n });\n return data;\n });\n\n const headObject = (key: string) =>\n Effect.tryPromise({\n try: async () => {\n const data = await r2Bucket.head(key);\n if (!data) {\n return undefined;\n }\n return data.size;\n },\n catch: (error) =>\n handleR2Error(\"headObject\", error, { key, bucket: r2BucketName }),\n });\n\n const putObject = (key: string, body: Uint8Array) =>\n Effect.tryPromise({\n try: async () => {\n const response = await r2Bucket.put(key, body);\n if (!response) {\n throw new Error(\"Failed to put object\");\n }\n return response.etag;\n },\n catch: (error) =>\n handleR2Error(\"putObject\", error, {\n key,\n bucket: r2BucketName,\n size: body.length,\n }),\n });\n\n const deleteObject = (key: string) =>\n Effect.tryPromise({\n try: async () => {\n await r2Bucket.delete(key);\n },\n catch: (error) =>\n handleR2Error(\"deleteObject\", error, { key, bucket: r2BucketName }),\n });\n\n const deleteObjects = (keys: string[]) =>\n Effect.tryPromise({\n try: () => r2Bucket.delete(keys),\n catch: (error) =>\n handleR2Error(\"deleteObjects\", error, {\n keys: keys.length,\n bucket: r2BucketName,\n }),\n });\n\n const createMultipartUpload = (context: R2OperationContext) =>\n withS3ApiMetrics(\n \"createMultipartUpload\",\n Effect.tryPromise({\n try: async () => {\n const multipartUpload = await r2Bucket.createMultipartUpload(\n context.key,\n );\n\n if (!multipartUpload.uploadId) {\n throw new Error(\"Upload ID is undefined\");\n }\n if (!multipartUpload.key) {\n throw new Error(\"Key is undefined\");\n }\n\n return {\n uploadId: multipartUpload.uploadId,\n bucket: context.bucket,\n key: multipartUpload.key,\n };\n },\n catch: (error) =>\n handleR2Error(\"createMultipartUpload\", error, context),\n }),\n );\n\n const uploadPart = (\n context: R2OperationContext & { partNumber: number; data: Uint8Array },\n ) =>\n withS3ApiMetrics(\n \"uploadPart\",\n Effect.tryPromise({\n try: async () => {\n const multipartUpload = await r2Bucket.resumeMultipartUpload(\n context.key,\n context.uploadId,\n );\n const part = await multipartUpload.uploadPart(\n context.partNumber,\n context.data,\n );\n if (!part) {\n throw new 
Error(\"Part is undefined\");\n }\n return part.etag;\n },\n catch: (error) =>\n handleR2Error(\"uploadPart\", error, {\n upload_id: context.key,\n part_number: context.partNumber,\n part_size: context.data.length,\n s3_bucket: context.bucket,\n }),\n }).pipe(Effect.map((response) => response)),\n );\n\n const completeMultipartUpload = (\n context: R2OperationContext,\n parts: Array<R2UploadedPart>,\n ) =>\n withS3ApiMetrics(\n \"completeMultipartUpload\",\n Effect.tryPromise({\n try: async () => {\n const multipartUpload = await r2Bucket.resumeMultipartUpload(\n context.key,\n context.uploadId,\n );\n const complete = await multipartUpload.complete(parts);\n if (!complete) {\n throw new Error(\"Complete is undefined\");\n }\n return complete.key;\n },\n catch: (error) =>\n handleR2Error(\"completeMultipartUpload\", error, {\n upload_id: context.key,\n parts_count: parts.length,\n s3_bucket: context.bucket,\n }),\n }),\n );\n\n const abortMultipartUpload = (context: R2OperationContext) =>\n Effect.tryPromise({\n try: async () => {\n const multipartUpload = await r2Bucket.resumeMultipartUpload(\n context.key,\n context.uploadId,\n );\n await multipartUpload.abort();\n },\n catch: (error) =>\n handleR2NotFoundError(\"abortMultipartUpload\", error, {\n upload_id: context.key,\n s3_bucket: context.bucket,\n }),\n });\n\n // Note: R2 does not provide a listParts API like S3\n // Parts are tracked in the KV store instead (see r2-store.ts)\n // Note: R2 also does not provide listMultipartUploads API\n // For cleanup, use R2's native expiration policies instead\n\n const getIncompletePart = (id: string) =>\n Effect.tryPromise({\n try: async () => {\n const data = await r2Bucket.get(partKey(id));\n if (!data || !data.body) {\n return undefined;\n }\n return data.body;\n },\n catch: (error) =>\n handleR2Error(\"getIncompletePart\", error, {\n upload_id: id,\n bucket: r2BucketName,\n }),\n });\n\n const getIncompletePartSize = (id: string) => headObject(partKey(id));\n\n const putIncompletePart = (id: string, data: Uint8Array) =>\n putObject(partKey(id), data).pipe(\n Effect.tap(() =>\n Effect.logInfo(\"Incomplete part uploaded\").pipe(\n Effect.annotateLogs({ upload_id: id }),\n ),\n ),\n );\n\n const deleteIncompletePart = (id: string) => deleteObject(partKey(id));\n\n return {\n bucket: r2BucketName,\n getObject,\n headObject,\n putObject,\n deleteObject,\n deleteObjects,\n createMultipartUpload,\n uploadPart,\n completeMultipartUpload,\n abortMultipartUpload,\n getIncompletePart,\n getIncompletePartSize,\n putIncompletePart,\n deleteIncompletePart,\n };\n};\n\nexport const R2ClientLayer = (r2Bucket: R2Bucket, r2BucketName: string) =>\n Layer.succeed(R2ClientService, makeR2ClientService(r2Bucket, r2BucketName));\n","import type { ReadableStream } from \"@cloudflare/workers-types\";\nimport { UploadistaError } from \"@uploadista/core/errors\";\nimport type {\n DataStore,\n DataStoreCapabilities,\n DataStoreWriteOptions,\n UploadFile,\n UploadStrategy,\n} from \"@uploadista/core/types\";\nimport { UploadFileKVStore } from \"@uploadista/core/types\";\nimport {\n s3ActiveUploadsGauge as activeUploadsGauge,\n s3FileSizeHistogram as fileSizeHistogram,\n logS3UploadCompletion,\n s3PartSizeHistogram as partSizeHistogram,\n s3PartUploadDurationHistogram as partUploadDurationHistogram,\n s3UploadDurationHistogram as uploadDurationHistogram,\n s3UploadErrorsTotal as uploadErrorsTotal,\n s3UploadPartsTotal as uploadPartsTotal,\n s3UploadRequestsTotal as uploadRequestsTotal,\n s3UploadSuccessTotal as 
uploadSuccessTotal,\n withS3TimingMetrics as withTimingMetrics,\n withS3UploadMetrics as withUploadMetrics,\n} from \"@uploadista/observability\";\nimport { Effect, Ref, Schedule, Stream } from \"effect\";\nimport { R2ClientLayer, R2ClientService } from \"./services/r2-client.service\";\nimport type { ChunkInfo, R2StoreConfig, R2UploadedPart } from \"./types\";\nimport { calcOffsetFromParts, calcOptimalPartSize } from \"./utils\";\n\n/**\n * Generates an S3 key from an upload file, preserving the file extension if available.\n * Looks for filename in metadata under common keys: 'filename', 'fileName', or 'name'.\n * Falls back to just the upload ID if no filename is found.\n */\nconst getS3Key = (uploadFile: UploadFile): string => {\n const { id, metadata } = uploadFile;\n\n if (!metadata) {\n return id;\n }\n\n // Try common metadata keys for filename\n const filename = metadata.filename || metadata.fileName || metadata.name;\n\n if (typeof filename === \"string\" && filename.includes(\".\")) {\n const extension = filename.substring(filename.lastIndexOf(\".\"));\n return `${id}${extension}`;\n }\n\n return id;\n};\n\n// Clean implementation using composed services\nexport function createR2Store(config: R2StoreConfig) {\n const {\n deliveryUrl,\n partSize,\n minPartSize = 5_242_880,\n useTags = true,\n maxMultipartParts = 10_000,\n maxConcurrentPartUploads = 60,\n bucket,\n } = config;\n\n return Effect.gen(function* () {\n const r2Client = yield* R2ClientService;\n const kvStore = yield* UploadFileKVStore;\n const preferredPartSize = partSize || 8 * 1024 * 1024;\n\n const getUploadId = (\n uploadFile: UploadFile,\n ): Effect.Effect<string, UploadistaError> => {\n const uploadId = uploadFile.storage.uploadId;\n if (!uploadId) {\n return Effect.fail(\n UploadistaError.fromCode(\n \"FILE_WRITE_ERROR\",\n new Error(\"Upload ID is undefined\"),\n ),\n );\n }\n return Effect.succeed(uploadId);\n };\n\n const uploadPart = (\n uploadFile: UploadFile,\n data: Uint8Array,\n partNumber: number,\n ) => {\n const s3Key = getS3Key(uploadFile);\n\n return withTimingMetrics(\n partUploadDurationHistogram,\n Effect.gen(function* () {\n const uploadId = yield* getUploadId(uploadFile);\n\n const etag = yield* r2Client\n .uploadPart({\n bucket: r2Client.bucket,\n key: s3Key,\n uploadId,\n partNumber,\n data,\n })\n .pipe(\n Effect.retry(\n Schedule.exponential(\"1 second\", 2.0).pipe(\n Schedule.intersect(Schedule.recurs(3)),\n ),\n ),\n Effect.tapError((error) =>\n Effect.logWarning(\"Retrying part upload\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n part_number: partNumber,\n error_message: error.message,\n retry_attempt: \"unknown\", // Will be overridden by the retry schedule\n part_size: data.length,\n s3_bucket: r2Client.bucket,\n }),\n ),\n ),\n );\n\n // Store part metadata in KV (R2 doesn't provide listParts API)\n const existingParts = uploadFile.storage.parts || [];\n const updatedParts = [...existingParts, {\n partNumber,\n etag,\n size: data.length,\n }];\n\n yield* kvStore.set(uploadFile.id, {\n ...uploadFile,\n storage: {\n ...uploadFile.storage,\n parts: updatedParts,\n },\n });\n\n yield* uploadPartsTotal(Effect.succeed(1));\n yield* Effect.logInfo(\"Part uploaded successfully\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n part_number: partNumber,\n part_size: data.length,\n etag: etag,\n }),\n );\n\n return etag;\n }),\n ).pipe(\n Effect.withSpan(`s3-upload-part-${partNumber}`, {\n attributes: {\n \"upload.id\": uploadFile.id,\n \"upload.part_number\": 
partNumber,\n \"upload.part_size\": data.length,\n \"s3.bucket\": r2Client.bucket,\n \"s3.key\": s3Key,\n },\n }),\n );\n };\n\n const uploadIncompletePart = (id: string, data: Uint8Array) =>\n r2Client.putIncompletePart(id, data);\n\n const downloadIncompletePart = (id: string) =>\n Effect.gen(function* () {\n const incompletePart = yield* r2Client.getIncompletePart(id);\n\n if (!incompletePart) {\n return undefined;\n }\n\n // Read the stream and collect all chunks to calculate size\n const reader = incompletePart.getReader();\n const chunks: Uint8Array[] = [];\n let incompletePartSize = 0;\n\n try {\n while (true) {\n const { done, value } = yield* Effect.promise(() => reader.read());\n if (done) break;\n chunks.push(value);\n incompletePartSize += value.length;\n }\n } finally {\n reader.releaseLock();\n }\n\n const stream = Stream.fromIterable(chunks);\n\n return {\n size: incompletePartSize,\n stream,\n };\n });\n\n const deleteIncompletePart = (id: string) =>\n r2Client.deleteIncompletePart(id);\n\n const getIncompletePartSize = (id: string) =>\n r2Client.getIncompletePartSize(id);\n\n const complete = (uploadFile: UploadFile, parts: Array<R2UploadedPart>) => {\n const s3Key = getS3Key(uploadFile);\n\n return Effect.gen(function* () {\n const uploadId = yield* getUploadId(uploadFile);\n\n return yield* r2Client.completeMultipartUpload(\n {\n bucket: r2Client.bucket,\n key: s3Key,\n uploadId,\n },\n parts,\n );\n }).pipe(\n Effect.tap(() => uploadSuccessTotal(Effect.succeed(1))),\n Effect.withSpan(\"s3-complete-multipart-upload\", {\n attributes: {\n \"upload.id\": uploadFile.id,\n \"upload.parts_count\": parts.length,\n \"s3.bucket\": r2Client.bucket,\n \"s3.key\": s3Key,\n },\n }),\n );\n };\n\n const abort = (uploadFile: UploadFile) => {\n const s3Key = getS3Key(uploadFile);\n\n return Effect.gen(function* () {\n const uploadId = yield* getUploadId(uploadFile);\n\n yield* r2Client.abortMultipartUpload({\n bucket: r2Client.bucket,\n key: s3Key,\n uploadId,\n });\n\n yield* r2Client.deleteObjects([s3Key]);\n });\n };\n\n const retrieveParts = (id: string) =>\n Effect.gen(function* () {\n const metadata = yield* kvStore.get(id);\n\n // R2 doesn't have a listParts API, so we retrieve parts from KV store\n const parts = metadata.storage.parts || [];\n\n // Convert to R2UploadedPart format for compatibility\n const r2Parts: R2UploadedPart[] = parts.map((part) => ({\n partNumber: part.partNumber,\n etag: part.etag,\n size: part.size,\n }));\n\n // Sort parts by part number to ensure correct order\n r2Parts.sort((a, b) => (a.partNumber ?? 0) - (b.partNumber ?? 
0));\n\n return { uploadFound: true, parts: r2Parts };\n });\n\n const completeMetadata = (upload: UploadFile, useTags: boolean) =>\n Effect.gen(function* () {\n if (!useTags) {\n return 0;\n }\n\n const uploadFile = yield* kvStore.get(upload.id);\n const uploadId = uploadFile.storage.uploadId;\n if (!uploadId) {\n return 0;\n }\n\n yield* kvStore.set(upload.id, {\n ...uploadFile,\n storage: { ...uploadFile.storage, uploadId },\n });\n\n return 0;\n });\n\n const clearCache = (id: string) =>\n Effect.gen(function* () {\n yield* Effect.logInfo(\"Clearing cache\").pipe(\n Effect.annotateLogs({ upload_id: id }),\n );\n yield* kvStore.delete(id);\n });\n\n const createMultipartUpload = (upload: UploadFile) => {\n const s3Key = getS3Key(upload);\n\n return Effect.gen(function* () {\n yield* Effect.logInfo(\"Initializing multipart upload\").pipe(\n Effect.annotateLogs({ upload_id: upload.id }),\n );\n\n const multipartInfo = yield* r2Client.createMultipartUpload({\n bucket: r2Client.bucket,\n key: s3Key,\n uploadId: \"\", // Not needed for create\n contentType: upload.metadata?.contentType?.toString(),\n cacheControl: upload.metadata?.cacheControl?.toString(),\n });\n\n const uploadCreated = {\n ...upload,\n storage: {\n ...upload.storage,\n path: multipartInfo.key,\n uploadId: multipartInfo.uploadId,\n bucket: multipartInfo.bucket,\n },\n url: `${deliveryUrl}/${s3Key}`,\n };\n\n yield* kvStore.set(upload.id, uploadCreated);\n\n yield* Effect.logInfo(\"Multipart upload created\").pipe(\n Effect.annotateLogs({\n upload_id: upload.id,\n s3_upload_id: uploadCreated.storage.uploadId,\n s3_key: s3Key,\n }),\n );\n\n yield* uploadRequestsTotal(Effect.succeed(1));\n yield* fileSizeHistogram(Effect.succeed(upload.size || 0));\n\n return uploadCreated;\n }).pipe(\n Effect.withSpan(\"s3-create-upload\", {\n attributes: {\n \"upload.id\": upload.id,\n \"upload.size\": upload.size || 0,\n \"s3.bucket\": r2Client.bucket,\n \"s3.key\": s3Key,\n },\n }),\n );\n };\n\n /**\n * Creates a multipart upload on S3 attaching any metadata to it.\n * Also, a `${file_id}.info` file is created which holds some information\n * about the upload itself like: `upload-id`, `upload-length`, etc.\n */\n const create = (upload: UploadFile) => {\n return Effect.gen(function* () {\n yield* Effect.logInfo(\"Initializing multipart upload\").pipe(\n Effect.annotateLogs({ upload_id: upload.id }),\n );\n const uploadCreated = yield* createMultipartUpload(upload);\n yield* kvStore.set(upload.id, uploadCreated);\n yield* Effect.logInfo(\"Multipart upload created\").pipe(\n Effect.annotateLogs({\n upload_id: upload.id,\n s3_upload_id: uploadCreated.storage.uploadId,\n }),\n );\n yield* uploadRequestsTotal(Effect.succeed(1));\n\n return uploadCreated;\n }).pipe(\n Effect.withSpan(\"s3-create-upload\", {\n attributes: {\n \"upload.id\": upload.id,\n \"upload.size\": upload.size || 0,\n \"s3.bucket\": bucket,\n },\n }),\n );\n };\n\n const remove = (id: string) =>\n Effect.gen(function* () {\n const uploadFile = yield* kvStore.get(id);\n yield* abort(uploadFile);\n yield* clearCache(id);\n });\n\n const write = (\n options: DataStoreWriteOptions,\n dependencies: { onProgress?: (currentOffset: number) => void },\n ) =>\n withUploadMetrics(\n options.file_id,\n withTimingMetrics(\n uploadDurationHistogram,\n Effect.gen(function* () {\n const {\n stream: initialData,\n file_id,\n offset: initialOffset,\n } = options;\n const { onProgress } = dependencies;\n\n // Capture start time for upload completion metrics\n const startTime = 
Date.now();\n\n // Track active upload\n yield* activeUploadsGauge(Effect.succeed(1));\n\n const prepareResult = yield* prepareUpload(\n file_id,\n initialOffset,\n initialData,\n );\n\n const {\n uploadFile,\n nextPartNumber,\n offset,\n data,\n existingPartSize,\n } = prepareResult;\n\n // Use existing part size if parts already exist, otherwise calculate optimal size\n const uploadPartSize =\n existingPartSize ||\n calcOptimalPartSize(\n uploadFile.size,\n preferredPartSize,\n minPartSize,\n maxMultipartParts,\n );\n\n // Log part size decision for debugging\n yield* Effect.logInfo(\"Part size decision\").pipe(\n Effect.annotateLogs({\n upload_id: file_id,\n existing_part_size: existingPartSize,\n calculated_part_size: calcOptimalPartSize(\n uploadFile.size,\n preferredPartSize,\n minPartSize,\n maxMultipartParts,\n ),\n final_part_size: uploadPartSize,\n next_part_number: nextPartNumber,\n }),\n );\n\n const bytesUploaded = yield* uploadParts(\n uploadFile,\n data,\n nextPartNumber,\n offset,\n uploadPartSize,\n minPartSize,\n maxConcurrentPartUploads,\n onProgress,\n );\n\n const newOffset = offset + bytesUploaded;\n\n if (newOffset > maxConcurrentPartUploads)\n if (uploadFile.size === newOffset) {\n yield* finishUpload(file_id, uploadFile, startTime);\n }\n\n return newOffset;\n }).pipe(Effect.ensuring(activeUploadsGauge(Effect.succeed(0)))),\n ),\n );\n\n const getUpload = (id: string) =>\n Effect.gen(function* () {\n const uploadFile = yield* kvStore.get(id);\n\n const { parts, uploadFound } = yield* retrieveParts(id);\n if (!uploadFound) {\n return {\n ...uploadFile,\n offset: uploadFile.size as number,\n size: uploadFile.size,\n };\n }\n\n const offset = calcOffsetFromParts(parts);\n const incompletePartSize = yield* getIncompletePartSize(id);\n\n return {\n ...uploadFile,\n offset: offset + (incompletePartSize ?? 0),\n size: uploadFile.size,\n storage: uploadFile.storage,\n };\n });\n\n // const read = (id: string) =>\n // Effect.gen(function* () {\n // return yield* r2Client.getObject(id);\n // });\n\n // Helper functions\n const prepareUpload = (\n fileId: string,\n initialOffset: number,\n initialData: Stream.Stream<Uint8Array, UploadistaError>,\n ) =>\n Effect.gen(function* () {\n const uploadFile = yield* kvStore.get(fileId);\n\n // Get parts from storage metadata (tracked locally)\n const parts = uploadFile.storage.parts || [];\n\n const partNumber: number =\n parts.length > 0 && parts[parts.length - 1].partNumber\n ? parts[parts.length - 1].partNumber\n : 0;\n const nextPartNumber = partNumber + 1;\n\n // Detect existing part size to maintain consistency\n // We check the first part's size to ensure all subsequent parts match\n const existingPartSize =\n parts.length > 0 && parts[0].size ? 
parts[0].size : null;\n\n // Validate that all existing parts (except potentially the last one) have the same size\n if (existingPartSize && parts.length > 1) {\n const inconsistentPart = parts\n .slice(0, -1)\n .find((part) => part.size !== existingPartSize);\n if (inconsistentPart) {\n yield* Effect.logWarning(\n \"Inconsistent part sizes detected in existing upload\",\n ).pipe(\n Effect.annotateLogs({\n upload_id: fileId,\n expected_size: existingPartSize,\n inconsistent_part: inconsistentPart.partNumber,\n inconsistent_size: inconsistentPart.size,\n }),\n );\n }\n }\n\n const incompletePart = yield* downloadIncompletePart(fileId);\n\n if (incompletePart) {\n yield* deleteIncompletePart(fileId);\n const offset = initialOffset - incompletePart.size;\n const data = incompletePart.stream.pipe(Stream.concat(initialData));\n return {\n uploadFile,\n nextPartNumber,\n offset,\n incompletePartSize: incompletePart.size,\n data,\n existingPartSize,\n };\n } else {\n return {\n uploadFile,\n nextPartNumber,\n offset: initialOffset,\n incompletePartSize: 0,\n data: initialData,\n existingPartSize,\n };\n }\n });\n\n const finishUpload = (\n fileId: string,\n uploadFile: UploadFile,\n startTime: number,\n ) =>\n Effect.gen(function* () {\n const { parts } = yield* retrieveParts(fileId);\n\n // Log all parts for debugging S3 multipart upload requirements\n yield* Effect.logInfo(\"Attempting to complete multipart upload\").pipe(\n Effect.annotateLogs({\n upload_id: fileId,\n parts_count: parts.length,\n parts_info: parts.map((part, index) => ({\n part_number: part.partNumber,\n size: part.size,\n etag: part.etag,\n is_final_part: index === parts.length - 1,\n })),\n }),\n );\n\n yield* complete(uploadFile, parts);\n yield* completeMetadata(uploadFile, useTags);\n // yield* clearCache(fileId);\n\n // Log upload completion metrics\n const endTime = Date.now();\n const totalDurationMs = endTime - startTime;\n const fileSize = uploadFile.size || 0;\n const throughputBps =\n totalDurationMs > 0 ? (fileSize * 1000) / totalDurationMs : 0;\n\n // Calculate average part size if we have parts\n const averagePartSize =\n parts.length > 0\n ? parts.reduce((sum, part) => sum + (part.size || 0), 0) /\n parts.length\n : undefined;\n\n yield* logS3UploadCompletion(fileId, {\n fileSize,\n totalDurationMs,\n partsCount: parts.length,\n averagePartSize,\n throughputBps,\n });\n }).pipe(\n Effect.tapError((error) =>\n Effect.gen(function* () {\n yield* uploadErrorsTotal(Effect.succeed(1));\n yield* Effect.logError(\"Failed to finish upload\").pipe(\n Effect.annotateLogs({\n upload_id: fileId,\n error: String(error),\n }),\n );\n }),\n ),\n );\n\n // Note: R2 does not provide a listMultipartUploads API\n // Use R2's native lifecycle rules or Cloudflare Workers Cron for cleanup\n // See: https://developers.cloudflare.com/r2/buckets/object-lifecycles/\n const deleteExpired = Effect.gen(function* () {\n yield* Effect.logWarning(\n \"R2 does not support automatic expired upload deletion via API. 
Please use R2 lifecycle rules instead.\",\n ).pipe(\n Effect.annotateLogs({\n bucket: r2Client.bucket,\n }),\n );\n return 0;\n });\n\n // Proper single-pass chunking using Effect's async stream constructor\n // Ensures all parts except the final part are exactly the same size (S3 requirement)\n const createChunkedStream =\n (chunkSize: number) =>\n <E>(\n stream: Stream.Stream<Uint8Array, E>,\n ): Stream.Stream<ChunkInfo, E> => {\n return Stream.async<ChunkInfo, E>((emit) => {\n let buffer = new Uint8Array(0);\n let partNumber = 1;\n let totalBytesProcessed = 0;\n\n const emitChunk = (data: Uint8Array, isFinalChunk = false) => {\n // Log chunk information for debugging - use INFO level to see in logs\n Effect.runSync(\n Effect.logInfo(\"Creating chunk\").pipe(\n Effect.annotateLogs({\n part_number: partNumber,\n chunk_size: data.length,\n expected_size: chunkSize,\n is_final_chunk: isFinalChunk,\n total_bytes_processed: totalBytesProcessed + data.length,\n }),\n ),\n );\n emit.single({\n partNumber: partNumber++,\n data,\n size: data.length,\n });\n };\n\n const processChunk = (newData: Uint8Array) => {\n // Combine buffer with new data\n const combined = new Uint8Array(buffer.length + newData.length);\n combined.set(buffer);\n combined.set(newData, buffer.length);\n buffer = combined;\n totalBytesProcessed += newData.length;\n\n // Emit full chunks of exactly chunkSize bytes\n // This ensures S3 multipart upload rule: all parts except last must be same size\n while (buffer.length >= chunkSize) {\n const chunk = buffer.slice(0, chunkSize);\n buffer = buffer.slice(chunkSize);\n emitChunk(chunk, false);\n }\n };\n\n // Process the stream\n Effect.runFork(\n stream.pipe(\n Stream.runForEach((chunk) =>\n Effect.sync(() => processChunk(chunk)),\n ),\n Effect.andThen(() =>\n Effect.sync(() => {\n // Emit final chunk if there's remaining data\n // The final chunk can be any size < chunkSize (S3 allows this)\n if (buffer.length > 0) {\n emitChunk(buffer, true);\n }\n emit.end();\n }),\n ),\n Effect.catchAll((error) => Effect.sync(() => emit.fail(error))),\n ),\n );\n });\n };\n\n // Byte-level progress tracking during streaming\n // This provides smooth, immediate progress feedback by tracking bytes as they\n // flow through the stream, before they reach S3. This solves the issue where\n // small files (< 5MB) would jump from 0% to 100% instantly.\n const withByteProgressTracking =\n (onProgress?: (totalBytes: number) => void, initialOffset = 0) =>\n <E, R>(stream: Stream.Stream<Uint8Array, E, R>) => {\n if (!onProgress) return stream;\n\n return Effect.gen(function* () {\n const totalBytesProcessedRef = yield* Ref.make(initialOffset);\n\n return stream.pipe(\n Stream.tap((chunk) =>\n Effect.gen(function* () {\n const newTotal = yield* Ref.updateAndGet(\n totalBytesProcessedRef,\n (total) => total + chunk.length,\n );\n onProgress(newTotal);\n }),\n ),\n );\n }).pipe(Stream.unwrap);\n };\n\n const uploadParts = (\n uploadFile: UploadFile,\n readStream: Stream.Stream<Uint8Array, UploadistaError>,\n initCurrentPartNumber: number,\n initOffset: number,\n uploadPartSize: number,\n minPartSize: number,\n maxConcurrentPartUploads: number,\n onProgress?: (newOffset: number) => void,\n ) =>\n Effect.gen(function* () {\n yield* Effect.logInfo(\"Starting part uploads\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n init_offset: initOffset,\n file_size: uploadFile.size,\n part_size: uploadPartSize,\n min_part_size: minPartSize,\n }),\n );\n\n // Enhanced Progress Tracking Strategy:\n // 1. 
Byte-level progress during streaming - provides immediate, smooth feedback\n // as data flows through the pipeline (even for small files)\n // 2. This tracks progress BEFORE S3 upload, giving users immediate feedback\n // 3. For large files with multiple parts, this provides granular updates\n // 4. For small files (single part), this prevents 0%->100% jumps\n const chunkStream = readStream.pipe(\n // Add byte-level progress tracking during streaming (immediate feedback)\n withByteProgressTracking(onProgress, initOffset),\n // Create chunks for S3 multipart upload with uniform part sizes\n createChunkedStream(uploadPartSize),\n );\n\n // Track cumulative offset and total bytes with Effect Refs\n const cumulativeOffsetRef = yield* Ref.make(initOffset);\n const totalBytesUploadedRef = yield* Ref.make(0);\n\n // Create a chunk upload function for the sink\n const uploadChunk = (chunkInfo: ChunkInfo) =>\n Effect.gen(function* () {\n // Calculate cumulative bytes to determine if this is the final part\n const cumulativeOffset = yield* Ref.updateAndGet(\n cumulativeOffsetRef,\n (offset) => offset + chunkInfo.size,\n );\n const isFinalPart = cumulativeOffset >= (uploadFile.size || 0);\n\n yield* Effect.logDebug(\"Processing chunk\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n cumulative_offset: cumulativeOffset,\n file_size: uploadFile.size,\n chunk_size: chunkInfo.size,\n is_final_part: isFinalPart,\n }),\n );\n\n const actualPartNumber =\n initCurrentPartNumber + chunkInfo.partNumber - 1;\n\n if (chunkInfo.size > uploadPartSize) {\n yield* Effect.fail(\n UploadistaError.fromCode(\n \"FILE_WRITE_ERROR\",\n new Error(\n `Part size ${chunkInfo.size} exceeds upload part size ${uploadPartSize}`,\n ),\n ),\n );\n }\n\n // For parts that meet the minimum part size (5MB) or are the final part,\n // upload them as regular multipart parts\n if (chunkInfo.size >= minPartSize || isFinalPart) {\n yield* Effect.logDebug(\"Uploading multipart chunk\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n part_number: actualPartNumber,\n chunk_size: chunkInfo.size,\n min_part_size: minPartSize,\n is_final_part: isFinalPart,\n }),\n );\n yield* uploadPart(uploadFile, chunkInfo.data, actualPartNumber);\n yield* partSizeHistogram(Effect.succeed(chunkInfo.size));\n } else {\n // Only upload as incomplete part if it's smaller than minimum and not final\n yield* uploadIncompletePart(uploadFile.id, chunkInfo.data);\n }\n\n yield* Ref.update(\n totalBytesUploadedRef,\n (total) => total + chunkInfo.size,\n );\n\n // Note: Byte-level progress is now tracked during streaming phase\n // This ensures smooth progress updates regardless of part size\n // S3 upload completion is tracked via totalBytesUploadedRef for accuracy\n });\n\n // Process chunks concurrently with controlled concurrency\n yield* chunkStream.pipe(\n Stream.runForEach((chunkInfo) => uploadChunk(chunkInfo)),\n Effect.withConcurrency(maxConcurrentPartUploads),\n );\n\n return yield* Ref.get(totalBytesUploadedRef);\n });\n\n const getCapabilities = (): DataStoreCapabilities => ({\n supportsParallelUploads: true,\n supportsConcatenation: true,\n supportsDeferredLength: true,\n supportsResumableUploads: true,\n supportsTransactionalUploads: true,\n maxConcurrentUploads: maxConcurrentPartUploads,\n minChunkSize: minPartSize,\n maxChunkSize: 5_368_709_120, // 5GiB S3 limit\n maxParts: maxMultipartParts,\n optimalChunkSize: preferredPartSize,\n requiresOrderedChunks: false,\n requiresMimeTypeValidation: true,\n maxValidationSize: undefined, 
// no size limit\n });\n\n const getChunkerConstraints = () => ({\n minChunkSize: minPartSize,\n maxChunkSize: 5_368_709_120, // 5GiB S3 limit\n optimalChunkSize: preferredPartSize,\n requiresOrderedChunks: false,\n });\n\n const validateUploadStrategy = (\n strategy: UploadStrategy,\n ): Effect.Effect<boolean, never> => {\n const capabilities = getCapabilities();\n const result = (() => {\n switch (strategy) {\n case \"parallel\":\n return capabilities.supportsParallelUploads;\n case \"single\":\n return true;\n default:\n return false;\n }\n })();\n return Effect.succeed(result);\n };\n\n const concatArrayBuffers = (chunks: Uint8Array[]): Uint8Array => {\n const result = new Uint8Array(chunks.reduce((a, c) => a + c.length, 0));\n let offset = 0;\n for (const chunk of chunks) {\n result.set(chunk, offset);\n offset += chunk.length;\n }\n return result;\n };\n\n const streamToArray = async (\n stream: ReadableStream<Uint8Array>,\n ): Promise<Uint8Array> => {\n const reader = stream.getReader();\n const chunks: Uint8Array[] = [];\n while (true) {\n const { done, value } = await reader.read();\n if (done) break;\n chunks.push(value);\n }\n return concatArrayBuffers(chunks);\n };\n\n const read = (id: string) =>\n Effect.gen(function* () {\n const upload = yield* kvStore.get(id);\n\n if (!upload.id) {\n return yield* Effect.fail(\n UploadistaError.fromCode(\n \"FILE_READ_ERROR\",\n new Error(\"Upload Key is undefined\"),\n ),\n );\n }\n const s3Key = getS3Key(upload);\n const stream = yield* r2Client.getObject(s3Key);\n return yield* Effect.promise(() => streamToArray(stream));\n });\n\n return {\n bucket,\n create,\n remove,\n write,\n getUpload,\n read,\n deleteExpired,\n getCapabilities,\n getChunkerConstraints,\n validateUploadStrategy,\n } as DataStore<UploadFile>;\n });\n}\n\n// Effect-based factory that uses services\nexport const r2Store = (options: R2StoreConfig) => {\n const { r2Bucket, bucket } = options;\n return createR2Store(options).pipe(\n Effect.provide(R2ClientLayer(r2Bucket, bucket)),\n 
);\n};\n"],"mappings":"gmBAEA,MAAa,GAAuB,GAC3B,GAAS,EAAM,OAAS,EAC3B,EAAM,QAAQ,EAAG,IAAM,GAAK,GAAG,MAAQ,GAAI,EAAE,CAC7C,EAGO,GACX,EACA,EACA,EACA,EACA,EAAgB,gBACL,CACX,IAAM,EAAO,GAAY,EACrBA,EAEJ,AASE,EATE,GAAQ,EAGQ,EACT,GAAQ,EAAoB,EAEnB,EAGA,KAAK,KAAK,EAAO,EAAkB,CAKvD,IAAM,EACJ,GAAY,EAAW,EACnB,EACA,KAAK,IAAI,EAAiB,EAAY,CAItC,EAAY,KAClB,OAAO,KAAK,KAAK,EAAgB,EAAU,CAAG,GAGnC,EAAW,GACf,GAAG,EAAG,OCxCF,GACX,EACA,EACA,EAAmC,EAAE,IAGrC,EAAO,QAAQC,EAAW,EAAW,EAAO,EAAQ,CAAC,CAE9C,EAAgB,SAAS,mBAAoB,EAAe,EAGxD,GACX,EACA,EACA,EAAmC,EAAE,GAGnC,OAAO,GAAU,UACjB,GACA,SAAU,GACV,OAAO,EAAM,MAAS,UACtB,CAAC,WAAY,YAAa,eAAe,CAAC,SAAS,EAAM,KAAK,EAE9D,EAAO,QACL,EAAO,WAAW,yBAAyB,EAAU,YAAY,CAAC,KAChE,EAAO,aAAa,CAClB,WAAY,EAAM,KAClB,GAAG,EACJ,CAAC,CACH,CACF,CACM,EAAgB,SAAS,iBAAiB,EAG5C,EAAc,EAAW,EAAO,EAAQ,CC5BjD,IAAa,EAAb,cAAqC,EAAQ,IAAI,kBAAkB,EAqDhE,AAAC,GAEJ,MAAa,GACX,EACA,IACG,CACH,IAAM,EAAa,GACjB,EAAO,IAAI,WAAa,CAYtB,OAXa,MAAO,EAAO,WAAW,CACpC,IAAK,SAAY,CACf,IAAM,EAAS,MAAM,EAAS,IAAI,EAAI,CACtC,GAAI,CAAC,EACH,MAAU,MAAM,qBAAqB,IAAM,CAE7C,OAAO,EAAO,MAEhB,MAAQ,GACN,EAAc,YAAa,EAAO,CAAE,MAAK,OAAQ,EAAc,CAAC,CACnE,CAAC,EAEF,CAEE,EAAc,GAClB,EAAO,WAAW,CAChB,IAAK,SAAY,CACf,IAAM,EAAO,MAAM,EAAS,KAAK,EAAI,CAChC,KAGL,OAAO,EAAK,MAEd,MAAQ,GACN,EAAc,aAAc,EAAO,CAAE,MAAK,OAAQ,EAAc,CAAC,CACpE,CAAC,CAEE,GAAa,EAAa,IAC9B,EAAO,WAAW,CAChB,IAAK,SAAY,CACf,IAAM,EAAW,MAAM,EAAS,IAAI,EAAK,EAAK,CAC9C,GAAI,CAAC,EACH,MAAU,MAAM,uBAAuB,CAEzC,OAAO,EAAS,MAElB,MAAQ,GACN,EAAc,YAAa,EAAO,CAChC,MACA,OAAQ,EACR,KAAM,EAAK,OACZ,CAAC,CACL,CAAC,CAEE,EAAgB,GACpB,EAAO,WAAW,CAChB,IAAK,SAAY,CACf,MAAM,EAAS,OAAO,EAAI,EAE5B,MAAQ,GACN,EAAc,eAAgB,EAAO,CAAE,MAAK,OAAQ,EAAc,CAAC,CACtE,CAAC,CAkJJ,MAAO,CACL,OAAQ,EACR,YACA,aACA,YACA,eACA,cAtJqB,GACrB,EAAO,WAAW,CAChB,QAAW,EAAS,OAAO,EAAK,CAChC,MAAQ,GACN,EAAc,gBAAiB,EAAO,CACpC,KAAM,EAAK,OACX,OAAQ,EACT,CAAC,CACL,CAAC,CA+IF,sBA7I6B,GAC7B,EACE,wBACA,EAAO,WAAW,CAChB,IAAK,SAAY,CACf,IAAM,EAAkB,MAAM,EAAS,sBACrC,EAAQ,IACT,CAED,GAAI,CAAC,EAAgB,SACnB,MAAU,MAAM,yBAAyB,CAE3C,GAAI,CAAC,EAAgB,IACnB,MAAU,MAAM,mBAAmB,CAGrC,MAAO,CACL,SAAU,EAAgB,SAC1B,OAAQ,EAAQ,OAChB,IAAK,EAAgB,IACtB,EAEH,MAAQ,GACN,EAAc,wBAAyB,EAAO,EAAQ,CACzD,CAAC,CACH,CAqHD,WAlHA,GAEA,EACE,aACA,EAAO,WAAW,CAChB,IAAK,SAAY,CAKf,IAAM,EAAO,MAJW,MAAM,EAAS,sBACrC,EAAQ,IACR,EAAQ,SACT,EACkC,WACjC,EAAQ,WACR,EAAQ,KACT,CACD,GAAI,CAAC,EACH,MAAU,MAAM,oBAAoB,CAEtC,OAAO,EAAK,MAEd,MAAQ,GACN,EAAc,aAAc,EAAO,CACjC,UAAW,EAAQ,IACnB,YAAa,EAAQ,WACrB,UAAW,EAAQ,KAAK,OACxB,UAAW,EAAQ,OACpB,CAAC,CACL,CAAC,CAAC,KAAK,EAAO,IAAK,GAAa,EAAS,CAAC,CAC5C,CAwFD,yBArFA,EACA,IAEA,EACE,0BACA,EAAO,WAAW,CAChB,IAAK,SAAY,CAKf,IAAM,EAAW,MAJO,MAAM,EAAS,sBACrC,EAAQ,IACR,EAAQ,SACT,EACsC,SAAS,EAAM,CACtD,GAAI,CAAC,EACH,MAAU,MAAM,wBAAwB,CAE1C,OAAO,EAAS,KAElB,MAAQ,GACN,EAAc,0BAA2B,EAAO,CAC9C,UAAW,EAAQ,IACnB,YAAa,EAAM,OACnB,UAAW,EAAQ,OACpB,CAAC,CACL,CAAC,CACH,CA8DD,qBA5D4B,GAC5B,EAAO,WAAW,CAChB,IAAK,SAAY,CAKf,MAJwB,MAAM,EAAS,sBACrC,EAAQ,IACR,EAAQ,SACT,EACqB,OAAO,EAE/B,MAAQ,GACN,EAAsB,uBAAwB,EAAO,CACnD,UAAW,EAAQ,IACnB,UAAW,EAAQ,OACpB,CAAC,CACL,CAAC,CA+CF,kBAxCyB,GACzB,EAAO,WAAW,CAChB,IAAK,SAAY,CACf,IAAM,EAAO,MAAM,EAAS,IAAI,EAAQ,EAAG,CAAC,CACxC,MAAC,GAAQ,CAAC,EAAK,MAGnB,OAAO,EAAK,MAEd,MAAQ,GACN,EAAc,oBAAqB,EAAO,CACxC,UAAW,EACX,OAAQ,EACT,CAAC,CACL,CAAC,CA2BF,sBAzB6B,GAAe,EAAW,EAAQ,EAAG,CAAC,CA0BnE,mBAxByB,EAAY,IACrC,EAAU,EAAQ,EAAG,CAAE,EAAK,CAAC,KAC3B,EAAO,QACL,EAAO,QAAQ,2BAA2B,CAAC,KACzC,EAAO,aAAa,CAAE,UAAW,EAAI,CAAC,CACvC,CACF,CACF,CAkBD,qBAhB4B,GAAe,EAAa,EAAQ,EAAG,CAAC,CAiBrE,EAGU,GAAiB,EAAoB,IAChD,EAAM,QAAQ,EAAiB,EAAoB,EAAU,EAAa,CAAC,CC7PvE,EAAY,GAAmC,CACnD,GAAM,CAAE,KAAI,YAAa,EAEzB,GAAI,CAAC,EACH,OAAO,EAIT,IAAM,EAAW,EAAS,UAAY,EAAS,UAAY
,EAAS,KAOpE,OALI,OAAO,GAAa,UAAY,EAAS,SAAS,IAAI,CAEjD,GAAG,IADQ,EAAS,UAAU,EAAS,YAAY,IAAI,CAAC,GAI1D,GAIT,SAAgB,EAAc,EAAuB,CACnD,GAAM,CACJ,cACA,WACA,cAAc,QACd,UAAU,GACV,oBAAoB,IACpB,2BAA2B,GAC3B,UACE,EAEJ,OAAO,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAW,MAAO,EAClB,EAAU,MAAO,EACjB,EAAoB,GAAY,EAAI,KAAO,KAE3C,EACJ,GAC2C,CAC3C,IAAM,EAAW,EAAW,QAAQ,SASpC,OARK,EAQE,EAAO,QAAQ,EAAS,CAPtB,EAAO,KACZ,EAAgB,SACd,mBACI,MAAM,yBAAyB,CACpC,CACF,EAKC,GACJ,EACA,EACA,IACG,CACH,IAAM,EAAQ,EAAS,EAAW,CAElC,OAAOC,EACLC,EACA,EAAO,IAAI,WAAa,CACtB,IAAM,EAAW,MAAO,EAAY,EAAW,CAEzC,EAAO,MAAO,EACjB,WAAW,CACV,OAAQ,EAAS,OACjB,IAAK,EACL,WACA,aACA,OACD,CAAC,CACD,KACC,EAAO,MACL,EAAS,YAAY,WAAY,EAAI,CAAC,KACpC,EAAS,UAAU,EAAS,OAAO,EAAE,CAAC,CACvC,CACF,CACD,EAAO,SAAU,GACf,EAAO,WAAW,uBAAuB,CAAC,KACxC,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,YAAa,EACb,cAAe,EAAM,QACrB,cAAe,UACf,UAAW,EAAK,OAChB,UAAW,EAAS,OACrB,CAAC,CACH,CACF,CACF,CAIG,EAAe,CAAC,GADA,EAAW,QAAQ,OAAS,EAAE,CACZ,CACtC,aACA,OACA,KAAM,EAAK,OACZ,CAAC,CAoBF,OAlBA,MAAO,EAAQ,IAAI,EAAW,GAAI,CAChC,GAAG,EACH,QAAS,CACP,GAAG,EAAW,QACd,MAAO,EACR,CACF,CAAC,CAEF,MAAOC,EAAiB,EAAO,QAAQ,EAAE,CAAC,CAC1C,MAAO,EAAO,QAAQ,6BAA6B,CAAC,KAClD,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,YAAa,EACb,UAAW,EAAK,OACV,OACP,CAAC,CACH,CAEM,GACP,CACH,CAAC,KACA,EAAO,SAAS,kBAAkB,IAAc,CAC9C,WAAY,CACV,YAAa,EAAW,GACxB,qBAAsB,EACtB,mBAAoB,EAAK,OACzB,YAAa,EAAS,OACtB,SAAU,EACX,CACF,CAAC,CACH,EAGG,GAAwB,EAAY,IACxC,EAAS,kBAAkB,EAAI,EAAK,CAEhC,EAA0B,GAC9B,EAAO,IAAI,WAAa,CACtB,IAAM,EAAiB,MAAO,EAAS,kBAAkB,EAAG,CAE5D,GAAI,CAAC,EACH,OAIF,IAAM,EAAS,EAAe,WAAW,CACnCC,EAAuB,EAAE,CAC3B,EAAqB,EAEzB,GAAI,CACF,OAAa,CACX,GAAM,CAAE,OAAM,SAAU,MAAO,EAAO,YAAc,EAAO,MAAM,CAAC,CAClE,GAAI,EAAM,MACV,EAAO,KAAK,EAAM,CAClB,GAAsB,EAAM,eAEtB,CACR,EAAO,aAAa,CAGtB,IAAM,EAAS,EAAO,aAAa,EAAO,CAE1C,MAAO,CACL,KAAM,EACN,SACD,EACD,CAEE,EAAwB,GAC5B,EAAS,qBAAqB,EAAG,CAE7B,EAAyB,GAC7B,EAAS,sBAAsB,EAAG,CAE9B,GAAY,EAAwB,IAAiC,CACzE,IAAM,EAAQ,EAAS,EAAW,CAElC,OAAO,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAW,MAAO,EAAY,EAAW,CAE/C,OAAO,MAAO,EAAS,wBACrB,CACE,OAAQ,EAAS,OACjB,IAAK,EACL,WACD,CACD,EACD,EACD,CAAC,KACD,EAAO,QAAUC,EAAmB,EAAO,QAAQ,EAAE,CAAC,CAAC,CACvD,EAAO,SAAS,+BAAgC,CAC9C,WAAY,CACV,YAAa,EAAW,GACxB,qBAAsB,EAAM,OAC5B,YAAa,EAAS,OACtB,SAAU,EACX,CACF,CAAC,CACH,EAGG,EAAS,GAA2B,CACxC,IAAM,EAAQ,EAAS,EAAW,CAElC,OAAO,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAW,MAAO,EAAY,EAAW,CAE/C,MAAO,EAAS,qBAAqB,CACnC,OAAQ,EAAS,OACjB,IAAK,EACL,WACD,CAAC,CAEF,MAAO,EAAS,cAAc,CAAC,EAAM,CAAC,EACtC,EAGE,EAAiB,GACrB,EAAO,IAAI,WAAa,CAOtB,IAAMC,IANW,MAAO,EAAQ,IAAI,EAAG,EAGhB,QAAQ,OAAS,EAAE,EAGF,IAAK,IAAU,CACrD,WAAY,EAAK,WACjB,KAAM,EAAK,KACX,KAAM,EAAK,KACZ,EAAE,CAKH,OAFA,EAAQ,MAAM,EAAG,KAAO,EAAE,YAAc,IAAM,EAAE,YAAc,GAAG,CAE1D,CAAE,YAAa,GAAM,MAAO,EAAS,EAC5C,CAEE,GAAoB,EAAoB,IAC5C,EAAO,IAAI,WAAa,CACtB,GAAI,CAACC,EACH,MAAO,GAGT,IAAM,EAAa,MAAO,EAAQ,IAAI,EAAO,GAAG,CAC1C,EAAW,EAAW,QAAQ,SAUpC,OATK,IAIL,MAAO,EAAQ,IAAI,EAAO,GAAI,CAC5B,GAAG,EACH,QAAS,CAAE,GAAG,EAAW,QAAS,WAAU,CAC7C,CAAC,EANO,GAST,CAEE,EAAc,GAClB,EAAO,IAAI,WAAa,CACtB,MAAO,EAAO,QAAQ,iBAAiB,CAAC,KACtC,EAAO,aAAa,CAAE,UAAW,EAAI,CAAC,CACvC,CACD,MAAO,EAAQ,OAAO,EAAG,EACzB,CAEE,EAAyB,GAAuB,CACpD,IAAM,EAAQ,EAAS,EAAO,CAE9B,OAAO,EAAO,IAAI,WAAa,CAC7B,MAAO,EAAO,QAAQ,gCAAgC,CAAC,KACrD,EAAO,aAAa,CAAE,UAAW,EAAO,GAAI,CAAC,CAC9C,CAED,IAAM,EAAgB,MAAO,EAAS,sBAAsB,CAC1D,OAAQ,EAAS,OACjB,IAAK,EACL,SAAU,GACV,YAAa,EAAO,UAAU,aAAa,UAAU,CACrD,aAAc,EAAO,UAAU,cAAc,UAAU,CACxD,CAAC,CAEI,EAAgB,CACpB,GAAG,EACH,QAAS,CACP,GAAG,EAAO,QACV,KAAM,EAAc,IACpB,SAAU,EAAc,SACxB,OAAQ,EAAc,OACvB,CACD,IAAK,GAAG,EAAY,GAAG,IACxB,CAeD,OAbA,MAAO,EAAQ,IAAI,EAAO,GAAI,EAAc,CAE5C,MAAO,EAAO,QAAQ,2BAA2B,CAAC,KAChD,EAAO,aAAa,CAClB,UAAW,EAAO,GAClB,aAAc,E
AAc,QAAQ,SACpC,OAAQ,EACT,CAAC,CACH,CAED,MAAOC,EAAoB,EAAO,QAAQ,EAAE,CAAC,CAC7C,MAAOC,EAAkB,EAAO,QAAQ,EAAO,MAAQ,EAAE,CAAC,CAEnD,GACP,CAAC,KACD,EAAO,SAAS,mBAAoB,CAClC,WAAY,CACV,YAAa,EAAO,GACpB,cAAe,EAAO,MAAQ,EAC9B,YAAa,EAAS,OACtB,SAAU,EACX,CACF,CAAC,CACH,EAQG,EAAU,GACP,EAAO,IAAI,WAAa,CAC7B,MAAO,EAAO,QAAQ,gCAAgC,CAAC,KACrD,EAAO,aAAa,CAAE,UAAW,EAAO,GAAI,CAAC,CAC9C,CACD,IAAM,EAAgB,MAAO,EAAsB,EAAO,CAU1D,OATA,MAAO,EAAQ,IAAI,EAAO,GAAI,EAAc,CAC5C,MAAO,EAAO,QAAQ,2BAA2B,CAAC,KAChD,EAAO,aAAa,CAClB,UAAW,EAAO,GAClB,aAAc,EAAc,QAAQ,SACrC,CAAC,CACH,CACD,MAAOD,EAAoB,EAAO,QAAQ,EAAE,CAAC,CAEtC,GACP,CAAC,KACD,EAAO,SAAS,mBAAoB,CAClC,WAAY,CACV,YAAa,EAAO,GACpB,cAAe,EAAO,MAAQ,EAC9B,YAAa,EACd,CACF,CAAC,CACH,CAGG,EAAU,GACd,EAAO,IAAI,WAAa,CAEtB,MAAO,EADY,MAAO,EAAQ,IAAI,EAAG,CACjB,CACxB,MAAO,EAAW,EAAG,EACrB,CAEE,GACJ,EACA,IAEAE,EACE,EAAQ,QACRT,EACEU,EACA,EAAO,IAAI,WAAa,CACtB,GAAM,CACJ,OAAQ,EACR,UACA,OAAQ,GACN,EACE,CAAE,cAAe,EAGjB,EAAY,KAAK,KAAK,CAG5B,MAAOC,EAAmB,EAAO,QAAQ,EAAE,CAAC,CAQ5C,GAAM,CACJ,aACA,iBACA,SACA,OACA,oBAXoB,MAAO,EAC3B,EACA,EACA,EACD,CAWK,EACJ,GACA,EACE,EAAW,KACX,EACA,EACA,EACD,CAGH,MAAO,EAAO,QAAQ,qBAAqB,CAAC,KAC1C,EAAO,aAAa,CAClB,UAAW,EACX,mBAAoB,EACpB,qBAAsB,EACpB,EAAW,KACX,EACA,EACA,EACD,CACD,gBAAiB,EACjB,iBAAkB,EACnB,CAAC,CACH,CAaD,IAAM,EAAY,GAXI,MAAO,EAC3B,EACA,EACA,EACA,EACA,EACA,EACA,EACA,EACD,EASD,OALI,EAAY,GACV,EAAW,OAAS,IACtB,MAAO,EAAa,EAAS,EAAY,EAAU,EAGhD,GACP,CAAC,KAAK,EAAO,SAASA,EAAmB,EAAO,QAAQ,EAAE,CAAC,CAAC,CAAC,CAChE,CACF,CAEG,EAAa,GACjB,EAAO,IAAI,WAAa,CACtB,IAAM,EAAa,MAAO,EAAQ,IAAI,EAAG,CAEnC,CAAE,QAAO,eAAgB,MAAO,EAAc,EAAG,CACvD,GAAI,CAAC,EACH,MAAO,CACL,GAAG,EACH,OAAQ,EAAW,KACnB,KAAM,EAAW,KAClB,CAGH,IAAM,EAAS,GAAoB,EAAM,CACnC,EAAqB,MAAO,EAAsB,EAAG,CAE3D,MAAO,CACL,GAAG,EACH,OAAQ,GAAU,GAAsB,GACxC,KAAM,EAAW,KACjB,QAAS,EAAW,QACrB,EACD,CAQE,GACJ,EACA,EACA,IAEA,EAAO,IAAI,WAAa,CACtB,IAAM,EAAa,MAAO,EAAQ,IAAI,EAAO,CAGvC,EAAQ,EAAW,QAAQ,OAAS,EAAE,CAMtC,GAHJ,EAAM,OAAS,GAAK,EAAM,EAAM,OAAS,GAAG,WACxC,EAAM,EAAM,OAAS,GAAG,WACxB,GAC8B,EAI9B,EACJ,EAAM,OAAS,GAAK,EAAM,GAAG,KAAO,EAAM,GAAG,KAAO,KAGtD,GAAI,GAAoB,EAAM,OAAS,EAAG,CACxC,IAAM,EAAmB,EACtB,MAAM,EAAG,GAAG,CACZ,KAAM,GAAS,EAAK,OAAS,EAAiB,CAC7C,IACF,MAAO,EAAO,WACZ,sDACD,CAAC,KACA,EAAO,aAAa,CAClB,UAAW,EACX,cAAe,EACf,kBAAmB,EAAiB,WACpC,kBAAmB,EAAiB,KACrC,CAAC,CACH,EAIL,IAAM,EAAiB,MAAO,EAAuB,EAAO,CAE5D,GAAI,EAAgB,CAClB,MAAO,EAAqB,EAAO,CACnC,IAAM,EAAS,EAAgB,EAAe,KACxC,EAAO,EAAe,OAAO,KAAK,EAAO,OAAO,EAAY,CAAC,CACnE,MAAO,CACL,aACA,iBACA,SACA,mBAAoB,EAAe,KACnC,OACA,mBACD,MAED,MAAO,CACL,aACA,iBACA,OAAQ,EACR,mBAAoB,EACpB,KAAM,EACN,mBACD,EAEH,CAEE,GACJ,EACA,EACA,IAEA,EAAO,IAAI,WAAa,CACtB,GAAM,CAAE,SAAU,MAAO,EAAc,EAAO,CAG9C,MAAO,EAAO,QAAQ,0CAA0C,CAAC,KAC/D,EAAO,aAAa,CAClB,UAAW,EACX,YAAa,EAAM,OACnB,WAAY,EAAM,KAAK,EAAM,KAAW,CACtC,YAAa,EAAK,WAClB,KAAM,EAAK,KACX,KAAM,EAAK,KACX,cAAe,IAAU,EAAM,OAAS,EACzC,EAAE,CACJ,CAAC,CACH,CAED,MAAO,EAAS,EAAY,EAAM,CAClC,MAAO,EAAiB,EAAY,EAAQ,CAK5C,IAAM,EADU,KAAK,KAAK,CACQ,EAC5B,EAAW,EAAW,MAAQ,EAC9B,EACJ,EAAkB,EAAK,EAAW,IAAQ,EAAkB,EAGxD,EACJ,EAAM,OAAS,EACX,EAAM,QAAQ,EAAK,IAAS,GAAO,EAAK,MAAQ,GAAI,EAAE,CACtD,EAAM,OACN,IAAA,GAEN,MAAO,EAAsB,EAAQ,CACnC,WACA,kBACA,WAAY,EAAM,OAClB,kBACA,gBACD,CAAC,EACF,CAAC,KACD,EAAO,SAAU,GACf,EAAO,IAAI,WAAa,CACtB,MAAOC,EAAkB,EAAO,QAAQ,EAAE,CAAC,CAC3C,MAAO,EAAO,SAAS,0BAA0B,CAAC,KAChD,EAAO,aAAa,CAClB,UAAW,EACX,MAAO,OAAO,EAAM,CACrB,CAAC,CACH,EACD,CACH,CACF,CAKG,EAAgB,EAAO,IAAI,WAAa,CAQ5C,OAPA,MAAO,EAAO,WACZ,wGACD,CAAC,KACA,EAAO,aAAa,CAClB,OAAQ,EAAS,OAClB,CAAC,CACH,CACM,GACP,CAII,EACH,GAEC,GAEO,EAAO,MAAqB,GAAS,CAC1C,IAAI,EAAS,IAAI,WACb,EAAa,EACb,EAAsB,EAEpB,GAAa,EAAkB,EAAe,KAAU,CAE5D,EAAO,QACL,EAAO,QAAQ,iBAAiB,CAAC
,KAC/B,EAAO,aAAa,CAClB,YAAa,EACb,WAAY,EAAK,OACjB,cAAe,EACf,eAAgB,EAChB,sBAAuB,EAAsB,EAAK,OACnD,CAAC,CACH,CACF,CACD,EAAK,OAAO,CACV,WAAY,IACZ,OACA,KAAM,EAAK,OACZ,CAAC,EAGE,EAAgB,GAAwB,CAE5C,IAAM,EAAW,IAAI,WAAW,EAAO,OAAS,EAAQ,OAAO,CAQ/D,IAPA,EAAS,IAAI,EAAO,CACpB,EAAS,IAAI,EAAS,EAAO,OAAO,CACpC,EAAS,EACT,GAAuB,EAAQ,OAIxB,EAAO,QAAU,GAAW,CACjC,IAAM,EAAQ,EAAO,MAAM,EAAG,EAAU,CACxC,EAAS,EAAO,MAAM,EAAU,CAChC,EAAU,EAAO,GAAM,GAK3B,EAAO,QACL,EAAO,KACL,EAAO,WAAY,GACjB,EAAO,SAAW,EAAa,EAAM,CAAC,CACvC,CACD,EAAO,YACL,EAAO,SAAW,CAGZ,EAAO,OAAS,GAClB,EAAU,EAAQ,GAAK,CAEzB,EAAK,KAAK,EACV,CACH,CACD,EAAO,SAAU,GAAU,EAAO,SAAW,EAAK,KAAK,EAAM,CAAC,CAAC,CAChE,CACF,EACD,CAOA,GACH,EAA2C,EAAgB,IACrD,GACA,EAEE,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAyB,MAAO,EAAI,KAAK,EAAc,CAE7D,OAAO,EAAO,KACZ,EAAO,IAAK,GACV,EAAO,IAAI,WAAa,CAKtB,EAJiB,MAAO,EAAI,aAC1B,EACC,GAAU,EAAQ,EAAM,OAC1B,CACmB,EACpB,CACH,CACF,EACD,CAAC,KAAK,EAAO,OAAO,CAhBE,EAmBtB,GACJ,EACA,EACA,EACA,EACA,EACA,EACA,EACA,IAEA,EAAO,IAAI,WAAa,CACtB,MAAO,EAAO,QAAQ,wBAAwB,CAAC,KAC7C,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,YAAa,EACb,UAAW,EAAW,KACtB,UAAW,EACX,cAAeC,EAChB,CAAC,CACH,CAQD,IAAM,EAAc,EAAW,KAE7B,EAAyB,EAAY,EAAW,CAEhD,EAAoB,EAAe,CACpC,CAGK,EAAsB,MAAO,EAAI,KAAK,EAAW,CACjD,EAAwB,MAAO,EAAI,KAAK,EAAE,CAG1C,EAAe,GACnB,EAAO,IAAI,WAAa,CAEtB,IAAM,EAAmB,MAAO,EAAI,aAClC,EACC,GAAW,EAAS,EAAU,KAChC,CACK,EAAc,IAAqB,EAAW,MAAQ,GAE5D,MAAO,EAAO,SAAS,mBAAmB,CAAC,KACzC,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,kBAAmB,EACnB,UAAW,EAAW,KACtB,WAAY,EAAU,KACtB,cAAe,EAChB,CAAC,CACH,CAED,IAAM,EACJ,EAAwB,EAAU,WAAa,EAE7C,EAAU,KAAO,IACnB,MAAO,EAAO,KACZ,EAAgB,SACd,mBACI,MACF,aAAa,EAAU,KAAK,4BAA4B,IACzD,CACF,CACF,EAKC,EAAU,MAAQA,GAAe,GACnC,MAAO,EAAO,SAAS,4BAA4B,CAAC,KAClD,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,YAAa,EACb,WAAY,EAAU,KACtB,cAAeA,EACf,cAAe,EAChB,CAAC,CACH,CACD,MAAO,EAAW,EAAY,EAAU,KAAM,EAAiB,CAC/D,MAAOC,EAAkB,EAAO,QAAQ,EAAU,KAAK,CAAC,EAGxD,MAAO,EAAqB,EAAW,GAAI,EAAU,KAAK,CAG5D,MAAO,EAAI,OACT,EACC,GAAU,EAAQ,EAAU,KAC9B,EAKD,CAQJ,OALA,MAAO,EAAY,KACjB,EAAO,WAAY,GAAc,EAAY,EAAU,CAAC,CACxD,EAAO,gBAAgBC,EAAyB,CACjD,CAEM,MAAO,EAAI,IAAI,EAAsB,EAC5C,CAEE,OAAgD,CACpD,wBAAyB,GACzB,sBAAuB,GACvB,uBAAwB,GACxB,yBAA0B,GAC1B,6BAA8B,GAC9B,qBAAsB,EACtB,aAAc,EACd,aAAc,WACd,SAAU,EACV,iBAAkB,EAClB,sBAAuB,GACvB,2BAA4B,GAC5B,kBAAmB,IAAA,GACpB,EAEK,QAA+B,CACnC,aAAc,EACd,aAAc,WACd,iBAAkB,EAClB,sBAAuB,GACxB,EAEK,GACJ,GACkC,CAClC,IAAM,EAAe,GAAiB,CAChC,OAAgB,CACpB,OAAQ,EAAR,CACE,IAAK,WACH,OAAO,EAAa,wBACtB,IAAK,SACH,MAAO,GACT,QACE,MAAO,OAET,CACJ,OAAO,EAAO,QAAQ,EAAO,EAGzB,GAAsB,GAAqC,CAC/D,IAAM,EAAS,IAAI,WAAW,EAAO,QAAQ,EAAG,IAAM,EAAI,EAAE,OAAQ,EAAE,CAAC,CACnE,EAAS,EACb,IAAK,IAAM,KAAS,EAClB,EAAO,IAAI,EAAO,EAAO,CACzB,GAAU,EAAM,OAElB,OAAO,GAGH,GAAgB,KACpB,IACwB,CACxB,IAAM,EAAS,EAAO,WAAW,CAC3BZ,EAAuB,EAAE,CAC/B,OAAa,CACX,GAAM,CAAE,OAAM,SAAU,MAAM,EAAO,MAAM,CAC3C,GAAI,EAAM,MACV,EAAO,KAAK,EAAM,CAEpB,OAAO,GAAmB,EAAO,EAoBnC,MAAO,CACL,SACA,SACA,SACA,QACA,YACA,KAvBY,GACZ,EAAO,IAAI,WAAa,CACtB,IAAM,EAAS,MAAO,EAAQ,IAAI,EAAG,CAErC,GAAI,CAAC,EAAO,GACV,OAAO,MAAO,EAAO,KACnB,EAAgB,SACd,kBACI,MAAM,0BAA0B,CACrC,CACF,CAEH,IAAM,EAAQ,EAAS,EAAO,CACxB,EAAS,MAAO,EAAS,UAAU,EAAM,CAC/C,OAAO,MAAO,EAAO,YAAc,GAAc,EAAO,CAAC,EACzD,CASF,gBACA,kBACA,yBACA,0BACD,EACD,CAIJ,MAAa,EAAW,GAA2B,CACjD,GAAM,CAAE,WAAU,UAAW,EAC7B,OAAO,EAAc,EAAQ,CAAC,KAC5B,EAAO,QAAQ,EAAc,EAAU,EAAO,CAAC,CAChD"}
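The sourcemap above embeds the original TypeScript for the store; it ends with the `r2Store` factory, which takes an `R2StoreConfig` with an `r2Bucket` binding and a `bucket` name, builds the data store as an Effect, and provides its own R2 client layer. Below is a minimal consumer sketch, assuming a Cloudflare Worker environment and treating the remaining `R2StoreConfig` fields as optional tuning; the binding name `UPLOADS`, the bucket name, and the `remainingLayers` placeholder for whatever other services the store effect still needs (for example the key-value store the source uses for upload metadata) are hypothetical and not part of this package.

import { Effect, Layer } from "effect";
import type { R2Bucket } from "@cloudflare/workers-types";
import { r2Store } from "@uploadista/data-store-r2";

// Hypothetical Worker bindings; only r2Bucket and bucket come from the
// factory signature visible in the bundled source above.
interface Env {
  UPLOADS: R2Bucket;
}

// The factory provides its own R2 client layer, but any other services the
// store effect depends on (e.g. the key-value store used for upload
// metadata) still have to be supplied before it is run.
export const makeStore = (env: Env) =>
  r2Store({ r2Bucket: env.UPLOADS, bucket: "uploads" });

// Running it once the remaining layers are available:
export const describeStore = (env: Env, remainingLayers: Layer.Layer<any>) =>
  Effect.runPromise(
    makeStore(env).pipe(
      Effect.provide(remainingLayers),
      Effect.map((store) => store.getCapabilities()),
    ),
  );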
package/package.json ADDED
@@ -0,0 +1,36 @@
1
+ {
2
+ "name": "@uploadista/data-store-r2",
3
+ "type": "module",
4
+ "version": "0.0.9",
5
+ "description": "Cloudflare R2 data store for Uploadista",
6
+ "license": "MIT",
7
+ "author": "Uploadista",
8
+ "exports": {
9
+ ".": {
10
+ "types": "./dist/index.d.ts",
11
+ "import": "./dist/index.js",
12
+ "default": "./dist/index.js"
13
+ }
14
+ },
15
+ "dependencies": {
16
+ "@cloudflare/workers-types": "4.20251014.0",
17
+ "effect": "3.19.0",
18
+ "@uploadista/core": "0.0.9",
19
+ "@uploadista/observability": "0.0.9"
20
+ },
21
+ "devDependencies": {
22
+ "tsdown": "0.16.0",
23
+ "vitest": "4.0.7",
24
+ "@uploadista/typescript-config": "0.0.9"
25
+ },
26
+ "scripts": {
27
+ "build": "tsdown",
28
+ "format": "biome format --write ./src",
29
+ "lint": "biome lint --write ./src",
30
+ "check": "biome check --write ./src",
31
+ "test": "vitest",
32
+ "test:run": "vitest run",
33
+ "test:watch": "vitest --watch",
34
+ "typecheck": "tsc --noEmit"
35
+ }
36
+ }
package/src/index.ts ADDED
@@ -0,0 +1 @@
1
+ export * from "./r2-store";
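The entry point is a one-line barrel, so the `r2Store` factory defined in the `./r2-store` module is what consumers get from the package root declared in the exports map above, e.g.:

// The "." entry in package.json maps this specifier to ./dist/index.js
// (types from ./dist/index.d.ts); src/index.ts simply re-exports ./r2-store.
import { r2Store } from "@uploadista/data-store-r2";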