@uploadista/data-store-r2 0.1.4-beta.1 → 1.0.0-beta.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/dist/index.cjs +1 -1
  2. package/package.json +8 -8
package/dist/index.cjs CHANGED
@@ -1 +1 @@
1
- let e=require(`@uploadista/core/errors`),t=require(`@uploadista/core/types`),n=require(`@uploadista/observability`),r=require(`effect`);const i=e=>e&&e.length>0?e.reduce((e,t)=>e+(t?.size??0),0):0,a=(e,t,n,r,i=5497558138880)=>{let a=e??i,o;o=a<=t?a:a<=t*r?t:Math.ceil(a/r);let s=e&&e<n?o:Math.max(o,n),c=1024;return Math.ceil(s/c)*c},o=e=>`${e}.part`,s=(t,i,a={})=>(r.Effect.runSync((0,n.trackS3Error)(t,i,a)),e.UploadistaError.fromCode(`FILE_WRITE_ERROR`,i)),c=e=>[`NotFound`,`NoSuchKey`,`NoSuchUpload`].includes(e),l=e=>{if(typeof e!=`object`||!e)return null;let t={};return`code`in e&&typeof e.code==`string`&&(t.code=e.code),`name`in e&&typeof e.name==`string`&&(t.name=e.name),Object.keys(t).length>0?t:null},u=(t,n,i={})=>{let a=l(n);return a&&(a.code&&c(a.code)||a.name&&c(a.name))?(r.Effect.runSync(r.Effect.logWarning(`File not found during ${t} operation`).pipe(r.Effect.annotateLogs({error_code:a.code,error_name:a.name,...i}))),e.UploadistaError.fromCode(`FILE_NOT_FOUND`)):s(t,n,i)},d=e=>e===`NoSuchUpload`||e===`NoSuchKey`,f=t=>{let n=l(t);if(n&&(n.code&&d(n.code)||n.name&&d(n.name)))return!0;if(t instanceof e.UploadistaError&&t.cause){let e=l(t.cause);if(e&&(e.code&&d(e.code)||e.name&&d(e.name)))return!0}return!1};var p=class extends r.Context.Tag(`R2ClientService`)(){};const m=(e,t)=>{let i=n=>r.Effect.gen(function*(){return yield*r.Effect.tryPromise({try:async()=>{let t=await e.get(n);if(!t)throw Error(`Object not found: ${n}`);return t.body},catch:e=>s(`getObject`,e,{key:n,bucket:t})})}),a=n=>r.Effect.tryPromise({try:async()=>{let t=await e.head(n);if(t)return t.size},catch:e=>s(`headObject`,e,{key:n,bucket:t})}),c=(n,i)=>r.Effect.tryPromise({try:async()=>{let t=await e.put(n,i);if(!t)throw Error(`Failed to put object`);return t.etag},catch:e=>s(`putObject`,e,{key:n,bucket:t,size:i.length})}),l=n=>r.Effect.tryPromise({try:async()=>{await 
e.delete(n)},catch:e=>s(`deleteObject`,e,{key:n,bucket:t})});return{bucket:t,getObject:i,headObject:a,putObject:c,deleteObject:l,deleteObjects:n=>r.Effect.tryPromise({try:()=>e.delete(n),catch:e=>s(`deleteObjects`,e,{keys:n.length,bucket:t})}),createMultipartUpload:t=>(0,n.withS3ApiMetrics)(`createMultipartUpload`,r.Effect.tryPromise({try:async()=>{let n=await e.createMultipartUpload(t.key);if(!n.uploadId)throw Error(`Upload ID is undefined`);if(!n.key)throw Error(`Key is undefined`);return{uploadId:n.uploadId,bucket:t.bucket,key:n.key}},catch:e=>s(`createMultipartUpload`,e,t)})),uploadPart:t=>(0,n.withS3ApiMetrics)(`uploadPart`,r.Effect.tryPromise({try:async()=>{let n=await(await e.resumeMultipartUpload(t.key,t.uploadId)).uploadPart(t.partNumber,t.data);if(!n)throw Error(`Part is undefined`);return n.etag},catch:e=>s(`uploadPart`,e,{upload_id:t.key,part_number:t.partNumber,part_size:t.data.length,s3_bucket:t.bucket})}).pipe(r.Effect.map(e=>e))),completeMultipartUpload:(t,i)=>(0,n.withS3ApiMetrics)(`completeMultipartUpload`,r.Effect.tryPromise({try:async()=>{let n=await(await e.resumeMultipartUpload(t.key,t.uploadId)).complete(i);if(!n)throw Error(`Complete is undefined`);return n.key},catch:e=>s(`completeMultipartUpload`,e,{upload_id:t.key,parts_count:i.length,s3_bucket:t.bucket})})),abortMultipartUpload:t=>r.Effect.tryPromise({try:async()=>{await(await e.resumeMultipartUpload(t.key,t.uploadId)).abort()},catch:e=>u(`abortMultipartUpload`,e,{upload_id:t.key,s3_bucket:t.bucket})}),getIncompletePart:n=>r.Effect.tryPromise({try:async()=>{let t=await e.get(o(n));if(!(!t||!t.body))return t.body},catch:e=>s(`getIncompletePart`,e,{upload_id:n,bucket:t})}),getIncompletePartSize:e=>a(o(e)),putIncompletePart:(e,t)=>c(o(e),t).pipe(r.Effect.tap(()=>r.Effect.logInfo(`Incomplete part uploaded`).pipe(r.Effect.annotateLogs({upload_id:e})))),deleteIncompletePart:e=>l(o(e))}},h=(e,t)=>r.Layer.succeed(p,m(e,t)),g=e=>{let{id:t,metadata:n}=e;if(!n)return t;let 
r=n.filename||n.fileName||n.name;return typeof r==`string`&&r.includes(`.`)?`${t}${r.substring(r.lastIndexOf(`.`))}`:t};function _(o){let{deliveryUrl:s,partSize:c,minPartSize:l=5242880,useTags:u=!0,maxMultipartParts:d=1e4,maxConcurrentPartUploads:m=60,bucket:h}=o;return r.Effect.gen(function*(){let o=yield*p,_=yield*t.UploadFileKVStore,v=c||8*1024*1024,y=t=>{let n=t.storage.uploadId;return n?r.Effect.succeed(n):r.Effect.fail(e.UploadistaError.fromCode(`FILE_WRITE_ERROR`,Error(`Upload ID is undefined`)))},b=(t,i,a)=>{let s=g(t);return(0,n.withS3TimingMetrics)(n.s3PartUploadDurationHistogram,r.Effect.gen(function*(){let c=yield*y(t),l=yield*o.uploadPart({bucket:o.bucket,key:s,uploadId:c,partNumber:a,data:i}).pipe(r.Effect.retry({schedule:r.Schedule.exponential(`1 second`,2).pipe(r.Schedule.intersect(r.Schedule.recurs(3))),while:e=>!f(e)}),r.Effect.tapError(e=>r.Effect.logWarning(`Retrying part upload`).pipe(r.Effect.annotateLogs({upload_id:t.id,part_number:a,error_message:e.message,retry_attempt:`unknown`,part_size:i.length,s3_bucket:o.bucket}))),r.Effect.catchAll(n=>f(n)?r.Effect.fail(e.UploadistaError.fromCode(`UPLOAD_CANCELLED`,{cause:n,body:`Upload ${t.id} was cancelled`})):r.Effect.fail(n))),u=yield*_.get(t.id),d=[...u.storage.parts||[],{partNumber:a,etag:l,size:i.length}];return yield*_.set(t.id,{...u,storage:{...u.storage,parts:d}}),yield*(0,n.s3UploadPartsTotal)(r.Effect.succeed(1)),yield*r.Effect.logInfo(`Part uploaded successfully`).pipe(r.Effect.annotateLogs({upload_id:t.id,part_number:a,part_size:i.length,etag:l})),l})).pipe(r.Effect.withSpan(`s3-upload-part-${a}`,{attributes:{"upload.id":t.id,"upload.part_number":a,"upload.part_size":i.length,"s3.bucket":o.bucket,"s3.key":s}}))},x=(e,t)=>o.putIncompletePart(e,t),S=e=>r.Effect.gen(function*(){let t=yield*o.getIncompletePart(e);if(!t)return;let n=t.getReader(),i=[],a=0;try{for(;;){let{done:e,value:t}=yield*r.Effect.promise(()=>n.read());if(e)break;i.push(t),a+=t.length}}finally{n.releaseLock()}let 
s=r.Stream.fromIterable(i);return{size:a,stream:s}}),C=e=>o.deleteIncompletePart(e),w=e=>o.getIncompletePartSize(e),T=(e,t)=>{let i=g(e);return r.Effect.gen(function*(){let n=yield*y(e);return yield*o.completeMultipartUpload({bucket:o.bucket,key:i,uploadId:n},t)}).pipe(r.Effect.tap(()=>(0,n.s3UploadSuccessTotal)(r.Effect.succeed(1))),r.Effect.withSpan(`s3-complete-multipart-upload`,{attributes:{"upload.id":e.id,"upload.parts_count":t.length,"s3.bucket":o.bucket,"s3.key":i}}))},E=e=>{let t=g(e);return r.Effect.gen(function*(){let n=yield*y(e);yield*o.abortMultipartUpload({bucket:o.bucket,key:t,uploadId:n}),yield*o.deleteObjects([t])})},D=e=>r.Effect.gen(function*(){let t=((yield*_.get(e)).storage.parts||[]).map(e=>({partNumber:e.partNumber,etag:e.etag,size:e.size}));return t.sort((e,t)=>(e.partNumber??0)-(t.partNumber??0)),{uploadFound:!0,parts:t}}),O=(e,t)=>r.Effect.gen(function*(){if(!t)return 0;let n=yield*_.get(e.id),r=n.storage.uploadId;return r&&(yield*_.set(e.id,{...n,storage:{...n.storage,uploadId:r}})),0}),k=e=>r.Effect.gen(function*(){yield*r.Effect.logInfo(`Clearing cache`).pipe(r.Effect.annotateLogs({upload_id:e})),yield*_.delete(e)}),A=e=>{let t=g(e);return r.Effect.gen(function*(){yield*r.Effect.logInfo(`Initializing multipart upload`).pipe(r.Effect.annotateLogs({upload_id:e.id}));let i=yield*o.createMultipartUpload({bucket:o.bucket,key:t,uploadId:``,contentType:e.metadata?.contentType?.toString(),cacheControl:e.metadata?.cacheControl?.toString()}),a={...e,storage:{...e.storage,path:i.key,uploadId:i.uploadId,bucket:i.bucket},url:`${s}/${t}`};return yield*_.set(e.id,a),yield*r.Effect.logInfo(`Multipart upload 
created`).pipe(r.Effect.annotateLogs({upload_id:e.id,s3_upload_id:a.storage.uploadId,s3_key:t})),yield*(0,n.s3UploadRequestsTotal)(r.Effect.succeed(1)),yield*(0,n.s3FileSizeHistogram)(r.Effect.succeed(e.size||0)),a}).pipe(r.Effect.withSpan(`s3-create-upload`,{attributes:{"upload.id":e.id,"upload.size":e.size||0,"s3.bucket":o.bucket,"s3.key":t}}))},j=e=>r.Effect.gen(function*(){yield*r.Effect.logInfo(`Initializing multipart upload`).pipe(r.Effect.annotateLogs({upload_id:e.id}));let t=yield*A(e);return yield*_.set(e.id,t),yield*r.Effect.logInfo(`Multipart upload created`).pipe(r.Effect.annotateLogs({upload_id:e.id,s3_upload_id:t.storage.uploadId})),yield*(0,n.s3UploadRequestsTotal)(r.Effect.succeed(1)),t}).pipe(r.Effect.withSpan(`s3-create-upload`,{attributes:{"upload.id":e.id,"upload.size":e.size||0,"s3.bucket":h}})),M=e=>r.Effect.gen(function*(){yield*E(yield*_.get(e)),yield*k(e)}),N=(e,t)=>(0,n.withS3UploadMetrics)(e.file_id,(0,n.withS3TimingMetrics)(n.s3UploadDurationHistogram,r.Effect.gen(function*(){let{stream:i,file_id:o,offset:s}=e,{onProgress:c}=t,u=Date.now();yield*(0,n.s3ActiveUploadsGauge)(r.Effect.succeed(1));let{uploadFile:f,nextPartNumber:p,offset:h,data:g,existingPartSize:_}=yield*F(o,s,i),y=_||a(f.size,v,l,d);yield*r.Effect.logInfo(`Part size decision`).pipe(r.Effect.annotateLogs({upload_id:o,existing_part_size:_,calculated_part_size:a(f.size,v,l,d),final_part_size:y,next_part_number:p}));let b=h+(yield*B(f,g,p,h,y,l,m,c));return b>m&&f.size===b&&(yield*I(o,f,u)),b}).pipe(r.Effect.ensuring((0,n.s3ActiveUploadsGauge)(r.Effect.succeed(0)))))),P=e=>r.Effect.gen(function*(){let t=yield*_.get(e),{parts:n,uploadFound:r}=yield*D(e);if(!r)return{...t,offset:t.size,size:t.size};let a=i(n),o=yield*w(e);return{...t,offset:a+(o??0),size:t.size,storage:t.storage}}),F=(e,t,n)=>r.Effect.gen(function*(){let 
i=yield*_.get(e),a=i.storage.parts||[],o=(a.length>0&&a[a.length-1].partNumber?a[a.length-1].partNumber:0)+1,s=a.length>0&&a[0].size?a[0].size:null;if(s&&a.length>1){let t=a.slice(0,-1).find(e=>e.size!==s);t&&(yield*r.Effect.logWarning(`Inconsistent part sizes detected in existing upload`).pipe(r.Effect.annotateLogs({upload_id:e,expected_size:s,inconsistent_part:t.partNumber,inconsistent_size:t.size})))}let c=yield*S(e);if(c){yield*C(e);let a=t-c.size,l=c.stream.pipe(r.Stream.concat(n));return{uploadFile:i,nextPartNumber:o,offset:a,incompletePartSize:c.size,data:l,existingPartSize:s}}else return{uploadFile:i,nextPartNumber:o,offset:t,incompletePartSize:0,data:n,existingPartSize:s}}),I=(e,t,i)=>r.Effect.gen(function*(){let{parts:a}=yield*D(e);yield*r.Effect.logInfo(`Attempting to complete multipart upload`).pipe(r.Effect.annotateLogs({upload_id:e,parts_count:a.length,parts_info:a.map((e,t)=>({part_number:e.partNumber,size:e.size,etag:e.etag,is_final_part:t===a.length-1}))})),yield*T(t,a),yield*O(t,u);let o=Date.now()-i,s=t.size||0,c=o>0?s*1e3/o:0,l=a.length>0?a.reduce((e,t)=>e+(t.size||0),0)/a.length:void 0;yield*(0,n.logS3UploadCompletion)(e,{fileSize:s,totalDurationMs:o,partsCount:a.length,averagePartSize:l,throughputBps:c})}).pipe(r.Effect.tapError(t=>r.Effect.gen(function*(){yield*(0,n.s3UploadErrorsTotal)(r.Effect.succeed(1)),yield*r.Effect.logError(`Failed to finish upload`).pipe(r.Effect.annotateLogs({upload_id:e,error:String(t)}))}))),L=()=>r.Effect.gen(function*(){return yield*r.Effect.logWarning(`R2 does not support automatic expired upload deletion via API. 
Please use R2 lifecycle rules instead.`).pipe(r.Effect.annotateLogs({bucket:o.bucket})),0}),R=e=>t=>r.Stream.async(n=>{let i=new Uint8Array,a=1,o=0,s=(t,i=!1)=>{r.Effect.runSync(r.Effect.logInfo(`Creating chunk`).pipe(r.Effect.annotateLogs({part_number:a,chunk_size:t.length,expected_size:e,is_final_chunk:i,total_bytes_processed:o+t.length}))),n.single({partNumber:a++,data:t,size:t.length})},c=t=>{let n=new Uint8Array(i.length+t.length);for(n.set(i),n.set(t,i.length),i=n,o+=t.length;i.length>=e;){let t=i.slice(0,e);i=i.slice(e),s(t,!1)}};r.Effect.runFork(t.pipe(r.Stream.runForEach(e=>r.Effect.sync(()=>c(e))),r.Effect.andThen(()=>r.Effect.sync(()=>{i.length>0&&s(i,!0),n.end()})),r.Effect.catchAll(e=>r.Effect.sync(()=>n.fail(e)))))}),z=(e,t=0)=>n=>e?r.Effect.gen(function*(){let i=yield*r.Ref.make(t);return n.pipe(r.Stream.tap(t=>r.Effect.gen(function*(){e(yield*r.Ref.updateAndGet(i,e=>e+t.length))})))}).pipe(r.Stream.unwrap):n,B=(t,i,a,o,s,c,l,u)=>r.Effect.gen(function*(){yield*r.Effect.logInfo(`Starting part uploads`).pipe(r.Effect.annotateLogs({upload_id:t.id,init_offset:o,file_size:t.size,part_size:s,min_part_size:c}));let d=i.pipe(z(u,o),R(s)),f=yield*r.Ref.make(o),p=yield*r.Ref.make(0),m=i=>r.Effect.gen(function*(){let o=yield*r.Ref.updateAndGet(f,e=>e+i.size),l=o>=(t.size||0);yield*r.Effect.logDebug(`Processing chunk`).pipe(r.Effect.annotateLogs({upload_id:t.id,cumulative_offset:o,file_size:t.size,chunk_size:i.size,is_final_part:l}));let u=a+i.partNumber-1;i.size>s&&(yield*r.Effect.fail(e.UploadistaError.fromCode(`FILE_WRITE_ERROR`,Error(`Part size ${i.size} exceeds upload part size ${s}`)))),i.size>=c||l?(yield*r.Effect.logDebug(`Uploading multipart chunk`).pipe(r.Effect.annotateLogs({upload_id:t.id,part_number:u,chunk_size:i.size,min_part_size:c,is_final_part:l})),yield*b(t,i.data,u),yield*(0,n.s3PartSizeHistogram)(r.Effect.succeed(i.size))):yield*x(t.id,i.data),yield*r.Ref.update(p,e=>e+i.size)});return 
yield*d.pipe(r.Stream.runForEach(e=>m(e)),r.Effect.withConcurrency(l)),yield*r.Ref.get(p)}),V=()=>({supportsParallelUploads:!0,supportsConcatenation:!0,supportsDeferredLength:!0,supportsResumableUploads:!0,supportsTransactionalUploads:!0,supportsStreamingRead:!0,supportsStreamingWrite:!0,maxConcurrentUploads:m,minChunkSize:l,maxChunkSize:5368709120,maxParts:d,optimalChunkSize:v,requiresOrderedChunks:!1,requiresMimeTypeValidation:!0,maxValidationSize:void 0}),H=()=>({minChunkSize:l,maxChunkSize:5368709120,optimalChunkSize:v,requiresOrderedChunks:!1}),U=e=>{let t=V(),n=(()=>{switch(e){case`parallel`:return t.supportsParallelUploads;case`single`:return!0;default:return!1}})();return r.Effect.succeed(n)},W=e=>{let t=new Uint8Array(e.reduce((e,t)=>e+t.length,0)),n=0;for(let r of e)t.set(r,n),n+=r.length;return t},G=async e=>{let t=e.getReader(),n=[];for(;;){let{done:e,value:r}=await t.read();if(e)break;n.push(r)}return W(n)};return{bucket:h,create:j,remove:M,write:N,getUpload:P,read:t=>r.Effect.gen(function*(){let n=yield*_.get(t);if(!n.id)return yield*r.Effect.fail(e.UploadistaError.fromCode(`FILE_READ_ERROR`,Error(`Upload Key is undefined`)));let i=g(n),a=yield*o.getObject(i);return yield*r.Effect.promise(()=>G(a))}),readStream:(n,i)=>r.Effect.gen(function*(){let a=yield*_.get(n);if(!a.id)return yield*r.Effect.fail(e.UploadistaError.fromCode(`FILE_READ_ERROR`,Error(`Upload Key is undefined`)));let s={...t.DEFAULT_STREAMING_CONFIG,...i},c=g(a),l=yield*o.getObject(c);return r.Stream.async(t=>{let n=l.getReader(),i=s.chunkSize,a=new Uint8Array;return(async()=>{try{for(;;){let{done:e,value:r}=await n.read();if(e){a.length>0&&t.single(a),t.end();return}if(r){let e=new Uint8Array(a.length+r.length);for(e.set(a),e.set(r,a.length),a=e;a.length>=i;){let e=a.slice(0,i);a=a.slice(i),t.single(e)}}}}catch(n){t.fail(new e.UploadistaError({code:`FILE_READ_ERROR`,status:500,body:`Failed to read R2 object stream`,details:`R2 stream read failed: 
${String(n)}`}))}})(),r.Effect.sync(()=>{n.releaseLock()})})}),writeStream:(t,i)=>(0,n.withS3TimingMetrics)(n.s3UploadDurationHistogram,r.Effect.gen(function*(){let c=Date.now(),u=t;yield*r.Effect.logInfo(`Starting streaming write to R2`).pipe(r.Effect.annotateLogs({upload_id:t,r2_key:u,size_hint:i.sizeHint})),yield*(0,n.s3UploadRequestsTotal)(r.Effect.succeed(1)),yield*(0,n.s3ActiveUploadsGauge)(r.Effect.succeed(1));let f=i.sizeHint?a(i.sizeHint,v,l,d):v,p=(yield*o.createMultipartUpload({bucket:o.bucket,key:u,uploadId:``,contentType:i.contentType})).uploadId;yield*r.Effect.logInfo(`Multipart upload created for streaming write`).pipe(r.Effect.annotateLogs({upload_id:t,r2_upload_id:p,r2_key:u,part_size:f}));let m=yield*r.Ref.make([]),h=yield*r.Ref.make(0),g=yield*r.Ref.make(1),_=yield*r.Ref.make(new Uint8Array),y=(e,i)=>r.Effect.gen(function*(){if(e.length===0||e.length<l&&!i)return;let a=yield*r.Ref.getAndUpdate(g,e=>e+1);yield*r.Effect.logDebug(`Uploading part from stream`).pipe(r.Effect.annotateLogs({upload_id:t,part_number:a,part_size:e.length,is_final_part:i}));let s=yield*o.uploadPart({bucket:o.bucket,key:u,uploadId:p,partNumber:a,data:e}).pipe(r.Effect.retry(r.Schedule.exponential(`1 second`,2).pipe(r.Schedule.intersect(r.Schedule.recurs(3)))));yield*r.Ref.update(m,t=>[...t,{partNumber:a,etag:s,size:e.length}]),yield*(0,n.s3UploadPartsTotal)(r.Effect.succeed(1)),yield*(0,n.s3PartSizeHistogram)(r.Effect.succeed(e.length))});yield*i.stream.pipe(r.Stream.runForEach(e=>r.Effect.gen(function*(){yield*r.Ref.update(h,t=>t+e.length);let t=yield*r.Ref.get(_),n=new Uint8Array(t.length+e.length);n.set(t),n.set(e,t.length);let i=0;for(;n.length-i>=f;)yield*y(n.slice(i,i+f),!1),i+=f;yield*r.Ref.set(_,n.slice(i))})));let b=yield*r.Ref.get(_);b.length>0&&(yield*y(b,!0));let x=yield*r.Ref.get(m),S=yield*r.Ref.get(h);if(x.length===0)return 
yield*o.abortMultipartUpload({bucket:o.bucket,key:u,uploadId:p}),yield*(0,n.s3ActiveUploadsGauge)(r.Effect.succeed(-1)),yield*r.Effect.fail(new e.UploadistaError({code:`FILE_WRITE_ERROR`,status:400,body:`Cannot complete upload with no data`,details:`The stream provided no data to upload`}));x.sort((e,t)=>(e.partNumber??0)-(t.partNumber??0)),yield*o.completeMultipartUpload({bucket:o.bucket,key:u,uploadId:p},x);let C=Date.now()-c,w=C>0?S*1e3/C:0,T=x.length>0?S/x.length:void 0;return yield*(0,n.logS3UploadCompletion)(t,{fileSize:S,totalDurationMs:C,partsCount:x.length,averagePartSize:T,throughputBps:w}),yield*(0,n.s3UploadSuccessTotal)(r.Effect.succeed(1)),yield*(0,n.s3ActiveUploadsGauge)(r.Effect.succeed(-1)),yield*(0,n.s3FileSizeHistogram)(r.Effect.succeed(S)),yield*r.Effect.logInfo(`Streaming write to R2 completed`).pipe(r.Effect.annotateLogs({upload_id:t,total_bytes:S,parts_count:x.length,duration_ms:C})),{id:u,size:S,path:u,bucket:o.bucket,url:`${s}/${u}`}}).pipe(r.Effect.catchAll(e=>r.Effect.gen(function*(){return yield*(0,n.s3UploadErrorsTotal)(r.Effect.succeed(1)),yield*(0,n.s3ActiveUploadsGauge)(r.Effect.succeed(-1)),yield*r.Effect.fail(e)})))),deleteExpired:L,getCapabilities:V,getChunkerConstraints:H,validateUploadStrategy:U}})}const v=e=>{let{r2Bucket:t,bucket:n}=e;return _(e).pipe(r.Effect.provide(h(t,n)))};exports.createR2Store=_,exports.r2Store=v;
1
+ Object.defineProperty(exports,Symbol.toStringTag,{value:`Module`});let e=require(`@uploadista/core/errors`),t=require(`@uploadista/core/types`),n=require(`@uploadista/observability`),r=require(`effect`);const i=e=>e&&e.length>0?e.reduce((e,t)=>e+(t?.size??0),0):0,a=(e,t,n,r,i=5497558138880)=>{let a=e??i,o;o=a<=t?a:a<=t*r?t:Math.ceil(a/r);let s=e&&e<n?o:Math.max(o,n),c=1024;return Math.ceil(s/c)*c},o=e=>`${e}.part`,s=(t,i,a={})=>(r.Effect.runSync((0,n.trackS3Error)(t,i,a)),e.UploadistaError.fromCode(`FILE_WRITE_ERROR`,i)),c=e=>[`NotFound`,`NoSuchKey`,`NoSuchUpload`].includes(e),l=e=>{if(typeof e!=`object`||!e)return null;let t={};return`code`in e&&typeof e.code==`string`&&(t.code=e.code),`name`in e&&typeof e.name==`string`&&(t.name=e.name),Object.keys(t).length>0?t:null},u=(t,n,i={})=>{let a=l(n);return a&&(a.code&&c(a.code)||a.name&&c(a.name))?(r.Effect.runSync(r.Effect.logWarning(`File not found during ${t} operation`).pipe(r.Effect.annotateLogs({error_code:a.code,error_name:a.name,...i}))),e.UploadistaError.fromCode(`FILE_NOT_FOUND`)):s(t,n,i)},d=e=>e===`NoSuchUpload`||e===`NoSuchKey`,f=t=>{let n=l(t);if(n&&(n.code&&d(n.code)||n.name&&d(n.name)))return!0;if(t instanceof e.UploadistaError&&t.cause){let e=l(t.cause);if(e&&(e.code&&d(e.code)||e.name&&d(e.name)))return!0}return!1};var p=class extends r.Context.Tag(`R2ClientService`)(){};const m=(e,t)=>{let i=n=>r.Effect.gen(function*(){return yield*r.Effect.tryPromise({try:async()=>{let t=await e.get(n);if(!t)throw Error(`Object not found: ${n}`);return t.body},catch:e=>s(`getObject`,e,{key:n,bucket:t})})}),a=n=>r.Effect.tryPromise({try:async()=>{let t=await e.head(n);if(t)return t.size},catch:e=>s(`headObject`,e,{key:n,bucket:t})}),c=(n,i)=>r.Effect.tryPromise({try:async()=>{let t=await e.put(n,i);if(!t)throw Error(`Failed to put object`);return t.etag},catch:e=>s(`putObject`,e,{key:n,bucket:t,size:i.length})}),l=n=>r.Effect.tryPromise({try:async()=>{await 
e.delete(n)},catch:e=>s(`deleteObject`,e,{key:n,bucket:t})});return{bucket:t,getObject:i,headObject:a,putObject:c,deleteObject:l,deleteObjects:n=>r.Effect.tryPromise({try:()=>e.delete(n),catch:e=>s(`deleteObjects`,e,{keys:n.length,bucket:t})}),createMultipartUpload:t=>(0,n.withS3ApiMetrics)(`createMultipartUpload`,r.Effect.tryPromise({try:async()=>{let n=await e.createMultipartUpload(t.key);if(!n.uploadId)throw Error(`Upload ID is undefined`);if(!n.key)throw Error(`Key is undefined`);return{uploadId:n.uploadId,bucket:t.bucket,key:n.key}},catch:e=>s(`createMultipartUpload`,e,t)})),uploadPart:t=>(0,n.withS3ApiMetrics)(`uploadPart`,r.Effect.tryPromise({try:async()=>{let n=await(await e.resumeMultipartUpload(t.key,t.uploadId)).uploadPart(t.partNumber,t.data);if(!n)throw Error(`Part is undefined`);return n.etag},catch:e=>s(`uploadPart`,e,{upload_id:t.key,part_number:t.partNumber,part_size:t.data.length,s3_bucket:t.bucket})}).pipe(r.Effect.map(e=>e))),completeMultipartUpload:(t,i)=>(0,n.withS3ApiMetrics)(`completeMultipartUpload`,r.Effect.tryPromise({try:async()=>{let n=await(await e.resumeMultipartUpload(t.key,t.uploadId)).complete(i);if(!n)throw Error(`Complete is undefined`);return n.key},catch:e=>s(`completeMultipartUpload`,e,{upload_id:t.key,parts_count:i.length,s3_bucket:t.bucket})})),abortMultipartUpload:t=>r.Effect.tryPromise({try:async()=>{await(await e.resumeMultipartUpload(t.key,t.uploadId)).abort()},catch:e=>u(`abortMultipartUpload`,e,{upload_id:t.key,s3_bucket:t.bucket})}),getIncompletePart:n=>r.Effect.tryPromise({try:async()=>{let t=await e.get(o(n));if(!(!t||!t.body))return t.body},catch:e=>s(`getIncompletePart`,e,{upload_id:n,bucket:t})}),getIncompletePartSize:e=>a(o(e)),putIncompletePart:(e,t)=>c(o(e),t).pipe(r.Effect.tap(()=>r.Effect.logInfo(`Incomplete part uploaded`).pipe(r.Effect.annotateLogs({upload_id:e})))),deleteIncompletePart:e=>l(o(e))}},h=(e,t)=>r.Layer.succeed(p,m(e,t)),g=e=>{let{id:t,metadata:n}=e;if(!n)return t;let 
r=n.filename||n.fileName||n.name;return typeof r==`string`&&r.includes(`.`)?`${t}${r.substring(r.lastIndexOf(`.`))}`:t};function _(o){let{deliveryUrl:s,partSize:c,minPartSize:l=5242880,useTags:u=!0,maxMultipartParts:d=1e4,maxConcurrentPartUploads:m=60,bucket:h}=o;return r.Effect.gen(function*(){let o=yield*p,_=yield*t.UploadFileKVStore,v=c||8*1024*1024,y=t=>{let n=t.storage.uploadId;return n?r.Effect.succeed(n):r.Effect.fail(e.UploadistaError.fromCode(`FILE_WRITE_ERROR`,Error(`Upload ID is undefined`)))},b=(t,i,a)=>{let s=g(t);return(0,n.withS3TimingMetrics)(n.s3PartUploadDurationHistogram,r.Effect.gen(function*(){let c=yield*y(t),l=yield*o.uploadPart({bucket:o.bucket,key:s,uploadId:c,partNumber:a,data:i}).pipe(r.Effect.retry({schedule:r.Schedule.exponential(`1 second`,2).pipe(r.Schedule.intersect(r.Schedule.recurs(3))),while:e=>!f(e)}),r.Effect.tapError(e=>r.Effect.logWarning(`Retrying part upload`).pipe(r.Effect.annotateLogs({upload_id:t.id,part_number:a,error_message:e.message,retry_attempt:`unknown`,part_size:i.length,s3_bucket:o.bucket}))),r.Effect.catchAll(n=>f(n)?r.Effect.fail(e.UploadistaError.fromCode(`UPLOAD_CANCELLED`,{cause:n,body:`Upload ${t.id} was cancelled`})):r.Effect.fail(n))),u=yield*_.get(t.id),d=[...u.storage.parts||[],{partNumber:a,etag:l,size:i.length}];return yield*_.set(t.id,{...u,storage:{...u.storage,parts:d}}),yield*(0,n.s3UploadPartsTotal)(r.Effect.succeed(1)),yield*r.Effect.logInfo(`Part uploaded successfully`).pipe(r.Effect.annotateLogs({upload_id:t.id,part_number:a,part_size:i.length,etag:l})),l})).pipe(r.Effect.withSpan(`s3-upload-part-${a}`,{attributes:{"upload.id":t.id,"upload.part_number":a,"upload.part_size":i.length,"s3.bucket":o.bucket,"s3.key":s}}))},x=(e,t)=>o.putIncompletePart(e,t),S=e=>r.Effect.gen(function*(){let t=yield*o.getIncompletePart(e);if(!t)return;let n=t.getReader(),i=[],a=0;try{for(;;){let{done:e,value:t}=yield*r.Effect.promise(()=>n.read());if(e)break;i.push(t),a+=t.length}}finally{n.releaseLock()}let 
s=r.Stream.fromIterable(i);return{size:a,stream:s}}),C=e=>o.deleteIncompletePart(e),w=e=>o.getIncompletePartSize(e),T=(e,t)=>{let i=g(e);return r.Effect.gen(function*(){let n=yield*y(e);return yield*o.completeMultipartUpload({bucket:o.bucket,key:i,uploadId:n},t)}).pipe(r.Effect.tap(()=>(0,n.s3UploadSuccessTotal)(r.Effect.succeed(1))),r.Effect.withSpan(`s3-complete-multipart-upload`,{attributes:{"upload.id":e.id,"upload.parts_count":t.length,"s3.bucket":o.bucket,"s3.key":i}}))},E=e=>{let t=g(e);return r.Effect.gen(function*(){let n=yield*y(e);yield*o.abortMultipartUpload({bucket:o.bucket,key:t,uploadId:n}),yield*o.deleteObjects([t])})},D=e=>r.Effect.gen(function*(){let t=((yield*_.get(e)).storage.parts||[]).map(e=>({partNumber:e.partNumber,etag:e.etag,size:e.size}));return t.sort((e,t)=>(e.partNumber??0)-(t.partNumber??0)),{uploadFound:!0,parts:t}}),O=(e,t)=>r.Effect.gen(function*(){if(!t)return 0;let n=yield*_.get(e.id),r=n.storage.uploadId;return r&&(yield*_.set(e.id,{...n,storage:{...n.storage,uploadId:r}})),0}),k=e=>r.Effect.gen(function*(){yield*r.Effect.logInfo(`Clearing cache`).pipe(r.Effect.annotateLogs({upload_id:e})),yield*_.delete(e)}),A=e=>{let t=g(e);return r.Effect.gen(function*(){yield*r.Effect.logInfo(`Initializing multipart upload`).pipe(r.Effect.annotateLogs({upload_id:e.id}));let i=yield*o.createMultipartUpload({bucket:o.bucket,key:t,uploadId:``,contentType:e.metadata?.contentType?.toString(),cacheControl:e.metadata?.cacheControl?.toString()}),a={...e,storage:{...e.storage,path:i.key,uploadId:i.uploadId,bucket:i.bucket},url:`${s}/${t}`};return yield*_.set(e.id,a),yield*r.Effect.logInfo(`Multipart upload 
created`).pipe(r.Effect.annotateLogs({upload_id:e.id,s3_upload_id:a.storage.uploadId,s3_key:t})),yield*(0,n.s3UploadRequestsTotal)(r.Effect.succeed(1)),yield*(0,n.s3FileSizeHistogram)(r.Effect.succeed(e.size||0)),a}).pipe(r.Effect.withSpan(`s3-create-upload`,{attributes:{"upload.id":e.id,"upload.size":e.size||0,"s3.bucket":o.bucket,"s3.key":t}}))},j=e=>r.Effect.gen(function*(){yield*r.Effect.logInfo(`Initializing multipart upload`).pipe(r.Effect.annotateLogs({upload_id:e.id}));let t=yield*A(e);return yield*_.set(e.id,t),yield*r.Effect.logInfo(`Multipart upload created`).pipe(r.Effect.annotateLogs({upload_id:e.id,s3_upload_id:t.storage.uploadId})),yield*(0,n.s3UploadRequestsTotal)(r.Effect.succeed(1)),t}).pipe(r.Effect.withSpan(`s3-create-upload`,{attributes:{"upload.id":e.id,"upload.size":e.size||0,"s3.bucket":h}})),M=e=>r.Effect.gen(function*(){yield*E(yield*_.get(e)),yield*k(e)}),N=(e,t)=>(0,n.withS3UploadMetrics)(e.file_id,(0,n.withS3TimingMetrics)(n.s3UploadDurationHistogram,r.Effect.gen(function*(){let{stream:i,file_id:o,offset:s}=e,{onProgress:c}=t,u=Date.now();yield*(0,n.s3ActiveUploadsGauge)(r.Effect.succeed(1));let{uploadFile:f,nextPartNumber:p,offset:h,data:g,existingPartSize:_}=yield*F(o,s,i),y=_||a(f.size,v,l,d);yield*r.Effect.logInfo(`Part size decision`).pipe(r.Effect.annotateLogs({upload_id:o,existing_part_size:_,calculated_part_size:a(f.size,v,l,d),final_part_size:y,next_part_number:p}));let b=h+(yield*B(f,g,p,h,y,l,m,c));return b>m&&f.size===b&&(yield*I(o,f,u)),b}).pipe(r.Effect.ensuring((0,n.s3ActiveUploadsGauge)(r.Effect.succeed(0)))))),P=e=>r.Effect.gen(function*(){let t=yield*_.get(e),{parts:n,uploadFound:r}=yield*D(e);if(!r)return{...t,offset:t.size,size:t.size};let a=i(n),o=yield*w(e);return{...t,offset:a+(o??0),size:t.size,storage:t.storage}}),F=(e,t,n)=>r.Effect.gen(function*(){let 
i=yield*_.get(e),a=i.storage.parts||[],o=(a.length>0&&a[a.length-1].partNumber?a[a.length-1].partNumber:0)+1,s=a.length>0&&a[0].size?a[0].size:null;if(s&&a.length>1){let t=a.slice(0,-1).find(e=>e.size!==s);t&&(yield*r.Effect.logWarning(`Inconsistent part sizes detected in existing upload`).pipe(r.Effect.annotateLogs({upload_id:e,expected_size:s,inconsistent_part:t.partNumber,inconsistent_size:t.size})))}let c=yield*S(e);if(c){yield*C(e);let a=t-c.size,l=c.stream.pipe(r.Stream.concat(n));return{uploadFile:i,nextPartNumber:o,offset:a,incompletePartSize:c.size,data:l,existingPartSize:s}}else return{uploadFile:i,nextPartNumber:o,offset:t,incompletePartSize:0,data:n,existingPartSize:s}}),I=(e,t,i)=>r.Effect.gen(function*(){let{parts:a}=yield*D(e);yield*r.Effect.logInfo(`Attempting to complete multipart upload`).pipe(r.Effect.annotateLogs({upload_id:e,parts_count:a.length,parts_info:a.map((e,t)=>({part_number:e.partNumber,size:e.size,etag:e.etag,is_final_part:t===a.length-1}))})),yield*T(t,a),yield*O(t,u);let o=Date.now()-i,s=t.size||0,c=o>0?s*1e3/o:0,l=a.length>0?a.reduce((e,t)=>e+(t.size||0),0)/a.length:void 0;yield*(0,n.logS3UploadCompletion)(e,{fileSize:s,totalDurationMs:o,partsCount:a.length,averagePartSize:l,throughputBps:c})}).pipe(r.Effect.tapError(t=>r.Effect.gen(function*(){yield*(0,n.s3UploadErrorsTotal)(r.Effect.succeed(1)),yield*r.Effect.logError(`Failed to finish upload`).pipe(r.Effect.annotateLogs({upload_id:e,error:String(t)}))}))),L=()=>r.Effect.gen(function*(){return yield*r.Effect.logWarning(`R2 does not support automatic expired upload deletion via API. 
Please use R2 lifecycle rules instead.`).pipe(r.Effect.annotateLogs({bucket:o.bucket})),0}),/* R(partSize): re-chunks a byte stream into fixed-size {partNumber,data,size} parts via an internal Uint8Array buffer; the final flushed part may be smaller */R=e=>t=>r.Stream.async(n=>{let i=new Uint8Array,a=1,o=0,s=(t,i=!1)=>{r.Effect.runSync(r.Effect.logInfo(`Creating chunk`).pipe(r.Effect.annotateLogs({part_number:a,chunk_size:t.length,expected_size:e,is_final_chunk:i,total_bytes_processed:o+t.length}))),n.single({partNumber:a++,data:t,size:t.length})},c=t=>{let n=new Uint8Array(i.length+t.length);for(n.set(i),n.set(t,i.length),i=n,o+=t.length;i.length>=e;){let t=i.slice(0,e);i=i.slice(e),s(t,!1)}};r.Effect.runFork(t.pipe(r.Stream.runForEach(e=>r.Effect.sync(()=>c(e))),r.Effect.andThen(()=>r.Effect.sync(()=>{i.length>0&&s(i,!0),n.end()})),r.Effect.catchAll(e=>r.Effect.sync(()=>n.fail(e)))))}),/* z(onProgress,initOffset=0): when a progress callback is supplied, taps the stream and reports cumulative byte count from a Ref counter; otherwise passes the stream through untouched */z=(e,t=0)=>n=>e?r.Effect.gen(function*(){let i=yield*r.Ref.make(t);return n.pipe(r.Stream.tap(t=>r.Effect.gen(function*(){e(yield*r.Ref.updateAndGet(i,e=>e+t.length))})))}).pipe(r.Stream.unwrap):n,/* B: drives part uploads over the re-chunked stream — chunks >= min part size (or the final chunk) go through the multipart part writer, smaller ones through the incomplete-part writer (b/x are presumably those writers, defined earlier in the bundle — confirm in the TS source); fails on oversize parts and returns total bytes written */B=(t,i,a,o,s,c,l,u)=>r.Effect.gen(function*(){yield*r.Effect.logInfo(`Starting part uploads`).pipe(r.Effect.annotateLogs({upload_id:t.id,init_offset:o,file_size:t.size,part_size:s,min_part_size:c}));let d=i.pipe(z(u,o),R(s)),f=yield*r.Ref.make(o),p=yield*r.Ref.make(0),m=i=>r.Effect.gen(function*(){let o=yield*r.Ref.updateAndGet(f,e=>e+i.size),l=o>=(t.size||0);yield*r.Effect.logDebug(`Processing chunk`).pipe(r.Effect.annotateLogs({upload_id:t.id,cumulative_offset:o,file_size:t.size,chunk_size:i.size,is_final_part:l}));let u=a+i.partNumber-1;i.size>s&&(yield*r.Effect.fail(e.UploadistaError.fromCode(`FILE_WRITE_ERROR`,Error(`Part size ${i.size} exceeds upload part size ${s}`)))),i.size>=c||l?(yield*r.Effect.logDebug(`Uploading multipart chunk`).pipe(r.Effect.annotateLogs({upload_id:t.id,part_number:u,chunk_size:i.size,min_part_size:c,is_final_part:l})),yield*b(t,i.data,u),yield*(0,n.s3PartSizeHistogram)(r.Effect.succeed(i.size))):yield*x(t.id,i.data),yield*r.Ref.update(p,e=>e+i.size)});return 
yield*d.pipe(r.Stream.runForEach(e=>m(e)),r.Effect.withConcurrency(l)),yield*r.Ref.get(p)}),/* V: static capability flags advertised by this R2 store */V=()=>({supportsParallelUploads:!0,supportsConcatenation:!0,supportsDeferredLength:!0,supportsResumableUploads:!0,supportsTransactionalUploads:!0,supportsStreamingRead:!0,supportsStreamingWrite:!0,maxConcurrentUploads:m,minChunkSize:l,maxChunkSize:5368709120,maxParts:d,optimalChunkSize:v,requiresOrderedChunks:!1,requiresMimeTypeValidation:!0,maxValidationSize:void 0}),/* H: chunker constraints (min/max/optimal chunk size, unordered chunks allowed) */H=()=>({minChunkSize:l,maxChunkSize:5368709120,optimalChunkSize:v,requiresOrderedChunks:!1}),/* U: validates an upload strategy name against capabilities; "single" is always allowed, unknown strategies are rejected */U=e=>{let t=V(),n=(()=>{switch(e){case`parallel`:return t.supportsParallelUploads;case`single`:return!0;default:return!1}})();return r.Effect.succeed(n)},/* W: concatenates an array of Uint8Array chunks into one buffer */W=e=>{let t=new Uint8Array(e.reduce((e,t)=>e+t.length,0)),n=0;for(let r of e)t.set(r,n),n+=r.length;return t},/* G: drains a ReadableStream to completion and returns its bytes as a single Uint8Array */G=async e=>{let t=e.getReader(),n=[];for(;;){let{done:e,value:r}=await t.read();if(e)break;n.push(r)}return W(n)};/* public data-store API returned by the factory */return{bucket:h,create:j,remove:M,write:N,getUpload:P,read:t=>r.Effect.gen(function*(){let n=yield*_.get(t);if(!n.id)return yield*r.Effect.fail(e.UploadistaError.fromCode(`FILE_READ_ERROR`,Error(`Upload Key is undefined`)));let i=g(n),a=yield*o.getObject(i);return yield*r.Effect.promise(()=>G(a))}),readStream:(n,i)=>r.Effect.gen(function*(){let a=yield*_.get(n);if(!a.id)return yield*r.Effect.fail(e.UploadistaError.fromCode(`FILE_READ_ERROR`,Error(`Upload Key is undefined`)));let s={...t.DEFAULT_STREAMING_CONFIG,...i},c=g(a),l=yield*o.getObject(c);return r.Stream.async(t=>{let n=l.getReader(),i=s.chunkSize,a=new Uint8Array;return(async()=>{try{for(;;){let{done:e,value:r}=await n.read();if(e){a.length>0&&t.single(a),t.end();return}if(r){let e=new Uint8Array(a.length+r.length);for(e.set(a),e.set(r,a.length),a=e;a.length>=i;){let e=a.slice(0,i);a=a.slice(i),t.single(e)}}}}catch(n){t.fail(new e.UploadistaError({code:`FILE_READ_ERROR`,status:500,body:`Failed to read R2 object stream`,details:`R2 stream read failed: 
${String(n)}`}))}})(),r.Effect.sync(()=>{n.releaseLock()})})}),writeStream:(t,i)=>(0,n.withS3TimingMetrics)(n.s3UploadDurationHistogram,r.Effect.gen(function*(){let c=Date.now(),u=t;yield*r.Effect.logInfo(`Starting streaming write to R2`).pipe(r.Effect.annotateLogs({upload_id:t,r2_key:u,size_hint:i.sizeHint})),yield*(0,n.s3UploadRequestsTotal)(r.Effect.succeed(1)),yield*(0,n.s3ActiveUploadsGauge)(r.Effect.succeed(1));let f=i.sizeHint?a(i.sizeHint,v,l,d):v,p=(yield*o.createMultipartUpload({bucket:o.bucket,key:u,uploadId:``,contentType:i.contentType})).uploadId;yield*r.Effect.logInfo(`Multipart upload created for streaming write`).pipe(r.Effect.annotateLogs({upload_id:t,r2_upload_id:p,r2_key:u,part_size:f}));let m=yield*r.Ref.make([]),h=yield*r.Ref.make(0),g=yield*r.Ref.make(1),_=yield*r.Ref.make(new Uint8Array),y=(e,i)=>r.Effect.gen(function*(){if(e.length===0||e.length<l&&!i)return;let a=yield*r.Ref.getAndUpdate(g,e=>e+1);yield*r.Effect.logDebug(`Uploading part from stream`).pipe(r.Effect.annotateLogs({upload_id:t,part_number:a,part_size:e.length,is_final_part:i}));let s=yield*o.uploadPart({bucket:o.bucket,key:u,uploadId:p,partNumber:a,data:e}).pipe(r.Effect.retry(r.Schedule.exponential(`1 second`,2).pipe(r.Schedule.intersect(r.Schedule.recurs(3)))));yield*r.Ref.update(m,t=>[...t,{partNumber:a,etag:s,size:e.length}]),yield*(0,n.s3UploadPartsTotal)(r.Effect.succeed(1)),yield*(0,n.s3PartSizeHistogram)(r.Effect.succeed(e.length))});yield*i.stream.pipe(r.Stream.runForEach(e=>r.Effect.gen(function*(){yield*r.Ref.update(h,t=>t+e.length);let t=yield*r.Ref.get(_),n=new Uint8Array(t.length+e.length);n.set(t),n.set(e,t.length);let i=0;for(;n.length-i>=f;)yield*y(n.slice(i,i+f),!1),i+=f;yield*r.Ref.set(_,n.slice(i))})));let b=yield*r.Ref.get(_);b.length>0&&(yield*y(b,!0));let x=yield*r.Ref.get(m),S=yield*r.Ref.get(h);if(x.length===0)return 
yield*o.abortMultipartUpload({bucket:o.bucket,key:u,uploadId:p}),yield*(0,n.s3ActiveUploadsGauge)(r.Effect.succeed(-1)),yield*r.Effect.fail(new e.UploadistaError({code:`FILE_WRITE_ERROR`,status:400,body:`Cannot complete upload with no data`,details:`The stream provided no data to upload`}));x.sort((e,t)=>(e.partNumber??0)-(t.partNumber??0)),yield*o.completeMultipartUpload({bucket:o.bucket,key:u,uploadId:p},x);let C=Date.now()-c,w=C>0?S*1e3/C:0,T=x.length>0?S/x.length:void 0;return yield*(0,n.logS3UploadCompletion)(t,{fileSize:S,totalDurationMs:C,partsCount:x.length,averagePartSize:T,throughputBps:w}),yield*(0,n.s3UploadSuccessTotal)(r.Effect.succeed(1)),yield*(0,n.s3ActiveUploadsGauge)(r.Effect.succeed(-1)),yield*(0,n.s3FileSizeHistogram)(r.Effect.succeed(S)),yield*r.Effect.logInfo(`Streaming write to R2 completed`).pipe(r.Effect.annotateLogs({upload_id:t,total_bytes:S,parts_count:x.length,duration_ms:C})),{id:u,size:S,path:u,bucket:o.bucket,url:`${s}/${u}`}}).pipe(r.Effect.catchAll(e=>r.Effect.gen(function*(){return yield*(0,n.s3UploadErrorsTotal)(r.Effect.succeed(1)),yield*(0,n.s3ActiveUploadsGauge)(r.Effect.succeed(-1)),yield*r.Effect.fail(e)})))),deleteExpired:L,getCapabilities:V,getChunkerConstraints:H,validateUploadStrategy:U}})}/* r2Store(config): convenience factory — builds the store and provides the R2 client layer from {r2Bucket,bucket} */const v=e=>{let{r2Bucket:t,bucket:n}=e;return _(e).pipe(r.Effect.provide(h(t,n)))};/* CJS exports */exports.createR2Store=_,exports.r2Store=v;
// NOTE(review): this is a generated, minified build artifact (dist/index.cjs). Do not
// hand-edit logic here — apply fixes in the TypeScript source and rebuild. The inline
// /*...*/ markers above label the major sections for readers of the bundle only.
// NOTE(review): the bundle's leading definitions (error mapping, part-size math, the
// R2Client wrappers b/x/j/M/N/P/L, and key helpers g/_/h) live before this chunk;
// comments here describing them are inferred from call sites — confirm against source.
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@uploadista/data-store-r2",
3
3
  "type": "module",
4
- "version": "0.1.4-beta.1",
4
+ "version": "1.0.0-beta.2",
5
5
  "description": "Cloudflare R2 data store for Uploadista",
6
6
  "license": "MIT",
7
7
  "author": "Uploadista",
@@ -14,20 +14,20 @@
14
14
  }
15
15
  },
16
16
  "dependencies": {
17
- "@cloudflare/workers-types": "4.20260131.0",
18
- "@uploadista/core": "0.1.4-beta.1",
19
- "@uploadista/observability": "0.1.4-beta.1"
17
+ "@cloudflare/workers-types": "4.20260227.0",
18
+ "@uploadista/core": "1.0.0-beta.2",
19
+ "@uploadista/observability": "1.0.0-beta.2"
20
20
  },
21
21
  "peerDependencies": {
22
22
  "effect": "^3.0.0"
23
23
  },
24
24
  "devDependencies": {
25
25
  "@effect/vitest": "0.27.0",
26
- "effect": "3.19.15",
27
- "tsdown": "0.20.1",
26
+ "effect": "3.19.19",
27
+ "tsdown": "0.20.3",
28
28
  "vitest": "4.0.18",
29
- "@uploadista/kv-store-memory": "0.1.4-beta.1",
30
- "@uploadista/typescript-config": "0.1.4-beta.1"
29
+ "@uploadista/kv-store-memory": "1.0.0-beta.2",
30
+ "@uploadista/typescript-config": "1.0.0-beta.2"
31
31
  },
32
32
  "scripts": {
33
33
  "build": "tsc --noEmit && tsdown",