@uploadista/data-store-azure 0.0.20-beta.6 → 0.0.20-beta.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -1 +1 @@
- let e=require(`@azure/storage-blob`),t=require(`@uploadista/core/errors`),n=require(`@uploadista/core/types`),r=require(`@uploadista/observability`),i=require(`effect`);const a=e=>typeof globalThis<`u`&&`Buffer`in globalThis?globalThis.Buffer.from(e):new Uint8Array(Array.from(e,e=>e.charCodeAt(0)));function o(e){return e&&e.length>0?e.reduce((e,t)=>e+(t?.size??0),0):0}function s({deliveryUrl:s,blockSize:c,minBlockSize:l=1024,maxBlocks:u=5e4,maxConcurrentBlockUploads:d=60,expirationPeriodInMilliseconds:f=1e3*60*60*24*7,connectionString:p,sasUrl:m,credential:h,accountName:g,accountKey:_,containerName:v}){return i.Effect.gen(function*(){let y=yield*n.UploadFileKVStore,b=c||8*1024*1024,x;if(p)x=e.BlobServiceClient.fromConnectionString(p);else if(m)x=new e.BlobServiceClient(m);else if(h){let t=g?`https://${g}.blob.core.windows.net`:m?.split(`?`)[0]||``;if(!t)throw Error(`When using credential authentication, either accountName or a valid sasUrl must be provided to determine the account URL`);x=new e.BlobServiceClient(t,h)}else if(g&&_)try{let t=new e.StorageSharedKeyCredential(g,_);x=new e.BlobServiceClient(`https://${g}.blob.core.windows.net`,t)}catch(e){throw Error(`StorageSharedKeyCredential is only available in Node.js environments. Use sasUrl or credential options for cross-platform compatibility. Original error: ${e}`)}else throw Error(`Azure authentication required. Provide one of: connectionString, sasUrl, credential, or accountName + accountKey (Node.js only)`);let S=x.getContainerClient(v),C=e=>`${e}.incomplete`,w=(e,n,a)=>(0,r.withAzureTimingMetrics)(r.azurePartUploadDurationHistogram,i.Effect.gen(function*(){yield*i.Effect.logInfo(`Uploading block`).pipe(i.Effect.annotateLogs({upload_id:e.id,block_id:a,block_size:n.length})),yield*(0,r.azureUploadPartsTotal)(i.Effect.succeed(1)),yield*(0,r.azurePartSizeHistogram)(i.Effect.succeed(n.length));try{let o=S.getBlockBlobClient(e.id);yield*i.Effect.tryPromise({try:async()=>{await o.stageBlock(a,n,n.length)},catch:o=>(i.Effect.runSync((0,r.trackAzureError)(`uploadBlock`,o,{upload_id:e.id,block_id:a,block_size:n.length})),t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:o}))}),yield*i.Effect.logInfo(`Finished uploading block`).pipe(i.Effect.annotateLogs({upload_id:e.id,block_id:a,block_size:n.length}))}catch(t){throw i.Effect.runSync((0,r.trackAzureError)(`uploadBlock`,t,{upload_id:e.id,block_id:a,block_size:n.length})),t}})),T=(e,n)=>i.Effect.tryPromise({try:async()=>{await S.getBlockBlobClient(C(e)).upload(n,n.length)},catch:e=>t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}).pipe(i.Effect.tap(()=>i.Effect.logInfo(`Finished uploading incomplete block`).pipe(i.Effect.annotateLogs({upload_id:e})))),E=e=>i.Effect.tryPromise({try:async()=>{try{return(await S.getBlockBlobClient(C(e)).download()).readableStreamBody}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return;throw e}},catch:e=>t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),D=e=>i.Effect.tryPromise({try:async()=>{try{return(await S.getBlockBlobClient(C(e)).getProperties()).contentLength}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return;throw e}},catch:e=>t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),O=e=>i.Effect.tryPromise({try:async()=>{await S.getBlockBlobClient(C(e)).deleteIfExists()},catch:e=>t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),k=e=>i.Effect.gen(function*(){let t=yield*E(e);if(!t)return;let n=t.getReader(),r=[],a=0;try{for(;;){let 
e=yield*i.Effect.promise(()=>n.read());if(e.done)break;r.push(e.value),a+=e.value.length}}finally{n.releaseLock()}let o=i.Stream.fromIterable(r);return{size:a,stream:o}}),A=e=>{let t=e??5497558138880,n;n=t<=b?t:t<=b*u?b:Math.ceil(t/u);let r=Math.max(n,l);return Math.ceil(r/1024)*1024},j=e=>t=>i.Stream.async(n=>{let r=new Uint8Array,a=1,o=0,s=(t,r=!1)=>{i.Effect.runSync(i.Effect.logInfo(`Creating chunk`).pipe(i.Effect.annotateLogs({block_number:a,chunk_size:t.length,expected_size:e,is_final_chunk:r,total_bytes_processed:o+t.length}))),n.single({blockNumber:a++,data:t,size:t.length})},c=t=>{let n=new Uint8Array(r.length+t.length);for(n.set(r),n.set(t,r.length),r=n,o+=t.length;r.length>=e;){let t=r.slice(0,e);r=r.slice(e),s(t,!1)}};i.Effect.runFork(t.pipe(i.Stream.runForEach(e=>i.Effect.sync(()=>c(e))),i.Effect.andThen(()=>i.Effect.sync(()=>{r.length>0&&s(r,!0),n.end()})),i.Effect.catchAll(e=>i.Effect.sync(()=>n.fail(e)))))}),M=(e,t=0)=>n=>e?i.Effect.gen(function*(){let r=yield*i.Ref.make(t);return n.pipe(i.Stream.tap(t=>i.Effect.gen(function*(){e(yield*i.Ref.updateAndGet(r,e=>e+t.length))})))}).pipe(i.Stream.unwrap):n,N=(e,n,o,s,c)=>i.Effect.gen(function*(){yield*i.Effect.logInfo(`Uploading blocks`).pipe(i.Effect.annotateLogs({upload_id:e.id,init_offset:s,file_size:e.size}));let u=e.size,f=A(u);yield*i.Effect.logInfo(`Block size`).pipe(i.Effect.annotateLogs({upload_id:e.id,block_size:f}));let p=n.pipe(M(c,s),j(f)),m=yield*i.Ref.make(s),h=yield*i.Ref.make(0),g=yield*i.Ref.make([]),_=n=>i.Effect.gen(function*(){let s=yield*i.Ref.updateAndGet(m,e=>e+n.size),c=s>=(e.size||0);yield*i.Effect.logDebug(`Processing chunk`).pipe(i.Effect.annotateLogs({upload_id:e.id,cumulative_offset:s,file_size:e.size,chunk_size:n.size,is_final_block:c}));let u=o+n.blockNumber-1;if(n.size>f&&(yield*i.Effect.fail(t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:Error(`Block size ${n.size} exceeds upload block size ${f}`)}))),n.size>=l||c){yield*i.Effect.logDebug(`Uploading multipart chunk`).pipe(i.Effect.annotateLogs({upload_id:e.id,block_number:u,chunk_size:n.size,min_block_size:l,is_final_block:c}));let t=a(`block-${u.toString().padStart(6,`0`)}`).toString(`base64`);yield*w(e,n.data,t),yield*i.Ref.update(g,e=>[...e,t]),yield*(0,r.azurePartSizeHistogram)(i.Effect.succeed(n.size))}else yield*T(e.id,n.data);yield*i.Ref.update(h,e=>e+n.size)});return yield*p.pipe(i.Stream.runForEach(e=>_(e)),i.Effect.withConcurrency(d)),{bytesUploaded:yield*i.Ref.get(h),blockIds:yield*i.Ref.get(g)}}),P=(e,n)=>i.Effect.tryPromise({try:async()=>{await S.getBlockBlobClient(e.id).commitBlockList(n,{blobHTTPHeaders:{blobContentType:e.metadata?.contentType?.toString(),blobCacheControl:e.metadata?.cacheControl?.toString()}})},catch:e=>t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),F=e=>i.Effect.tryPromise({try:async()=>{try{return(await S.getBlockBlobClient(e).getBlockList(`committed`)).committedBlocks?.map(e=>({size:e.size}))??[]}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return[];throw e}},catch:e=>t.UploadistaError.fromCode(`UPLOAD_ID_NOT_FOUND`,{cause:e})}),I=e=>i.Effect.gen(function*(){yield*i.Effect.logInfo(`Removing cached data`).pipe(i.Effect.annotateLogs({upload_id:e})),yield*y.delete(e)}),L=e=>i.Effect.gen(function*(){return yield*(0,r.azureUploadRequestsTotal)(i.Effect.succeed(1)),yield*(0,r.azureActiveUploadsGauge)(i.Effect.succeed(1)),yield*(0,r.azureFileSizeHistogram)(i.Effect.succeed(e.size||0)),yield*i.Effect.logInfo(`Initializing Azure blob 
upload`).pipe(i.Effect.annotateLogs({upload_id:e.id})),e.creationDate=new Date().toISOString(),e.storage={id:e.storage.id,type:e.storage.type,path:e.id,bucket:v},e.url=`${s}/${e.id}`,yield*y.set(e.id,e),yield*i.Effect.logInfo(`Azure blob upload initialized`).pipe(i.Effect.annotateLogs({upload_id:e.id})),e}),R=e=>i.Effect.tryPromise({try:async()=>{let t=await S.getBlockBlobClient(e).download();if(t.blobBody)return t.blobBody;if(t.readableStreamBody)return t.readableStreamBody;throw Error(`No blob body or readable stream body`)},catch:e=>t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),z=(e,r)=>i.Effect.gen(function*(){let a={...n.DEFAULT_STREAMING_CONFIG,...r},o=yield*R(e);if(o instanceof Blob){let e=yield*i.Effect.promise(()=>o.arrayBuffer()),t=new Uint8Array(e),n=a.chunkSize,r=[];for(let e=0;e<t.length;e+=n)r.push(t.slice(e,Math.min(e+n,t.length)));return i.Stream.fromIterable(r)}return i.Stream.async(e=>{let n=o.getReader(),r=a.chunkSize,s=new Uint8Array;return(async()=>{try{for(;;){let{done:t,value:i}=await n.read();if(t){s.length>0&&e.single(s),e.end();return}if(i){let t=new Uint8Array(s.length+i.length);for(t.set(s),t.set(i,s.length),s=t;s.length>=r;){let t=s.slice(0,r);s=s.slice(r),e.single(t)}}}}catch(n){e.fail(new t.UploadistaError({code:`FILE_READ_ERROR`,status:500,body:`Failed to read Azure blob stream`,details:`Azure stream read failed: ${String(n)}`}))}})(),i.Effect.sync(()=>{n.releaseLock()})})}),B=e=>i.Effect.gen(function*(){let t=yield*z(e),n=[];yield*i.Stream.runForEach(t,e=>i.Effect.sync(()=>{n.push(e)}));let r=n.reduce((e,t)=>e+t.length,0),a=new Uint8Array(r),o=0;for(let e of n)a.set(e,o),o+=e.length;return a}),V=(e,t,n)=>i.Effect.gen(function*(){let r=yield*y.get(e),a=(yield*F(e)).length+1,o=yield*k(e);if(o){yield*O(e);let s=t-o.size,c=o.stream.pipe(i.Stream.concat(n));return{uploadFile:r,nextBlockNumber:a-1,offset:s,incompleteBlockSize:o.size,data:c}}else return{uploadFile:r,nextBlockNumber:a,offset:t,incompleteBlockSize:0,data:n}}),H=(e,t)=>(0,r.withAzureUploadMetrics)(e.file_id,(0,r.withAzureTimingMetrics)(r.azureUploadDurationHistogram,i.Effect.gen(function*(){let n=Date.now(),{stream:a,file_id:o,offset:s}=e,{onProgress:c}=t,{uploadFile:l,nextBlockNumber:u,offset:d,data:f}=yield*V(o,s,a),{bytesUploaded:p,blockIds:m}=yield*N(l,f,u,d,c),h=d+p;if(l.size===h)try{yield*P(l,m),yield*y.set(o,{...l,offset:h}),yield*(0,r.logAzureUploadCompletion)(o,{fileSize:l.size||0,totalDurationMs:Date.now()-n,partsCount:m.length,averagePartSize:l.size,throughputBps:l.size/(Date.now()-n),retryCount:0}),yield*(0,r.azureUploadSuccessTotal)(i.Effect.succeed(1)),yield*(0,r.azureActiveUploadsGauge)(i.Effect.succeed(-1))}catch(e){throw yield*i.Effect.logError(`Failed to finish upload`).pipe(i.Effect.annotateLogs({upload_id:o,error:JSON.stringify(e)})),yield*(0,r.azureUploadErrorsTotal)(i.Effect.succeed(1)),i.Effect.runSync((0,r.trackAzureError)(`write`,e,{upload_id:o,operation:`commit`,blocks:m.length})),e}return h}))),U=e=>i.Effect.gen(function*(){let t=yield*y.get(e),n=0;try{n=o(yield*F(e))}catch(n){if(typeof n==`object`&&n&&`statusCode`in n&&n.statusCode===404)return{...t,offset:t.size,size:t.size,metadata:t.metadata,storage:t.storage};throw yield*i.Effect.logError(`Error on get upload`).pipe(i.Effect.annotateLogs({upload_id:e,error:JSON.stringify(n)})),n}let r=yield*D(e);return{...t,offset:n+(r??0),size:t.size,storage:t.storage}}),W=e=>i.Effect.gen(function*(){try{let t=S.getBlockBlobClient(e);yield*i.Effect.promise(()=>t.deleteIfExists()),yield*O(e)}catch(n){if(typeof 
n==`object`&&n&&`statusCode`in n&&n.statusCode===404)return yield*i.Effect.logError(`No file found`).pipe(i.Effect.annotateLogs({upload_id:e})),yield*i.Effect.fail(t.UploadistaError.fromCode(`FILE_NOT_FOUND`));throw i.Effect.runSync((0,r.trackAzureError)(`remove`,n,{upload_id:e})),n}yield*I(e),yield*(0,r.azureActiveUploadsGauge)(i.Effect.succeed(-1))}),G=()=>f,K=e=>{let t=new Date(e);return new Date(t.getTime()+G())},q=()=>i.Effect.tryPromise({try:async()=>{if(G()===0)return 0;let e=0,t=S.listBlobsFlat({includeMetadata:!0}),n=[];for await(let e of t)if(e.metadata?.creationDate){let t=new Date(e.metadata.creationDate);Date.now()>K(t.toISOString()).getTime()&&n.push(e.name)}for(let t of n)await S.deleteBlob(t),e++;return e},catch:e=>t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),J=()=>({supportsParallelUploads:!0,supportsConcatenation:!1,supportsDeferredLength:!0,supportsResumableUploads:!0,supportsTransactionalUploads:!0,supportsStreamingRead:!0,supportsStreamingWrite:!0,maxConcurrentUploads:d,minChunkSize:l,maxChunkSize:4e3*1024*1024,maxParts:u,optimalChunkSize:b,requiresOrderedChunks:!1,requiresMimeTypeValidation:!0,maxValidationSize:void 0});return{bucket:v,create:L,remove:W,write:H,getUpload:U,read:B,readStream:z,writeStream:(e,n)=>(0,r.withAzureTimingMetrics)(r.azureUploadDurationHistogram,i.Effect.gen(function*(){let o=Date.now();yield*i.Effect.logInfo(`Starting streaming write to Azure`).pipe(i.Effect.annotateLogs({upload_id:e,container:v,size_hint:n.sizeHint})),yield*(0,r.azureUploadRequestsTotal)(i.Effect.succeed(1)),yield*(0,r.azureActiveUploadsGauge)(i.Effect.succeed(1));let s=A(n.sizeHint),c=yield*i.Ref.make([]),u=yield*i.Ref.make(0),d=yield*i.Ref.make(1),f=yield*i.Ref.make(new Uint8Array),p=(n,o)=>i.Effect.gen(function*(){if(n.length===0||n.length<l&&!o)return;let s=yield*i.Ref.getAndUpdate(d,e=>e+1),u=a(`stream-block-${s.toString().padStart(6,`0`)}`).toString(`base64`);yield*i.Effect.logDebug(`Staging block from stream`).pipe(i.Effect.annotateLogs({upload_id:e,block_number:s,block_size:n.length,is_final_block:o}));let f=S.getBlockBlobClient(e);yield*i.Effect.tryPromise({try:()=>f.stageBlock(u,n,n.length),catch:a=>(i.Effect.runSync((0,r.trackAzureError)(`writeStream`,a,{upload_id:e,block_number:s,block_size:n.length})),t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:a}))}),yield*i.Ref.update(c,e=>[...e,u]),yield*(0,r.azureUploadPartsTotal)(i.Effect.succeed(1)),yield*(0,r.azurePartSizeHistogram)(i.Effect.succeed(n.length))});yield*n.stream.pipe(i.Stream.runForEach(e=>i.Effect.gen(function*(){yield*i.Ref.update(u,t=>t+e.length);let t=yield*i.Ref.get(f),n=new Uint8Array(t.length+e.length);n.set(t),n.set(e,t.length);let r=0;for(;n.length-r>=s;)yield*p(n.slice(r,r+s),!1),r+=s;yield*i.Ref.set(f,n.slice(r))})));let m=yield*i.Ref.get(f);m.length>0&&(yield*p(m,!0));let h=yield*i.Ref.get(c),g=yield*i.Ref.get(u);if(h.length===0)return yield*(0,r.azureActiveUploadsGauge)(i.Effect.succeed(-1)),yield*i.Effect.fail(new t.UploadistaError({code:`FILE_WRITE_ERROR`,status:400,body:`Cannot complete upload with no data`,details:`The stream provided no data to upload`}));let _=S.getBlockBlobClient(e);yield*i.Effect.tryPromise({try:()=>_.commitBlockList(h,{blobHTTPHeaders:{blobContentType:n.contentType}}),catch:n=>(i.Effect.runSync((0,r.trackAzureError)(`writeStream`,n,{upload_id:e,operation:`commit`,blocks:h.length})),t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:n}))});let y=Date.now()-o,b=y>0?g*1e3/y:0,x=h.length>0?g/h.length:void 0;return 
yield*(0,r.logAzureUploadCompletion)(e,{fileSize:g,totalDurationMs:y,partsCount:h.length,averagePartSize:x,throughputBps:b,retryCount:0}),yield*(0,r.azureUploadSuccessTotal)(i.Effect.succeed(1)),yield*(0,r.azureActiveUploadsGauge)(i.Effect.succeed(-1)),yield*(0,r.azureFileSizeHistogram)(i.Effect.succeed(g)),yield*i.Effect.logInfo(`Streaming write to Azure completed`).pipe(i.Effect.annotateLogs({upload_id:e,total_bytes:g,blocks_count:h.length,duration_ms:y})),{id:e,size:g,path:e,bucket:v}}).pipe(i.Effect.catchAll(e=>i.Effect.gen(function*(){return yield*(0,r.azureUploadErrorsTotal)(i.Effect.succeed(1)),yield*(0,r.azureActiveUploadsGauge)(i.Effect.succeed(-1)),yield*i.Effect.fail(e)})))),deleteExpired:q(),getCapabilities:J,getChunkerConstraints:()=>({minChunkSize:l,maxChunkSize:4e3*1024*1024,optimalChunkSize:b,requiresOrderedChunks:!1}),validateUploadStrategy:e=>{let t=J(),n=(()=>{switch(e){case`parallel`:return t.supportsParallelUploads;case`single`:return!0;default:return!1}})();return i.Effect.succeed(n)}}})}exports.azureStore=s;
+ let e=require(`@azure/storage-blob`),t=require(`@uploadista/core/errors`),n=require(`@uploadista/core/types`),r=require(`@uploadista/observability`),i=require(`effect`);const a=e=>typeof globalThis<`u`&&`Buffer`in globalThis?globalThis.Buffer.from(e):new Uint8Array(Array.from(e,e=>e.charCodeAt(0)));function o(e){return e&&e.length>0?e.reduce((e,t)=>e+(t?.size??0),0):0}function s({deliveryUrl:s,blockSize:c,minBlockSize:l=1024,maxBlocks:u=5e4,maxConcurrentBlockUploads:d=60,expirationPeriodInMilliseconds:f=1e3*60*60*24*7,connectionString:p,sasUrl:m,credential:h,accountName:g,accountKey:_,containerName:v}){return i.Effect.gen(function*(){let y=yield*n.UploadFileKVStore,b=c||8*1024*1024,x;if(p)x=e.BlobServiceClient.fromConnectionString(p);else if(m)x=new e.BlobServiceClient(m);else if(h){let t=g?`https://${g}.blob.core.windows.net`:m?.split(`?`)[0]||``;if(!t)throw Error(`When using credential authentication, either accountName or a valid sasUrl must be provided to determine the account URL`);x=new e.BlobServiceClient(t,h)}else if(g&&_)try{let t=new e.StorageSharedKeyCredential(g,_);x=new e.BlobServiceClient(`https://${g}.blob.core.windows.net`,t)}catch(e){throw Error(`StorageSharedKeyCredential is only available in Node.js environments. Use sasUrl or credential options for cross-platform compatibility. Original error: ${e}`)}else throw Error(`Azure authentication required. Provide one of: connectionString, sasUrl, credential, or accountName + accountKey (Node.js only)`);let S=x.getContainerClient(v),C=e=>`${e}.incomplete`,w=(e,n,a)=>(0,r.withAzureTimingMetrics)(r.azurePartUploadDurationHistogram,i.Effect.gen(function*(){yield*i.Effect.logInfo(`Uploading block`).pipe(i.Effect.annotateLogs({upload_id:e.id,block_id:a,block_size:n.length})),yield*(0,r.azureUploadPartsTotal)(i.Effect.succeed(1)),yield*(0,r.azurePartSizeHistogram)(i.Effect.succeed(n.length));try{let o=S.getBlockBlobClient(e.id);yield*i.Effect.tryPromise({try:async()=>{await o.stageBlock(a,n,n.length)},catch:o=>(i.Effect.runSync((0,r.trackAzureError)(`uploadBlock`,o,{upload_id:e.id,block_id:a,block_size:n.length})),t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:o}))}),yield*i.Effect.logInfo(`Finished uploading block`).pipe(i.Effect.annotateLogs({upload_id:e.id,block_id:a,block_size:n.length}))}catch(t){throw i.Effect.runSync((0,r.trackAzureError)(`uploadBlock`,t,{upload_id:e.id,block_id:a,block_size:n.length})),t}})),T=(e,n)=>i.Effect.tryPromise({try:async()=>{await S.getBlockBlobClient(C(e)).upload(n,n.length)},catch:e=>t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}).pipe(i.Effect.tap(()=>i.Effect.logInfo(`Finished uploading incomplete block`).pipe(i.Effect.annotateLogs({upload_id:e})))),E=e=>i.Effect.tryPromise({try:async()=>{try{return(await S.getBlockBlobClient(C(e)).download()).readableStreamBody}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return;throw e}},catch:e=>t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),D=e=>i.Effect.tryPromise({try:async()=>{try{return(await S.getBlockBlobClient(C(e)).getProperties()).contentLength}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return;throw e}},catch:e=>t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),O=e=>i.Effect.tryPromise({try:async()=>{await S.getBlockBlobClient(C(e)).deleteIfExists()},catch:e=>t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),k=e=>i.Effect.gen(function*(){let t=yield*E(e);if(!t)return;let n=t.getReader(),r=[],a=0;try{for(;;){let 
e=yield*i.Effect.promise(()=>n.read());if(e.done)break;r.push(e.value),a+=e.value.length}}finally{n.releaseLock()}let o=i.Stream.fromIterable(r);return{size:a,stream:o}}),A=e=>{let t=e??5497558138880,n;n=t<=b?t:t<=b*u?b:Math.ceil(t/u);let r=Math.max(n,l);return Math.ceil(r/1024)*1024},j=e=>t=>i.Stream.async(n=>{let r=new Uint8Array,a=1,o=0,s=(t,r=!1)=>{i.Effect.runSync(i.Effect.logInfo(`Creating chunk`).pipe(i.Effect.annotateLogs({block_number:a,chunk_size:t.length,expected_size:e,is_final_chunk:r,total_bytes_processed:o+t.length}))),n.single({blockNumber:a++,data:t,size:t.length})},c=t=>{let n=new Uint8Array(r.length+t.length);for(n.set(r),n.set(t,r.length),r=n,o+=t.length;r.length>=e;){let t=r.slice(0,e);r=r.slice(e),s(t,!1)}};i.Effect.runFork(t.pipe(i.Stream.runForEach(e=>i.Effect.sync(()=>c(e))),i.Effect.andThen(()=>i.Effect.sync(()=>{r.length>0&&s(r,!0),n.end()})),i.Effect.catchAll(e=>i.Effect.sync(()=>n.fail(e)))))}),M=(e,t=0)=>n=>e?i.Effect.gen(function*(){let r=yield*i.Ref.make(t);return n.pipe(i.Stream.tap(t=>i.Effect.gen(function*(){e(yield*i.Ref.updateAndGet(r,e=>e+t.length))})))}).pipe(i.Stream.unwrap):n,N=(e,n,o,s,c)=>i.Effect.gen(function*(){yield*i.Effect.logInfo(`Uploading blocks`).pipe(i.Effect.annotateLogs({upload_id:e.id,init_offset:s,file_size:e.size}));let u=e.size,f=A(u);yield*i.Effect.logInfo(`Block size`).pipe(i.Effect.annotateLogs({upload_id:e.id,block_size:f}));let p=n.pipe(M(c,s),j(f)),m=yield*i.Ref.make(s),h=yield*i.Ref.make(0),g=yield*i.Ref.make([]),_=n=>i.Effect.gen(function*(){let s=yield*i.Ref.updateAndGet(m,e=>e+n.size),c=s>=(e.size||0);yield*i.Effect.logDebug(`Processing chunk`).pipe(i.Effect.annotateLogs({upload_id:e.id,cumulative_offset:s,file_size:e.size,chunk_size:n.size,is_final_block:c}));let u=o+n.blockNumber-1;if(n.size>f&&(yield*i.Effect.fail(t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:Error(`Block size ${n.size} exceeds upload block size ${f}`)}))),n.size>=l||c){yield*i.Effect.logDebug(`Uploading multipart chunk`).pipe(i.Effect.annotateLogs({upload_id:e.id,block_number:u,chunk_size:n.size,min_block_size:l,is_final_block:c}));let t=a(`block-${u.toString().padStart(6,`0`)}`).toString(`base64`);yield*w(e,n.data,t),yield*i.Ref.update(g,e=>[...e,t]),yield*(0,r.azurePartSizeHistogram)(i.Effect.succeed(n.size))}else yield*T(e.id,n.data);yield*i.Ref.update(h,e=>e+n.size)});return yield*p.pipe(i.Stream.runForEach(e=>_(e)),i.Effect.withConcurrency(d)),{bytesUploaded:yield*i.Ref.get(h),blockIds:yield*i.Ref.get(g)}}),P=(e,n)=>i.Effect.tryPromise({try:async()=>{await S.getBlockBlobClient(e.id).commitBlockList(n,{blobHTTPHeaders:{blobContentType:e.metadata?.contentType?.toString(),blobCacheControl:e.metadata?.cacheControl?.toString()}})},catch:e=>t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),F=e=>i.Effect.tryPromise({try:async()=>{try{return(await S.getBlockBlobClient(e).getBlockList(`committed`)).committedBlocks?.map(e=>({size:e.size}))??[]}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return[];throw e}},catch:e=>t.UploadistaError.fromCode(`UPLOAD_ID_NOT_FOUND`,{cause:e})}),I=e=>i.Effect.gen(function*(){yield*i.Effect.logInfo(`Removing cached data`).pipe(i.Effect.annotateLogs({upload_id:e})),yield*y.delete(e)}),L=e=>i.Effect.gen(function*(){return yield*(0,r.azureUploadRequestsTotal)(i.Effect.succeed(1)),yield*(0,r.azureActiveUploadsGauge)(i.Effect.succeed(1)),yield*(0,r.azureFileSizeHistogram)(i.Effect.succeed(e.size||0)),yield*i.Effect.logInfo(`Initializing Azure blob 
upload`).pipe(i.Effect.annotateLogs({upload_id:e.id})),e.creationDate=new Date().toISOString(),e.storage={id:e.storage.id,type:e.storage.type,path:e.id,bucket:v},e.url=`${s}/${e.id}`,yield*y.set(e.id,e),yield*i.Effect.logInfo(`Azure blob upload initialized`).pipe(i.Effect.annotateLogs({upload_id:e.id})),e}),R=e=>i.Effect.tryPromise({try:async()=>{let t=await S.getBlockBlobClient(e).download();if(t.blobBody)return t.blobBody;if(t.readableStreamBody)return t.readableStreamBody;throw Error(`No blob body or readable stream body`)},catch:e=>t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),z=(e,r)=>i.Effect.gen(function*(){let a={...n.DEFAULT_STREAMING_CONFIG,...r},o=yield*R(e);if(o instanceof Blob){let e=yield*i.Effect.promise(()=>o.arrayBuffer()),t=new Uint8Array(e),n=a.chunkSize,r=[];for(let e=0;e<t.length;e+=n)r.push(t.slice(e,Math.min(e+n,t.length)));return i.Stream.fromIterable(r)}return i.Stream.async(e=>{let n=o.getReader(),r=a.chunkSize,s=new Uint8Array;return(async()=>{try{for(;;){let{done:t,value:i}=await n.read();if(t){s.length>0&&e.single(s),e.end();return}if(i){let t=new Uint8Array(s.length+i.length);for(t.set(s),t.set(i,s.length),s=t;s.length>=r;){let t=s.slice(0,r);s=s.slice(r),e.single(t)}}}}catch(n){e.fail(new t.UploadistaError({code:`FILE_READ_ERROR`,status:500,body:`Failed to read Azure blob stream`,details:`Azure stream read failed: ${String(n)}`}))}})(),i.Effect.sync(()=>{n.releaseLock()})})}),B=e=>i.Effect.gen(function*(){let t=yield*z(e),n=[];yield*i.Stream.runForEach(t,e=>i.Effect.sync(()=>{n.push(e)}));let r=n.reduce((e,t)=>e+t.length,0),a=new Uint8Array(r),o=0;for(let e of n)a.set(e,o),o+=e.length;return a}),V=(e,t,n)=>i.Effect.gen(function*(){let r=yield*y.get(e),a=(yield*F(e)).length+1,o=yield*k(e);if(o){yield*O(e);let s=t-o.size,c=o.stream.pipe(i.Stream.concat(n));return{uploadFile:r,nextBlockNumber:a-1,offset:s,incompleteBlockSize:o.size,data:c}}else return{uploadFile:r,nextBlockNumber:a,offset:t,incompleteBlockSize:0,data:n}}),H=(e,t)=>(0,r.withAzureUploadMetrics)(e.file_id,(0,r.withAzureTimingMetrics)(r.azureUploadDurationHistogram,i.Effect.gen(function*(){let n=Date.now(),{stream:a,file_id:o,offset:s}=e,{onProgress:c}=t,{uploadFile:l,nextBlockNumber:u,offset:d,data:f}=yield*V(o,s,a),{bytesUploaded:p,blockIds:m}=yield*N(l,f,u,d,c),h=d+p;if(l.size===h)try{yield*P(l,m),yield*y.set(o,{...l,offset:h}),yield*(0,r.logAzureUploadCompletion)(o,{fileSize:l.size||0,totalDurationMs:Date.now()-n,partsCount:m.length,averagePartSize:l.size,throughputBps:l.size/(Date.now()-n),retryCount:0}),yield*(0,r.azureUploadSuccessTotal)(i.Effect.succeed(1)),yield*(0,r.azureActiveUploadsGauge)(i.Effect.succeed(-1))}catch(e){throw yield*i.Effect.logError(`Failed to finish upload`).pipe(i.Effect.annotateLogs({upload_id:o,error:JSON.stringify(e)})),yield*(0,r.azureUploadErrorsTotal)(i.Effect.succeed(1)),i.Effect.runSync((0,r.trackAzureError)(`write`,e,{upload_id:o,operation:`commit`,blocks:m.length})),e}return h}))),U=e=>i.Effect.gen(function*(){let t=yield*y.get(e),n=0;try{n=o(yield*F(e))}catch(n){if(typeof n==`object`&&n&&`statusCode`in n&&n.statusCode===404)return{...t,offset:t.size,size:t.size,metadata:t.metadata,storage:t.storage};throw yield*i.Effect.logError(`Error on get upload`).pipe(i.Effect.annotateLogs({upload_id:e,error:JSON.stringify(n)})),n}let r=yield*D(e);return{...t,offset:n+(r??0),size:t.size,storage:t.storage}}),W=e=>i.Effect.gen(function*(){try{let t=S.getBlockBlobClient(e);yield*i.Effect.promise(()=>t.deleteIfExists()),yield*O(e)}catch(n){if(typeof 
n==`object`&&n&&`statusCode`in n&&n.statusCode===404)return yield*i.Effect.logError(`No file found`).pipe(i.Effect.annotateLogs({upload_id:e})),yield*i.Effect.fail(t.UploadistaError.fromCode(`FILE_NOT_FOUND`));throw i.Effect.runSync((0,r.trackAzureError)(`remove`,n,{upload_id:e})),n}yield*I(e),yield*(0,r.azureActiveUploadsGauge)(i.Effect.succeed(-1))}),G=()=>f,K=e=>{let t=new Date(e);return new Date(t.getTime()+G())},q=()=>i.Effect.tryPromise({try:async()=>{if(G()===0)return 0;let e=0,t=S.listBlobsFlat({includeMetadata:!0}),n=[];for await(let e of t)if(e.metadata?.creationDate){let t=new Date(e.metadata.creationDate);Date.now()>K(t.toISOString()).getTime()&&n.push(e.name)}for(let t of n)await S.deleteBlob(t),e++;return e},catch:e=>t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),J=()=>({supportsParallelUploads:!0,supportsConcatenation:!1,supportsDeferredLength:!0,supportsResumableUploads:!0,supportsTransactionalUploads:!0,supportsStreamingRead:!0,supportsStreamingWrite:!0,maxConcurrentUploads:d,minChunkSize:l,maxChunkSize:4e3*1024*1024,maxParts:u,optimalChunkSize:b,requiresOrderedChunks:!1,requiresMimeTypeValidation:!0,maxValidationSize:void 0});return{bucket:v,create:L,remove:W,write:H,getUpload:U,read:B,readStream:z,writeStream:(e,n)=>(0,r.withAzureTimingMetrics)(r.azureUploadDurationHistogram,i.Effect.gen(function*(){let o=Date.now();yield*i.Effect.logInfo(`Starting streaming write to Azure`).pipe(i.Effect.annotateLogs({upload_id:e,container:v,size_hint:n.sizeHint})),yield*(0,r.azureUploadRequestsTotal)(i.Effect.succeed(1)),yield*(0,r.azureActiveUploadsGauge)(i.Effect.succeed(1));let s=A(n.sizeHint),c=yield*i.Ref.make([]),u=yield*i.Ref.make(0),d=yield*i.Ref.make(1),f=yield*i.Ref.make(new Uint8Array),p=(n,o)=>i.Effect.gen(function*(){if(n.length===0||n.length<l&&!o)return;let s=yield*i.Ref.getAndUpdate(d,e=>e+1),u=a(`stream-block-${s.toString().padStart(6,`0`)}`).toString(`base64`);yield*i.Effect.logDebug(`Staging block from stream`).pipe(i.Effect.annotateLogs({upload_id:e,block_number:s,block_size:n.length,is_final_block:o}));let f=S.getBlockBlobClient(e);yield*i.Effect.tryPromise({try:()=>f.stageBlock(u,n,n.length),catch:a=>(i.Effect.runSync((0,r.trackAzureError)(`writeStream`,a,{upload_id:e,block_number:s,block_size:n.length})),t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:a}))}),yield*i.Ref.update(c,e=>[...e,u]),yield*(0,r.azureUploadPartsTotal)(i.Effect.succeed(1)),yield*(0,r.azurePartSizeHistogram)(i.Effect.succeed(n.length))});yield*n.stream.pipe(i.Stream.runForEach(e=>i.Effect.gen(function*(){yield*i.Ref.update(u,t=>t+e.length);let t=yield*i.Ref.get(f),n=new Uint8Array(t.length+e.length);n.set(t),n.set(e,t.length);let r=0;for(;n.length-r>=s;)yield*p(n.slice(r,r+s),!1),r+=s;yield*i.Ref.set(f,n.slice(r))})));let m=yield*i.Ref.get(f);m.length>0&&(yield*p(m,!0));let h=yield*i.Ref.get(c),g=yield*i.Ref.get(u);if(h.length===0)return yield*(0,r.azureActiveUploadsGauge)(i.Effect.succeed(-1)),yield*i.Effect.fail(new t.UploadistaError({code:`FILE_WRITE_ERROR`,status:400,body:`Cannot complete upload with no data`,details:`The stream provided no data to upload`}));let _=S.getBlockBlobClient(e);yield*i.Effect.tryPromise({try:()=>_.commitBlockList(h,{blobHTTPHeaders:{blobContentType:n.contentType}}),catch:n=>(i.Effect.runSync((0,r.trackAzureError)(`writeStream`,n,{upload_id:e,operation:`commit`,blocks:h.length})),t.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:n}))});let y=Date.now()-o,b=y>0?g*1e3/y:0,x=h.length>0?g/h.length:void 0;return 
yield*(0,r.logAzureUploadCompletion)(e,{fileSize:g,totalDurationMs:y,partsCount:h.length,averagePartSize:x,throughputBps:b,retryCount:0}),yield*(0,r.azureUploadSuccessTotal)(i.Effect.succeed(1)),yield*(0,r.azureActiveUploadsGauge)(i.Effect.succeed(-1)),yield*(0,r.azureFileSizeHistogram)(i.Effect.succeed(g)),yield*i.Effect.logInfo(`Streaming write to Azure completed`).pipe(i.Effect.annotateLogs({upload_id:e,total_bytes:g,blocks_count:h.length,duration_ms:y})),{id:e,size:g,path:e,bucket:v}}).pipe(i.Effect.catchAll(e=>i.Effect.gen(function*(){return yield*(0,r.azureUploadErrorsTotal)(i.Effect.succeed(1)),yield*(0,r.azureActiveUploadsGauge)(i.Effect.succeed(-1)),yield*i.Effect.fail(e)})))),deleteExpired:q,getCapabilities:J,getChunkerConstraints:()=>({minChunkSize:l,maxChunkSize:4e3*1024*1024,optimalChunkSize:b,requiresOrderedChunks:!1}),validateUploadStrategy:e=>{let t=J(),n=(()=>{switch(e){case`parallel`:return t.supportsParallelUploads;case`single`:return!0;default:return!1}})();return i.Effect.succeed(n)}}})}exports.azureStore=s;
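The only difference visible in the bundled CJS output above appears to be in the object returned by the store factory: 0.0.20-beta.6 assigned the result of calling the expired-blob cleanup helper (deleteExpired:q()), whereas 0.0.20-beta.8 assigns the function itself (deleteExpired:q). A rough de-minified sketch of that change, using hypothetical names in place of the minified identifiers (the original names are not visible in this diff):

    import { Effect } from "effect";

    // "deleteExpiredBlobs" is a stand-in for the minified helper q/ue.
    const deleteExpiredBlobs = () =>
      Effect.tryPromise({
        try: async () => {
          // list blobs in the container, delete the expired ones, return the count
          return 0;
        },
        catch: (cause) => new Error(`cleanup failed: ${String(cause)}`),
      });

    // 0.0.20-beta.6: the Effect was built once, when the store object was created
    const storeBeta6 = { deleteExpired: deleteExpiredBlobs() };

    // 0.0.20-beta.8: the store exposes the function, so a fresh Effect is built per call
    const storeBeta8 = { deleteExpired: deleteExpiredBlobs };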
package/dist/index.d.cts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.cts","names":[],"sources":["../src/azure-store.ts"],"sourcesContent":[],"mappings":";;;;;;KAgDY,SAAA;;EAAA,IAAA,EAEJ,UAFa;EAOT,IAAA,EAAA,MAAA;EAkDA,WAAA,CAAA,EAAU,OAAA;CAAa;AAAV,KAlDb,iBAAA,GAkDa;EACkB,WAAA,EAAA,MAAA;EAAY;;;;;EAIlC,SAAO,CAAA,EAAA,MAAA;EAAqC;;;AASjE;;EAEE,YAAA,CAAA,EAAA,MAAA;EACA;;;EAGA,SAAA,CAAA,EAAA,MAAA;EACA,yBAAA,CAAA,EAAA,MAAA;EACA,8BAAA,CAAA,EAAA,MAAA;EACA,gBAAA,CAAA,EAAA,MAAA;EACA;;;;EAGkB,MAAA,CAAA,EAAA,MAAA;EAAA;;;;eA9CL;;;;;;;;;;;;KAmBH,UAAA,GAAa,UAAU;6BACN,MAAA,CAAO,OAAO,YAAY;oCAG1C,oBACN,MAAA,CAAO,OAAO,MAAA,CAAO,OAAO,YAAY,kBAAkB;;;;;;;;iBASjD,UAAA;;;;;;;;;;;;;;;GAab,oBAAiB,MAAA,CAAA,OAAA,UAAA,oBAAA"}
+ {"version":3,"file":"index.d.cts","names":[],"sources":["../src/azure-store.ts"],"sourcesContent":[],"mappings":";;;;;;KAgDY,SAAA;;EAAA,IAAA,EAEJ,UAFa;EAOT,IAAA,EAAA,MAAA;EAkDA,WAAA,CAAA,EAAU,OAAA;CAAa;AAAV,KAlDb,iBAAA,GAkDa;EACkB,WAAA,EAAA,MAAA;EAAY;;;;;EAKnD,SAAO,CAAA,EAAA,MAAA;EACP;;;AAUJ;;EAEE,YAAA,CAAA,EAAA,MAAA;EACA;;;EAGA,SAAA,CAAA,EAAA,MAAA;EACA,yBAAA,CAAA,EAAA,MAAA;EACA,8BAAA,CAAA,EAAA,MAAA;EACA,gBAAA,CAAA,EAAA,MAAA;EACA;;;;EAGkB,MAAA,CAAA,EAAA,MAAA;EAAA;;;;eAjDL;;;;;;;;;;;;KAmBH,UAAA,GAAa,UAAU;6BACN,MAAA,CAAO,OAAO,YAAY;oCAG1C,oBACN,MAAA,CAAO,OACV,MAAA,CAAO,OAAO,YAAY,kBAC1B;;;;;;;;iBAUY,UAAA;;;;;;;;;;;;;;;GAab,oBAAiB,MAAA,CAAA,OAAA,UAAA,oBAAA"}
package/dist/index.d.mts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.mts","names":[],"sources":["../src/azure-store.ts"],"sourcesContent":[],"mappings":";;;;;;KAgDY,SAAA;;EAAA,IAAA,EAEJ,UAFa;EAOT,IAAA,EAAA,MAAA;EAkDA,WAAA,CAAA,EAAU,OAAA;CAAa;AAAV,KAlDb,iBAAA,GAkDa;EACkB,WAAA,EAAA,MAAA;EAAY;;;;;EAIlC,SAAO,CAAA,EAAA,MAAA;EAAqC;;;AASjE;;EAEE,YAAA,CAAA,EAAA,MAAA;EACA;;;EAGA,SAAA,CAAA,EAAA,MAAA;EACA,yBAAA,CAAA,EAAA,MAAA;EACA,8BAAA,CAAA,EAAA,MAAA;EACA,gBAAA,CAAA,EAAA,MAAA;EACA;;;;EAGkB,MAAA,CAAA,EAAA,MAAA;EAAA;;;;eA9CL;;;;;;;;;;;;KAmBH,UAAA,GAAa,UAAU;6BACN,MAAA,CAAO,OAAO,YAAY;oCAG1C,oBACN,MAAA,CAAO,OAAO,MAAA,CAAO,OAAO,YAAY,kBAAkB;;;;;;;;iBASjD,UAAA;;;;;;;;;;;;;;;GAab,oBAAiB,MAAA,CAAA,OAAA,UAAA,oBAAA"}
+ {"version":3,"file":"index.d.mts","names":[],"sources":["../src/azure-store.ts"],"sourcesContent":[],"mappings":";;;;;;KAgDY,SAAA;;EAAA,IAAA,EAEJ,UAFa;EAOT,IAAA,EAAA,MAAA;EAkDA,WAAA,CAAA,EAAU,OAAA;CAAa;AAAV,KAlDb,iBAAA,GAkDa;EACkB,WAAA,EAAA,MAAA;EAAY;;;;;EAKnD,SAAO,CAAA,EAAA,MAAA;EACP;;;AAUJ;;EAEE,YAAA,CAAA,EAAA,MAAA;EACA;;;EAGA,SAAA,CAAA,EAAA,MAAA;EACA,yBAAA,CAAA,EAAA,MAAA;EACA,8BAAA,CAAA,EAAA,MAAA;EACA,gBAAA,CAAA,EAAA,MAAA;EACA;;;;EAGkB,MAAA,CAAA,EAAA,MAAA;EAAA;;;;eAjDL;;;;;;;;;;;;KAmBH,UAAA,GAAa,UAAU;6BACN,MAAA,CAAO,OAAO,YAAY;oCAG1C,oBACN,MAAA,CAAO,OACV,MAAA,CAAO,OAAO,YAAY,kBAC1B;;;;;;;;iBAUY,UAAA;;;;;;;;;;;;;;;GAab,oBAAiB,MAAA,CAAA,OAAA,UAAA,oBAAA"}
package/dist/index.mjs CHANGED
@@ -1,2 +1,2 @@
- import{BlobServiceClient as e,StorageSharedKeyCredential as t}from"@azure/storage-blob";import{UploadistaError as n}from"@uploadista/core/errors";import{DEFAULT_STREAMING_CONFIG as r,UploadFileKVStore as i}from"@uploadista/core/types";import{azureActiveUploadsGauge as a,azureFileSizeHistogram as o,azurePartSizeHistogram as s,azurePartUploadDurationHistogram as c,azureUploadDurationHistogram as l,azureUploadErrorsTotal as u,azureUploadPartsTotal as d,azureUploadRequestsTotal as f,azureUploadSuccessTotal as p,logAzureUploadCompletion as m,trackAzureError as h,withAzureTimingMetrics as g,withAzureUploadMetrics as _}from"@uploadista/observability";import{Effect as v,Ref as y,Stream as b}from"effect";const x=e=>typeof globalThis<`u`&&`Buffer`in globalThis?globalThis.Buffer.from(e):new Uint8Array(Array.from(e,e=>e.charCodeAt(0)));function S(e){return e&&e.length>0?e.reduce((e,t)=>e+(t?.size??0),0):0}function C({deliveryUrl:C,blockSize:w,minBlockSize:T=1024,maxBlocks:E=5e4,maxConcurrentBlockUploads:D=60,expirationPeriodInMilliseconds:O=1e3*60*60*24*7,connectionString:k,sasUrl:A,credential:j,accountName:M,accountKey:N,containerName:P}){return v.gen(function*(){let F=yield*i,I=w||8*1024*1024,L;if(k)L=e.fromConnectionString(k);else if(A)L=new e(A);else if(j){let t=M?`https://${M}.blob.core.windows.net`:A?.split(`?`)[0]||``;if(!t)throw Error(`When using credential authentication, either accountName or a valid sasUrl must be provided to determine the account URL`);L=new e(t,j)}else if(M&&N)try{let n=new t(M,N);L=new e(`https://${M}.blob.core.windows.net`,n)}catch(e){throw Error(`StorageSharedKeyCredential is only available in Node.js environments. Use sasUrl or credential options for cross-platform compatibility. Original error: ${e}`)}else throw Error(`Azure authentication required. 
Provide one of: connectionString, sasUrl, credential, or accountName + accountKey (Node.js only)`);let R=L.getContainerClient(P),z=e=>`${e}.incomplete`,ee=(e,t,r)=>g(c,v.gen(function*(){yield*v.logInfo(`Uploading block`).pipe(v.annotateLogs({upload_id:e.id,block_id:r,block_size:t.length})),yield*d(v.succeed(1)),yield*s(v.succeed(t.length));try{let i=R.getBlockBlobClient(e.id);yield*v.tryPromise({try:async()=>{await i.stageBlock(r,t,t.length)},catch:i=>(v.runSync(h(`uploadBlock`,i,{upload_id:e.id,block_id:r,block_size:t.length})),n.fromCode(`FILE_WRITE_ERROR`,{cause:i}))}),yield*v.logInfo(`Finished uploading block`).pipe(v.annotateLogs({upload_id:e.id,block_id:r,block_size:t.length}))}catch(n){throw v.runSync(h(`uploadBlock`,n,{upload_id:e.id,block_id:r,block_size:t.length})),n}})),te=(e,t)=>v.tryPromise({try:async()=>{await R.getBlockBlobClient(z(e)).upload(t,t.length)},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}).pipe(v.tap(()=>v.logInfo(`Finished uploading incomplete block`).pipe(v.annotateLogs({upload_id:e})))),B=e=>v.tryPromise({try:async()=>{try{return(await R.getBlockBlobClient(z(e)).download()).readableStreamBody}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return;throw e}},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),ne=e=>v.tryPromise({try:async()=>{try{return(await R.getBlockBlobClient(z(e)).getProperties()).contentLength}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return;throw e}},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),V=e=>v.tryPromise({try:async()=>{await R.getBlockBlobClient(z(e)).deleteIfExists()},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),re=e=>v.gen(function*(){let t=yield*B(e);if(!t)return;let n=t.getReader(),r=[],i=0;try{for(;;){let e=yield*v.promise(()=>n.read());if(e.done)break;r.push(e.value),i+=e.value.length}}finally{n.releaseLock()}let a=b.fromIterable(r);return{size:i,stream:a}}),H=e=>{let t=e??5497558138880,n;n=t<=I?t:t<=I*E?I:Math.ceil(t/E);let r=Math.max(n,T);return Math.ceil(r/1024)*1024},U=e=>t=>b.async(n=>{let r=new Uint8Array,i=1,a=0,o=(t,r=!1)=>{v.runSync(v.logInfo(`Creating chunk`).pipe(v.annotateLogs({block_number:i,chunk_size:t.length,expected_size:e,is_final_chunk:r,total_bytes_processed:a+t.length}))),n.single({blockNumber:i++,data:t,size:t.length})},s=t=>{let n=new Uint8Array(r.length+t.length);for(n.set(r),n.set(t,r.length),r=n,a+=t.length;r.length>=e;){let t=r.slice(0,e);r=r.slice(e),o(t,!1)}};v.runFork(t.pipe(b.runForEach(e=>v.sync(()=>s(e))),v.andThen(()=>v.sync(()=>{r.length>0&&o(r,!0),n.end()})),v.catchAll(e=>v.sync(()=>n.fail(e)))))}),W=(e,t=0)=>n=>e?v.gen(function*(){let r=yield*y.make(t);return n.pipe(b.tap(t=>v.gen(function*(){e(yield*y.updateAndGet(r,e=>e+t.length))})))}).pipe(b.unwrap):n,G=(e,t,r,i,a)=>v.gen(function*(){yield*v.logInfo(`Uploading blocks`).pipe(v.annotateLogs({upload_id:e.id,init_offset:i,file_size:e.size}));let o=e.size,c=H(o);yield*v.logInfo(`Block size`).pipe(v.annotateLogs({upload_id:e.id,block_size:c}));let l=t.pipe(W(a,i),U(c)),u=yield*y.make(i),d=yield*y.make(0),f=yield*y.make([]),p=t=>v.gen(function*(){let i=yield*y.updateAndGet(u,e=>e+t.size),a=i>=(e.size||0);yield*v.logDebug(`Processing chunk`).pipe(v.annotateLogs({upload_id:e.id,cumulative_offset:i,file_size:e.size,chunk_size:t.size,is_final_block:a}));let o=r+t.blockNumber-1;if(t.size>c&&(yield*v.fail(n.fromCode(`FILE_WRITE_ERROR`,{cause:Error(`Block size ${t.size} exceeds upload block size ${c}`)}))),t.size>=T||a){yield*v.logDebug(`Uploading multipart 
chunk`).pipe(v.annotateLogs({upload_id:e.id,block_number:o,chunk_size:t.size,min_block_size:T,is_final_block:a}));let n=x(`block-${o.toString().padStart(6,`0`)}`).toString(`base64`);yield*ee(e,t.data,n),yield*y.update(f,e=>[...e,n]),yield*s(v.succeed(t.size))}else yield*te(e.id,t.data);yield*y.update(d,e=>e+t.size)});return yield*l.pipe(b.runForEach(e=>p(e)),v.withConcurrency(D)),{bytesUploaded:yield*y.get(d),blockIds:yield*y.get(f)}}),K=(e,t)=>v.tryPromise({try:async()=>{await R.getBlockBlobClient(e.id).commitBlockList(t,{blobHTTPHeaders:{blobContentType:e.metadata?.contentType?.toString(),blobCacheControl:e.metadata?.cacheControl?.toString()}})},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),q=e=>v.tryPromise({try:async()=>{try{return(await R.getBlockBlobClient(e).getBlockList(`committed`)).committedBlocks?.map(e=>({size:e.size}))??[]}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return[];throw e}},catch:e=>n.fromCode(`UPLOAD_ID_NOT_FOUND`,{cause:e})}),J=e=>v.gen(function*(){yield*v.logInfo(`Removing cached data`).pipe(v.annotateLogs({upload_id:e})),yield*F.delete(e)}),Y=e=>v.gen(function*(){return yield*f(v.succeed(1)),yield*a(v.succeed(1)),yield*o(v.succeed(e.size||0)),yield*v.logInfo(`Initializing Azure blob upload`).pipe(v.annotateLogs({upload_id:e.id})),e.creationDate=new Date().toISOString(),e.storage={id:e.storage.id,type:e.storage.type,path:e.id,bucket:P},e.url=`${C}/${e.id}`,yield*F.set(e.id,e),yield*v.logInfo(`Azure blob upload initialized`).pipe(v.annotateLogs({upload_id:e.id})),e}),ie=e=>v.tryPromise({try:async()=>{let t=await R.getBlockBlobClient(e).download();if(t.blobBody)return t.blobBody;if(t.readableStreamBody)return t.readableStreamBody;throw Error(`No blob body or readable stream body`)},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),X=(e,t)=>v.gen(function*(){let i={...r,...t},a=yield*ie(e);if(a instanceof Blob){let e=yield*v.promise(()=>a.arrayBuffer()),t=new Uint8Array(e),n=i.chunkSize,r=[];for(let e=0;e<t.length;e+=n)r.push(t.slice(e,Math.min(e+n,t.length)));return b.fromIterable(r)}return b.async(e=>{let t=a.getReader(),r=i.chunkSize,o=new Uint8Array;return(async()=>{try{for(;;){let{done:n,value:i}=await t.read();if(n){o.length>0&&e.single(o),e.end();return}if(i){let t=new Uint8Array(o.length+i.length);for(t.set(o),t.set(i,o.length),o=t;o.length>=r;){let t=o.slice(0,r);o=o.slice(r),e.single(t)}}}}catch(t){e.fail(new n({code:`FILE_READ_ERROR`,status:500,body:`Failed to read Azure blob stream`,details:`Azure stream read failed: ${String(t)}`}))}})(),v.sync(()=>{t.releaseLock()})})}),ae=e=>v.gen(function*(){let t=yield*X(e),n=[];yield*b.runForEach(t,e=>v.sync(()=>{n.push(e)}));let r=n.reduce((e,t)=>e+t.length,0),i=new Uint8Array(r),a=0;for(let e of n)i.set(e,a),a+=e.length;return i}),oe=(e,t,n)=>v.gen(function*(){let r=yield*F.get(e),i=(yield*q(e)).length+1,a=yield*re(e);if(a){yield*V(e);let o=t-a.size,s=a.stream.pipe(b.concat(n));return{uploadFile:r,nextBlockNumber:i-1,offset:o,incompleteBlockSize:a.size,data:s}}else return{uploadFile:r,nextBlockNumber:i,offset:t,incompleteBlockSize:0,data:n}}),Z=(e,t)=>_(e.file_id,g(l,v.gen(function*(){let 
n=Date.now(),{stream:r,file_id:i,offset:o}=e,{onProgress:s}=t,{uploadFile:c,nextBlockNumber:l,offset:d,data:f}=yield*oe(i,o,r),{bytesUploaded:g,blockIds:_}=yield*G(c,f,l,d,s),y=d+g;if(c.size===y)try{yield*K(c,_),yield*F.set(i,{...c,offset:y}),yield*m(i,{fileSize:c.size||0,totalDurationMs:Date.now()-n,partsCount:_.length,averagePartSize:c.size,throughputBps:c.size/(Date.now()-n),retryCount:0}),yield*p(v.succeed(1)),yield*a(v.succeed(-1))}catch(e){throw yield*v.logError(`Failed to finish upload`).pipe(v.annotateLogs({upload_id:i,error:JSON.stringify(e)})),yield*u(v.succeed(1)),v.runSync(h(`write`,e,{upload_id:i,operation:`commit`,blocks:_.length})),e}return y}))),se=e=>v.gen(function*(){let t=yield*F.get(e),n=0;try{n=S(yield*q(e))}catch(n){if(typeof n==`object`&&n&&`statusCode`in n&&n.statusCode===404)return{...t,offset:t.size,size:t.size,metadata:t.metadata,storage:t.storage};throw yield*v.logError(`Error on get upload`).pipe(v.annotateLogs({upload_id:e,error:JSON.stringify(n)})),n}let r=yield*ne(e);return{...t,offset:n+(r??0),size:t.size,storage:t.storage}}),ce=e=>v.gen(function*(){try{let t=R.getBlockBlobClient(e);yield*v.promise(()=>t.deleteIfExists()),yield*V(e)}catch(t){if(typeof t==`object`&&t&&`statusCode`in t&&t.statusCode===404)return yield*v.logError(`No file found`).pipe(v.annotateLogs({upload_id:e})),yield*v.fail(n.fromCode(`FILE_NOT_FOUND`));throw v.runSync(h(`remove`,t,{upload_id:e})),t}yield*J(e),yield*a(v.succeed(-1))}),Q=()=>O,le=e=>{let t=new Date(e);return new Date(t.getTime()+Q())},ue=()=>v.tryPromise({try:async()=>{if(Q()===0)return 0;let e=0,t=R.listBlobsFlat({includeMetadata:!0}),n=[];for await(let e of t)if(e.metadata?.creationDate){let t=new Date(e.metadata.creationDate);Date.now()>le(t.toISOString()).getTime()&&n.push(e.name)}for(let t of n)await R.deleteBlob(t),e++;return e},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),$=()=>({supportsParallelUploads:!0,supportsConcatenation:!1,supportsDeferredLength:!0,supportsResumableUploads:!0,supportsTransactionalUploads:!0,supportsStreamingRead:!0,supportsStreamingWrite:!0,maxConcurrentUploads:D,minChunkSize:T,maxChunkSize:4e3*1024*1024,maxParts:E,optimalChunkSize:I,requiresOrderedChunks:!1,requiresMimeTypeValidation:!0,maxValidationSize:void 0});return{bucket:P,create:Y,remove:ce,write:Z,getUpload:se,read:ae,readStream:X,writeStream:(e,t)=>g(l,v.gen(function*(){let r=Date.now();yield*v.logInfo(`Starting streaming write to Azure`).pipe(v.annotateLogs({upload_id:e,container:P,size_hint:t.sizeHint})),yield*f(v.succeed(1)),yield*a(v.succeed(1));let i=H(t.sizeHint),c=yield*y.make([]),l=yield*y.make(0),u=yield*y.make(1),g=yield*y.make(new Uint8Array),_=(t,r)=>v.gen(function*(){if(t.length===0||t.length<T&&!r)return;let i=yield*y.getAndUpdate(u,e=>e+1),a=x(`stream-block-${i.toString().padStart(6,`0`)}`).toString(`base64`);yield*v.logDebug(`Staging block from stream`).pipe(v.annotateLogs({upload_id:e,block_number:i,block_size:t.length,is_final_block:r}));let o=R.getBlockBlobClient(e);yield*v.tryPromise({try:()=>o.stageBlock(a,t,t.length),catch:r=>(v.runSync(h(`writeStream`,r,{upload_id:e,block_number:i,block_size:t.length})),n.fromCode(`FILE_WRITE_ERROR`,{cause:r}))}),yield*y.update(c,e=>[...e,a]),yield*d(v.succeed(1)),yield*s(v.succeed(t.length))});yield*t.stream.pipe(b.runForEach(e=>v.gen(function*(){yield*y.update(l,t=>t+e.length);let t=yield*y.get(g),n=new Uint8Array(t.length+e.length);n.set(t),n.set(e,t.length);let r=0;for(;n.length-r>=i;)yield*_(n.slice(r,r+i),!1),r+=i;yield*y.set(g,n.slice(r))})));let 
S=yield*y.get(g);S.length>0&&(yield*_(S,!0));let C=yield*y.get(c),w=yield*y.get(l);if(C.length===0)return yield*a(v.succeed(-1)),yield*v.fail(new n({code:`FILE_WRITE_ERROR`,status:400,body:`Cannot complete upload with no data`,details:`The stream provided no data to upload`}));let E=R.getBlockBlobClient(e);yield*v.tryPromise({try:()=>E.commitBlockList(C,{blobHTTPHeaders:{blobContentType:t.contentType}}),catch:t=>(v.runSync(h(`writeStream`,t,{upload_id:e,operation:`commit`,blocks:C.length})),n.fromCode(`FILE_WRITE_ERROR`,{cause:t}))});let D=Date.now()-r,O=D>0?w*1e3/D:0,k=C.length>0?w/C.length:void 0;return yield*m(e,{fileSize:w,totalDurationMs:D,partsCount:C.length,averagePartSize:k,throughputBps:O,retryCount:0}),yield*p(v.succeed(1)),yield*a(v.succeed(-1)),yield*o(v.succeed(w)),yield*v.logInfo(`Streaming write to Azure completed`).pipe(v.annotateLogs({upload_id:e,total_bytes:w,blocks_count:C.length,duration_ms:D})),{id:e,size:w,path:e,bucket:P}}).pipe(v.catchAll(e=>v.gen(function*(){return yield*u(v.succeed(1)),yield*a(v.succeed(-1)),yield*v.fail(e)})))),deleteExpired:ue(),getCapabilities:$,getChunkerConstraints:()=>({minChunkSize:T,maxChunkSize:4e3*1024*1024,optimalChunkSize:I,requiresOrderedChunks:!1}),validateUploadStrategy:e=>{let t=$(),n=(()=>{switch(e){case`parallel`:return t.supportsParallelUploads;case`single`:return!0;default:return!1}})();return v.succeed(n)}}})}export{C as azureStore};
+ import{BlobServiceClient as e,StorageSharedKeyCredential as t}from"@azure/storage-blob";import{UploadistaError as n}from"@uploadista/core/errors";import{DEFAULT_STREAMING_CONFIG as r,UploadFileKVStore as i}from"@uploadista/core/types";import{azureActiveUploadsGauge as a,azureFileSizeHistogram as o,azurePartSizeHistogram as s,azurePartUploadDurationHistogram as c,azureUploadDurationHistogram as l,azureUploadErrorsTotal as u,azureUploadPartsTotal as d,azureUploadRequestsTotal as f,azureUploadSuccessTotal as p,logAzureUploadCompletion as m,trackAzureError as h,withAzureTimingMetrics as g,withAzureUploadMetrics as _}from"@uploadista/observability";import{Effect as v,Ref as y,Stream as b}from"effect";const x=e=>typeof globalThis<`u`&&`Buffer`in globalThis?globalThis.Buffer.from(e):new Uint8Array(Array.from(e,e=>e.charCodeAt(0)));function S(e){return e&&e.length>0?e.reduce((e,t)=>e+(t?.size??0),0):0}function C({deliveryUrl:C,blockSize:w,minBlockSize:T=1024,maxBlocks:E=5e4,maxConcurrentBlockUploads:D=60,expirationPeriodInMilliseconds:O=1e3*60*60*24*7,connectionString:k,sasUrl:A,credential:j,accountName:M,accountKey:N,containerName:P}){return v.gen(function*(){let F=yield*i,I=w||8*1024*1024,L;if(k)L=e.fromConnectionString(k);else if(A)L=new e(A);else if(j){let t=M?`https://${M}.blob.core.windows.net`:A?.split(`?`)[0]||``;if(!t)throw Error(`When using credential authentication, either accountName or a valid sasUrl must be provided to determine the account URL`);L=new e(t,j)}else if(M&&N)try{let n=new t(M,N);L=new e(`https://${M}.blob.core.windows.net`,n)}catch(e){throw Error(`StorageSharedKeyCredential is only available in Node.js environments. Use sasUrl or credential options for cross-platform compatibility. Original error: ${e}`)}else throw Error(`Azure authentication required. 
Provide one of: connectionString, sasUrl, credential, or accountName + accountKey (Node.js only)`);let R=L.getContainerClient(P),z=e=>`${e}.incomplete`,ee=(e,t,r)=>g(c,v.gen(function*(){yield*v.logInfo(`Uploading block`).pipe(v.annotateLogs({upload_id:e.id,block_id:r,block_size:t.length})),yield*d(v.succeed(1)),yield*s(v.succeed(t.length));try{let i=R.getBlockBlobClient(e.id);yield*v.tryPromise({try:async()=>{await i.stageBlock(r,t,t.length)},catch:i=>(v.runSync(h(`uploadBlock`,i,{upload_id:e.id,block_id:r,block_size:t.length})),n.fromCode(`FILE_WRITE_ERROR`,{cause:i}))}),yield*v.logInfo(`Finished uploading block`).pipe(v.annotateLogs({upload_id:e.id,block_id:r,block_size:t.length}))}catch(n){throw v.runSync(h(`uploadBlock`,n,{upload_id:e.id,block_id:r,block_size:t.length})),n}})),te=(e,t)=>v.tryPromise({try:async()=>{await R.getBlockBlobClient(z(e)).upload(t,t.length)},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}).pipe(v.tap(()=>v.logInfo(`Finished uploading incomplete block`).pipe(v.annotateLogs({upload_id:e})))),B=e=>v.tryPromise({try:async()=>{try{return(await R.getBlockBlobClient(z(e)).download()).readableStreamBody}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return;throw e}},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),ne=e=>v.tryPromise({try:async()=>{try{return(await R.getBlockBlobClient(z(e)).getProperties()).contentLength}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return;throw e}},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),V=e=>v.tryPromise({try:async()=>{await R.getBlockBlobClient(z(e)).deleteIfExists()},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),re=e=>v.gen(function*(){let t=yield*B(e);if(!t)return;let n=t.getReader(),r=[],i=0;try{for(;;){let e=yield*v.promise(()=>n.read());if(e.done)break;r.push(e.value),i+=e.value.length}}finally{n.releaseLock()}let a=b.fromIterable(r);return{size:i,stream:a}}),H=e=>{let t=e??5497558138880,n;n=t<=I?t:t<=I*E?I:Math.ceil(t/E);let r=Math.max(n,T);return Math.ceil(r/1024)*1024},U=e=>t=>b.async(n=>{let r=new Uint8Array,i=1,a=0,o=(t,r=!1)=>{v.runSync(v.logInfo(`Creating chunk`).pipe(v.annotateLogs({block_number:i,chunk_size:t.length,expected_size:e,is_final_chunk:r,total_bytes_processed:a+t.length}))),n.single({blockNumber:i++,data:t,size:t.length})},s=t=>{let n=new Uint8Array(r.length+t.length);for(n.set(r),n.set(t,r.length),r=n,a+=t.length;r.length>=e;){let t=r.slice(0,e);r=r.slice(e),o(t,!1)}};v.runFork(t.pipe(b.runForEach(e=>v.sync(()=>s(e))),v.andThen(()=>v.sync(()=>{r.length>0&&o(r,!0),n.end()})),v.catchAll(e=>v.sync(()=>n.fail(e)))))}),W=(e,t=0)=>n=>e?v.gen(function*(){let r=yield*y.make(t);return n.pipe(b.tap(t=>v.gen(function*(){e(yield*y.updateAndGet(r,e=>e+t.length))})))}).pipe(b.unwrap):n,G=(e,t,r,i,a)=>v.gen(function*(){yield*v.logInfo(`Uploading blocks`).pipe(v.annotateLogs({upload_id:e.id,init_offset:i,file_size:e.size}));let o=e.size,c=H(o);yield*v.logInfo(`Block size`).pipe(v.annotateLogs({upload_id:e.id,block_size:c}));let l=t.pipe(W(a,i),U(c)),u=yield*y.make(i),d=yield*y.make(0),f=yield*y.make([]),p=t=>v.gen(function*(){let i=yield*y.updateAndGet(u,e=>e+t.size),a=i>=(e.size||0);yield*v.logDebug(`Processing chunk`).pipe(v.annotateLogs({upload_id:e.id,cumulative_offset:i,file_size:e.size,chunk_size:t.size,is_final_block:a}));let o=r+t.blockNumber-1;if(t.size>c&&(yield*v.fail(n.fromCode(`FILE_WRITE_ERROR`,{cause:Error(`Block size ${t.size} exceeds upload block size ${c}`)}))),t.size>=T||a){yield*v.logDebug(`Uploading multipart 
chunk`).pipe(v.annotateLogs({upload_id:e.id,block_number:o,chunk_size:t.size,min_block_size:T,is_final_block:a}));let n=x(`block-${o.toString().padStart(6,`0`)}`).toString(`base64`);yield*ee(e,t.data,n),yield*y.update(f,e=>[...e,n]),yield*s(v.succeed(t.size))}else yield*te(e.id,t.data);yield*y.update(d,e=>e+t.size)});return yield*l.pipe(b.runForEach(e=>p(e)),v.withConcurrency(D)),{bytesUploaded:yield*y.get(d),blockIds:yield*y.get(f)}}),K=(e,t)=>v.tryPromise({try:async()=>{await R.getBlockBlobClient(e.id).commitBlockList(t,{blobHTTPHeaders:{blobContentType:e.metadata?.contentType?.toString(),blobCacheControl:e.metadata?.cacheControl?.toString()}})},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),q=e=>v.tryPromise({try:async()=>{try{return(await R.getBlockBlobClient(e).getBlockList(`committed`)).committedBlocks?.map(e=>({size:e.size}))??[]}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return[];throw e}},catch:e=>n.fromCode(`UPLOAD_ID_NOT_FOUND`,{cause:e})}),J=e=>v.gen(function*(){yield*v.logInfo(`Removing cached data`).pipe(v.annotateLogs({upload_id:e})),yield*F.delete(e)}),Y=e=>v.gen(function*(){return yield*f(v.succeed(1)),yield*a(v.succeed(1)),yield*o(v.succeed(e.size||0)),yield*v.logInfo(`Initializing Azure blob upload`).pipe(v.annotateLogs({upload_id:e.id})),e.creationDate=new Date().toISOString(),e.storage={id:e.storage.id,type:e.storage.type,path:e.id,bucket:P},e.url=`${C}/${e.id}`,yield*F.set(e.id,e),yield*v.logInfo(`Azure blob upload initialized`).pipe(v.annotateLogs({upload_id:e.id})),e}),ie=e=>v.tryPromise({try:async()=>{let t=await R.getBlockBlobClient(e).download();if(t.blobBody)return t.blobBody;if(t.readableStreamBody)return t.readableStreamBody;throw Error(`No blob body or readable stream body`)},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),X=(e,t)=>v.gen(function*(){let i={...r,...t},a=yield*ie(e);if(a instanceof Blob){let e=yield*v.promise(()=>a.arrayBuffer()),t=new Uint8Array(e),n=i.chunkSize,r=[];for(let e=0;e<t.length;e+=n)r.push(t.slice(e,Math.min(e+n,t.length)));return b.fromIterable(r)}return b.async(e=>{let t=a.getReader(),r=i.chunkSize,o=new Uint8Array;return(async()=>{try{for(;;){let{done:n,value:i}=await t.read();if(n){o.length>0&&e.single(o),e.end();return}if(i){let t=new Uint8Array(o.length+i.length);for(t.set(o),t.set(i,o.length),o=t;o.length>=r;){let t=o.slice(0,r);o=o.slice(r),e.single(t)}}}}catch(t){e.fail(new n({code:`FILE_READ_ERROR`,status:500,body:`Failed to read Azure blob stream`,details:`Azure stream read failed: ${String(t)}`}))}})(),v.sync(()=>{t.releaseLock()})})}),ae=e=>v.gen(function*(){let t=yield*X(e),n=[];yield*b.runForEach(t,e=>v.sync(()=>{n.push(e)}));let r=n.reduce((e,t)=>e+t.length,0),i=new Uint8Array(r),a=0;for(let e of n)i.set(e,a),a+=e.length;return i}),oe=(e,t,n)=>v.gen(function*(){let r=yield*F.get(e),i=(yield*q(e)).length+1,a=yield*re(e);if(a){yield*V(e);let o=t-a.size,s=a.stream.pipe(b.concat(n));return{uploadFile:r,nextBlockNumber:i-1,offset:o,incompleteBlockSize:a.size,data:s}}else return{uploadFile:r,nextBlockNumber:i,offset:t,incompleteBlockSize:0,data:n}}),Z=(e,t)=>_(e.file_id,g(l,v.gen(function*(){let 
n=Date.now(),{stream:r,file_id:i,offset:o}=e,{onProgress:s}=t,{uploadFile:c,nextBlockNumber:l,offset:d,data:f}=yield*oe(i,o,r),{bytesUploaded:g,blockIds:_}=yield*G(c,f,l,d,s),y=d+g;if(c.size===y)try{yield*K(c,_),yield*F.set(i,{...c,offset:y}),yield*m(i,{fileSize:c.size||0,totalDurationMs:Date.now()-n,partsCount:_.length,averagePartSize:c.size,throughputBps:c.size/(Date.now()-n),retryCount:0}),yield*p(v.succeed(1)),yield*a(v.succeed(-1))}catch(e){throw yield*v.logError(`Failed to finish upload`).pipe(v.annotateLogs({upload_id:i,error:JSON.stringify(e)})),yield*u(v.succeed(1)),v.runSync(h(`write`,e,{upload_id:i,operation:`commit`,blocks:_.length})),e}return y}))),se=e=>v.gen(function*(){let t=yield*F.get(e),n=0;try{n=S(yield*q(e))}catch(n){if(typeof n==`object`&&n&&`statusCode`in n&&n.statusCode===404)return{...t,offset:t.size,size:t.size,metadata:t.metadata,storage:t.storage};throw yield*v.logError(`Error on get upload`).pipe(v.annotateLogs({upload_id:e,error:JSON.stringify(n)})),n}let r=yield*ne(e);return{...t,offset:n+(r??0),size:t.size,storage:t.storage}}),ce=e=>v.gen(function*(){try{let t=R.getBlockBlobClient(e);yield*v.promise(()=>t.deleteIfExists()),yield*V(e)}catch(t){if(typeof t==`object`&&t&&`statusCode`in t&&t.statusCode===404)return yield*v.logError(`No file found`).pipe(v.annotateLogs({upload_id:e})),yield*v.fail(n.fromCode(`FILE_NOT_FOUND`));throw v.runSync(h(`remove`,t,{upload_id:e})),t}yield*J(e),yield*a(v.succeed(-1))}),Q=()=>O,le=e=>{let t=new Date(e);return new Date(t.getTime()+Q())},ue=()=>v.tryPromise({try:async()=>{if(Q()===0)return 0;let e=0,t=R.listBlobsFlat({includeMetadata:!0}),n=[];for await(let e of t)if(e.metadata?.creationDate){let t=new Date(e.metadata.creationDate);Date.now()>le(t.toISOString()).getTime()&&n.push(e.name)}for(let t of n)await R.deleteBlob(t),e++;return e},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),$=()=>({supportsParallelUploads:!0,supportsConcatenation:!1,supportsDeferredLength:!0,supportsResumableUploads:!0,supportsTransactionalUploads:!0,supportsStreamingRead:!0,supportsStreamingWrite:!0,maxConcurrentUploads:D,minChunkSize:T,maxChunkSize:4e3*1024*1024,maxParts:E,optimalChunkSize:I,requiresOrderedChunks:!1,requiresMimeTypeValidation:!0,maxValidationSize:void 0});return{bucket:P,create:Y,remove:ce,write:Z,getUpload:se,read:ae,readStream:X,writeStream:(e,t)=>g(l,v.gen(function*(){let r=Date.now();yield*v.logInfo(`Starting streaming write to Azure`).pipe(v.annotateLogs({upload_id:e,container:P,size_hint:t.sizeHint})),yield*f(v.succeed(1)),yield*a(v.succeed(1));let i=H(t.sizeHint),c=yield*y.make([]),l=yield*y.make(0),u=yield*y.make(1),g=yield*y.make(new Uint8Array),_=(t,r)=>v.gen(function*(){if(t.length===0||t.length<T&&!r)return;let i=yield*y.getAndUpdate(u,e=>e+1),a=x(`stream-block-${i.toString().padStart(6,`0`)}`).toString(`base64`);yield*v.logDebug(`Staging block from stream`).pipe(v.annotateLogs({upload_id:e,block_number:i,block_size:t.length,is_final_block:r}));let o=R.getBlockBlobClient(e);yield*v.tryPromise({try:()=>o.stageBlock(a,t,t.length),catch:r=>(v.runSync(h(`writeStream`,r,{upload_id:e,block_number:i,block_size:t.length})),n.fromCode(`FILE_WRITE_ERROR`,{cause:r}))}),yield*y.update(c,e=>[...e,a]),yield*d(v.succeed(1)),yield*s(v.succeed(t.length))});yield*t.stream.pipe(b.runForEach(e=>v.gen(function*(){yield*y.update(l,t=>t+e.length);let t=yield*y.get(g),n=new Uint8Array(t.length+e.length);n.set(t),n.set(e,t.length);let r=0;for(;n.length-r>=i;)yield*_(n.slice(r,r+i),!1),r+=i;yield*y.set(g,n.slice(r))})));let 
S=yield*y.get(g);S.length>0&&(yield*_(S,!0));let C=yield*y.get(c),w=yield*y.get(l);if(C.length===0)return yield*a(v.succeed(-1)),yield*v.fail(new n({code:`FILE_WRITE_ERROR`,status:400,body:`Cannot complete upload with no data`,details:`The stream provided no data to upload`}));let E=R.getBlockBlobClient(e);yield*v.tryPromise({try:()=>E.commitBlockList(C,{blobHTTPHeaders:{blobContentType:t.contentType}}),catch:t=>(v.runSync(h(`writeStream`,t,{upload_id:e,operation:`commit`,blocks:C.length})),n.fromCode(`FILE_WRITE_ERROR`,{cause:t}))});let D=Date.now()-r,O=D>0?w*1e3/D:0,k=C.length>0?w/C.length:void 0;return yield*m(e,{fileSize:w,totalDurationMs:D,partsCount:C.length,averagePartSize:k,throughputBps:O,retryCount:0}),yield*p(v.succeed(1)),yield*a(v.succeed(-1)),yield*o(v.succeed(w)),yield*v.logInfo(`Streaming write to Azure completed`).pipe(v.annotateLogs({upload_id:e,total_bytes:w,blocks_count:C.length,duration_ms:D})),{id:e,size:w,path:e,bucket:P}}).pipe(v.catchAll(e=>v.gen(function*(){return yield*u(v.succeed(1)),yield*a(v.succeed(-1)),yield*v.fail(e)})))),deleteExpired:ue,getCapabilities:$,getChunkerConstraints:()=>({minChunkSize:T,maxChunkSize:4e3*1024*1024,optimalChunkSize:I,requiresOrderedChunks:!1}),validateUploadStrategy:e=>{let t=$(),n=(()=>{switch(e){case`parallel`:return t.supportsParallelUploads;case`single`:return!0;default:return!1}})();return v.succeed(n)}}})}export{C as azureStore};
2
2
  //# sourceMappingURL=index.mjs.map
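For orientation, a minimal sketch (not part of the published files) of how the exported azureStore factory is configured, using only AzureStoreOptions fields documented in the embedded source below; every concrete value is an illustrative assumption:

import { azureStore } from "@uploadista/data-store-azure";

// Build the store definition; authentication uses one of the documented
// options (connectionString, sasUrl, credential, or accountName + accountKey).
const makeStore = azureStore({
  deliveryUrl: "https://cdn.example.com",                     // assumed delivery host
  containerName: "uploads",                                   // assumed container name
  sasUrl: "https://myaccount.blob.core.windows.net?sv=TOKEN", // assumed SAS URL (one supported auth option)
  blockSize: 8 * 1024 * 1024,                                 // matches the 8MB default in the source
});

// azureStore returns an Effect that reads UploadFileKVStore from context,
// so running it requires that service to be provided (omitted here).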
package/dist/index.mjs.map CHANGED
@@ -1 +1 @@
1
- {"version":3,"file":"index.mjs","names":["blobServiceClient: BlobServiceClient","BlobService","containerClient: ContainerClient","withTimingMetrics","partUploadDurationHistogram","readStream","uploadPartsTotal","partSizeHistogram","chunks: Uint8Array[]","optimalBlockSize: number","uploadRequestsTotal","activeUploadsGauge","fileSizeHistogram","withUploadMetrics","uploadDurationHistogram","uploadSuccessTotal","uploadErrorsTotal","expiredBlobs: string[]","blobClient"],"sources":["../src/azure-store.ts"],"sourcesContent":["import type { TokenCredential } from \"@azure/core-auth\";\nimport {\n BlobServiceClient as BlobService,\n type BlobServiceClient,\n type ContainerClient,\n StorageSharedKeyCredential,\n} from \"@azure/storage-blob\";\nimport { UploadistaError } from \"@uploadista/core/errors\";\n\nimport {\n type DataStore,\n type DataStoreCapabilities,\n type DataStoreWriteOptions,\n DEFAULT_STREAMING_CONFIG,\n type StreamingConfig,\n type StreamWriteOptions,\n type StreamWriteResult,\n type UploadFile,\n UploadFileKVStore,\n type UploadStrategy,\n} from \"@uploadista/core/types\";\nimport {\n azureActiveUploadsGauge as activeUploadsGauge,\n azureFileSizeHistogram as fileSizeHistogram,\n logAzureUploadCompletion,\n azurePartSizeHistogram as partSizeHistogram,\n azurePartUploadDurationHistogram as partUploadDurationHistogram,\n trackAzureError,\n azureUploadDurationHistogram as uploadDurationHistogram,\n azureUploadErrorsTotal as uploadErrorsTotal,\n azureUploadPartsTotal as uploadPartsTotal,\n azureUploadRequestsTotal as uploadRequestsTotal,\n azureUploadSuccessTotal as uploadSuccessTotal,\n withAzureTimingMetrics as withTimingMetrics,\n withAzureUploadMetrics as withUploadMetrics,\n} from \"@uploadista/observability\";\nimport { Effect, Ref, Stream } from \"effect\";\n\n// Using base64 encoding that works in both Node.js and browser\nconst bufferFrom = (str: string) => {\n // Use global Buffer if available, otherwise fallback to btoa\n if (typeof globalThis !== \"undefined\" && \"Buffer\" in globalThis) {\n return (globalThis as any).Buffer.from(str);\n }\n // Fallback for browser environments\n return new Uint8Array(Array.from(str, (c) => c.charCodeAt(0)));\n};\n\nexport type ChunkInfo = {\n blockNumber: number;\n data: Uint8Array;\n size: number;\n isFinalPart?: boolean;\n};\n\nexport type AzureStoreOptions = {\n deliveryUrl: string;\n /**\n * The preferred block size for blocks sent to Azure. Can not be lower than 1 byte or more than 4000MiB.\n * The server calculates the optimal block size, which takes this size into account,\n * but may increase it to not exceed the Azure 50K blocks limit.\n */\n blockSize?: number;\n /**\n * The minimal block size for blocks.\n * Can be used to ensure that all non-trailing blocks are exactly the same size.\n * Can not be lower than 1 byte or more than 4000MiB.\n */\n minBlockSize?: number;\n /**\n * The maximum number of blocks allowed in a block blob upload. 
Defaults to 50,000.\n */\n maxBlocks?: number;\n maxConcurrentBlockUploads?: number;\n expirationPeriodInMilliseconds?: number;\n // Azure authentication options (choose one)\n connectionString?: string;\n /**\n * SAS URL for the storage account (works in all environments including browsers)\n * Format: https://<account>.blob.core.windows.net?<sas-token>\n */\n sasUrl?: string;\n /**\n * TokenCredential for OAuth authentication (e.g., DefaultAzureCredential)\n * Works in all environments and is the recommended approach for production\n */\n credential?: TokenCredential;\n /**\n * Account name and key for shared key authentication (Node.js only)\n * @deprecated Use sasUrl or credential instead for cross-platform compatibility\n */\n accountName?: string;\n /**\n * @deprecated Use sasUrl or credential instead for cross-platform compatibility\n */\n accountKey?: string;\n containerName: string;\n};\n\nfunction calcOffsetFromBlocks(blocks?: Array<{ size: number }>) {\n return blocks && blocks.length > 0\n ? blocks.reduce((a, b) => a + (b?.size ?? 0), 0)\n : 0;\n}\n\nexport type AzureStore = DataStore<UploadFile> & {\n getUpload: (id: string) => Effect.Effect<UploadFile, UploadistaError>;\n readStream: (\n id: string,\n config?: StreamingConfig,\n ) => Effect.Effect<Stream.Stream<Uint8Array, UploadistaError>, UploadistaError>;\n getChunkerConstraints: () => {\n minChunkSize: number;\n maxChunkSize: number;\n optimalChunkSize: number;\n requiresOrderedChunks: boolean;\n };\n};\n\nexport function azureStore({\n deliveryUrl,\n blockSize,\n minBlockSize = 1024, // 1KB minimum\n maxBlocks = 50_000,\n maxConcurrentBlockUploads = 60,\n expirationPeriodInMilliseconds = 1000 * 60 * 60 * 24 * 7, // 1 week\n connectionString,\n sasUrl,\n credential,\n accountName,\n accountKey,\n containerName,\n}: AzureStoreOptions) {\n return Effect.gen(function* () {\n const kvStore = yield* UploadFileKVStore;\n const preferredBlockSize = blockSize || 8 * 1024 * 1024; // 8MB default\n const maxUploadSize = 5_497_558_138_880 as const; // 5TiB (Azure Block Blob limit)\n\n // Initialize Azure Blob Service Client with cross-platform authentication\n let blobServiceClient: BlobServiceClient;\n\n if (connectionString) {\n // Connection string (works in all environments)\n blobServiceClient = BlobService.fromConnectionString(connectionString);\n } else if (sasUrl) {\n // SAS URL (works in all environments including browsers)\n blobServiceClient = new BlobService(sasUrl);\n } else if (credential) {\n // OAuth token credential (works in all environments, recommended for production)\n const accountUrl = accountName\n ? `https://${accountName}.blob.core.windows.net`\n : sasUrl?.split(\"?\")[0] || \"\";\n if (!accountUrl) {\n throw new Error(\n \"When using credential authentication, either accountName or a valid sasUrl must be provided to determine the account URL\",\n );\n }\n blobServiceClient = new BlobService(accountUrl, credential);\n } else if (accountName && accountKey) {\n // Legacy shared key authentication (Node.js only)\n // This will fail in browser/edge environments\n try {\n const sharedKeyCredential = new StorageSharedKeyCredential(\n accountName,\n accountKey,\n );\n blobServiceClient = new BlobService(\n `https://${accountName}.blob.core.windows.net`,\n sharedKeyCredential,\n );\n } catch (error) {\n throw new Error(\n \"StorageSharedKeyCredential is only available in Node.js environments. \" +\n \"Use sasUrl or credential options for cross-platform compatibility. 
\" +\n `Original error: ${error}`,\n );\n }\n } else {\n throw new Error(\n \"Azure authentication required. Provide one of: \" +\n \"connectionString, sasUrl, credential, or accountName + accountKey (Node.js only)\",\n );\n }\n\n const containerClient: ContainerClient =\n blobServiceClient.getContainerClient(containerName);\n\n const incompletePartKey = (id: string) => {\n return `${id}.incomplete`;\n };\n\n const uploadBlock = (\n uploadFile: UploadFile,\n readStream: Uint8Array,\n blockId: string,\n ) => {\n return withTimingMetrics(\n partUploadDurationHistogram,\n Effect.gen(function* () {\n yield* Effect.logInfo(\"Uploading block\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n block_id: blockId,\n block_size: readStream.length,\n }),\n );\n\n yield* uploadPartsTotal(Effect.succeed(1));\n yield* partSizeHistogram(Effect.succeed(readStream.length));\n\n try {\n const blobClient = containerClient.getBlockBlobClient(\n uploadFile.id,\n );\n yield* Effect.tryPromise({\n try: async () => {\n await blobClient.stageBlock(\n blockId,\n readStream,\n readStream.length,\n );\n },\n catch: (error) => {\n Effect.runSync(\n trackAzureError(\"uploadBlock\", error, {\n upload_id: uploadFile.id,\n block_id: blockId,\n block_size: readStream.length,\n }),\n );\n return UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n });\n },\n });\n\n yield* Effect.logInfo(\"Finished uploading block\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n block_id: blockId,\n block_size: readStream.length,\n }),\n );\n } catch (error) {\n Effect.runSync(\n trackAzureError(\"uploadBlock\", error, {\n upload_id: uploadFile.id,\n block_id: blockId,\n block_size: readStream.length,\n }),\n );\n throw error;\n }\n }),\n );\n };\n\n const uploadIncompleteBlock = (id: string, readStream: Uint8Array) => {\n return Effect.tryPromise({\n try: async () => {\n const blobClient = containerClient.getBlockBlobClient(\n incompletePartKey(id),\n );\n await blobClient.upload(readStream, readStream.length);\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n }),\n }).pipe(\n Effect.tap(() =>\n Effect.logInfo(\"Finished uploading incomplete block\").pipe(\n Effect.annotateLogs({\n upload_id: id,\n }),\n ),\n ),\n );\n };\n\n const getIncompleteBlock = (id: string) => {\n return Effect.tryPromise({\n try: async () => {\n try {\n const blobClient = containerClient.getBlockBlobClient(\n incompletePartKey(id),\n );\n const response = await blobClient.download();\n return response.readableStreamBody as unknown as ReadableStream;\n } catch (error) {\n if (\n error &&\n typeof error === \"object\" &&\n \"statusCode\" in error &&\n error.statusCode === 404\n ) {\n return undefined;\n }\n throw error;\n }\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n }),\n });\n };\n\n const getIncompleteBlockSize = (id: string) => {\n return Effect.tryPromise({\n try: async () => {\n try {\n const blobClient = containerClient.getBlockBlobClient(\n incompletePartKey(id),\n );\n const properties = await blobClient.getProperties();\n return properties.contentLength;\n } catch (error) {\n if (\n error &&\n typeof error === \"object\" &&\n \"statusCode\" in error &&\n error.statusCode === 404\n ) {\n return undefined;\n }\n throw error;\n }\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n }),\n });\n };\n\n const deleteIncompleteBlock = (id: string) => {\n 
return Effect.tryPromise({\n try: async () => {\n const blobClient = containerClient.getBlockBlobClient(\n incompletePartKey(id),\n );\n await blobClient.deleteIfExists();\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n }),\n });\n };\n\n const downloadIncompleteBlock = (id: string) => {\n return Effect.gen(function* () {\n const incompleteBlock = yield* getIncompleteBlock(id);\n\n if (!incompleteBlock) {\n return;\n }\n\n // Read the stream and collect all chunks to calculate size\n const reader = incompleteBlock.getReader();\n const chunks: Uint8Array[] = [];\n let incompleteBlockSize = 0;\n\n try {\n while (true) {\n const result = yield* Effect.promise(() => reader.read());\n if (result.done) break;\n chunks.push(result.value);\n incompleteBlockSize += result.value.length;\n }\n } finally {\n reader.releaseLock();\n }\n\n // Create a new readable stream from the chunks\n const stream = Stream.fromIterable(chunks);\n\n return {\n size: incompleteBlockSize,\n stream,\n };\n });\n };\n\n const calcOptimalBlockSize = (initSize?: number): number => {\n const size = initSize ?? maxUploadSize;\n let optimalBlockSize: number;\n\n if (size <= preferredBlockSize) {\n optimalBlockSize = size;\n } else if (size <= preferredBlockSize * maxBlocks) {\n optimalBlockSize = preferredBlockSize;\n } else {\n // Calculate the minimum block size needed to fit within the max blocks limit\n optimalBlockSize = Math.ceil(size / maxBlocks);\n }\n\n // Ensure the block size respects the minimum and is aligned properly\n const finalBlockSize = Math.max(optimalBlockSize, minBlockSize);\n\n // Round up to ensure consistent block sizes\n return Math.ceil(finalBlockSize / 1024) * 1024; // Align to 1KB boundaries\n };\n\n // Proper single-pass chunking using Effect's async stream constructor\n // Ensures all parts except the final part are exactly the same size (S3 requirement)\n const createChunkedStream =\n (chunkSize: number) =>\n <E>(\n stream: Stream.Stream<Uint8Array, E>,\n ): Stream.Stream<ChunkInfo, E> => {\n return Stream.async<ChunkInfo, E>((emit) => {\n let buffer = new Uint8Array(0);\n let blockNumber = 1;\n let totalBytesProcessed = 0;\n\n const emitChunk = (data: Uint8Array, isFinalChunk = false) => {\n // Log chunk information for debugging - use INFO level to see in logs\n Effect.runSync(\n Effect.logInfo(\"Creating chunk\").pipe(\n Effect.annotateLogs({\n block_number: blockNumber,\n chunk_size: data.length,\n expected_size: chunkSize,\n is_final_chunk: isFinalChunk,\n total_bytes_processed: totalBytesProcessed + data.length,\n }),\n ),\n );\n emit.single({\n blockNumber: blockNumber++,\n data,\n size: data.length,\n });\n };\n\n const processChunk = (newData: Uint8Array) => {\n // Combine buffer with new data\n const combined = new Uint8Array(buffer.length + newData.length);\n combined.set(buffer);\n combined.set(newData, buffer.length);\n buffer = combined;\n totalBytesProcessed += newData.length;\n\n // Emit full chunks of exactly chunkSize bytes\n // This ensures S3 multipart upload rule: all parts except last must be same size\n while (buffer.length >= chunkSize) {\n const chunk = buffer.slice(0, chunkSize);\n buffer = buffer.slice(chunkSize);\n emitChunk(chunk, false);\n }\n };\n\n // Process the stream\n Effect.runFork(\n stream.pipe(\n Stream.runForEach((chunk) =>\n Effect.sync(() => processChunk(chunk)),\n ),\n Effect.andThen(() =>\n Effect.sync(() => {\n // Emit final chunk if there's remaining data\n // The final chunk can be any size < 
chunkSize (S3 allows this)\n if (buffer.length > 0) {\n emitChunk(buffer, true);\n }\n emit.end();\n }),\n ),\n Effect.catchAll((error) => Effect.sync(() => emit.fail(error))),\n ),\n );\n });\n };\n\n // Byte-level progress tracking during streaming\n // This provides smooth, immediate progress feedback by tracking bytes as they\n // flow through the stream, before they reach S3. This solves the issue where\n // small files (< 5MB) would jump from 0% to 100% instantly.\n const withByteProgressTracking =\n (onProgress?: (totalBytes: number) => void, initialOffset = 0) =>\n <E, R>(stream: Stream.Stream<Uint8Array, E, R>) => {\n if (!onProgress) return stream;\n\n return Effect.gen(function* () {\n const totalBytesProcessedRef = yield* Ref.make(initialOffset);\n\n return stream.pipe(\n Stream.tap((chunk) =>\n Effect.gen(function* () {\n const newTotal = yield* Ref.updateAndGet(\n totalBytesProcessedRef,\n (total) => total + chunk.length,\n );\n onProgress(newTotal);\n }),\n ),\n );\n }).pipe(Stream.unwrap);\n };\n\n /**\n * Uploads a stream to Azure using multiple blocks\n */\n const uploadBlocks = (\n uploadFile: UploadFile,\n readStream: Stream.Stream<Uint8Array, UploadistaError>,\n initCurrentBlockNumber: number,\n initOffset: number,\n onProgress?: (newOffset: number) => void,\n ) => {\n return Effect.gen(function* () {\n yield* Effect.logInfo(\"Uploading blocks\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n init_offset: initOffset,\n file_size: uploadFile.size,\n }),\n );\n\n const size = uploadFile.size;\n\n const uploadBlockSize = calcOptimalBlockSize(size);\n yield* Effect.logInfo(\"Block size\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n block_size: uploadBlockSize,\n }),\n );\n // Enhanced Progress Tracking Strategy:\n // 1. Byte-level progress during streaming - provides immediate, smooth feedback\n // as data flows through the pipeline (even for small files)\n // 2. This tracks progress BEFORE S3 upload, giving users immediate feedback\n // 3. For large files with multiple parts, this provides granular updates\n // 4. 
For small files (single part), this prevents 0%->100% jumps\n const chunkStream = readStream.pipe(\n // Add byte-level progress tracking during streaming (immediate feedback)\n withByteProgressTracking(onProgress, initOffset),\n // Create chunks for S3 multipart upload with uniform part sizes\n createChunkedStream(uploadBlockSize),\n );\n\n // Track cumulative offset and total bytes with Effect Refs\n const cumulativeOffsetRef = yield* Ref.make(initOffset);\n const totalBytesUploadedRef = yield* Ref.make(0);\n const blockIdsRef = yield* Ref.make<string[]>([]);\n // Create a chunk upload function for the sink\n const uploadChunk = (chunkInfo: ChunkInfo) =>\n Effect.gen(function* () {\n // Calculate cumulative bytes to determine if this is the final block\n const cumulativeOffset = yield* Ref.updateAndGet(\n cumulativeOffsetRef,\n (offset) => offset + chunkInfo.size,\n );\n const isFinalBlock = cumulativeOffset >= (uploadFile.size || 0);\n\n yield* Effect.logDebug(\"Processing chunk\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n cumulative_offset: cumulativeOffset,\n file_size: uploadFile.size,\n chunk_size: chunkInfo.size,\n is_final_block: isFinalBlock,\n }),\n );\n\n const actualBlockNumber =\n initCurrentBlockNumber + chunkInfo.blockNumber - 1;\n\n if (chunkInfo.size > uploadBlockSize) {\n yield* Effect.fail(\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: new Error(\n `Block size ${chunkInfo.size} exceeds upload block size ${uploadBlockSize}`,\n ),\n }),\n );\n }\n\n // For parts that meet the minimum part size (5MB) or are the final part,\n // upload them as regular multipart parts\n if (chunkInfo.size >= minBlockSize || isFinalBlock) {\n yield* Effect.logDebug(\"Uploading multipart chunk\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n block_number: actualBlockNumber,\n chunk_size: chunkInfo.size,\n min_block_size: minBlockSize,\n is_final_block: isFinalBlock,\n }),\n );\n // Generate block ID (base64 encoded, must be consistent)\n const blockId = bufferFrom(\n `block-${actualBlockNumber.toString().padStart(6, \"0\")}`,\n ).toString(\"base64\");\n yield* uploadBlock(uploadFile, chunkInfo.data, blockId);\n yield* Ref.update(blockIdsRef, (ids) => [...ids, blockId]);\n yield* partSizeHistogram(Effect.succeed(chunkInfo.size));\n } else {\n // Only upload as incomplete part if it's smaller than minimum and not final\n yield* uploadIncompleteBlock(uploadFile.id, chunkInfo.data);\n }\n\n yield* Ref.update(\n totalBytesUploadedRef,\n (total) => total + chunkInfo.size,\n );\n\n // Note: Byte-level progress is now tracked during streaming phase\n // This ensures smooth progress updates regardless of part size\n // Azure upload completion is tracked via totalBytesUploadedRef for accuracy\n });\n\n // Process chunks concurrently with controlled concurrency\n yield* chunkStream.pipe(\n Stream.runForEach((chunkInfo) => uploadChunk(chunkInfo)),\n Effect.withConcurrency(maxConcurrentBlockUploads),\n );\n\n return {\n bytesUploaded: yield* Ref.get(totalBytesUploadedRef),\n blockIds: yield* Ref.get(blockIdsRef),\n };\n });\n };\n\n /**\n * Commits all staged blocks to create the final blob\n */\n const commitBlocks = (uploadFile: UploadFile, blockIds: string[]) => {\n return Effect.tryPromise({\n try: async () => {\n const blobClient = containerClient.getBlockBlobClient(uploadFile.id);\n await blobClient.commitBlockList(blockIds, {\n blobHTTPHeaders: {\n blobContentType: uploadFile.metadata?.contentType?.toString(),\n blobCacheControl: 
uploadFile.metadata?.cacheControl?.toString(),\n },\n });\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n }),\n });\n };\n\n /**\n * Gets the committed blocks for a blob\n */\n const retrieveBlocks = (id: string) => {\n return Effect.tryPromise({\n try: async () => {\n try {\n const blobClient = containerClient.getBlockBlobClient(id);\n const blockList = await blobClient.getBlockList(\"committed\");\n\n const blocks =\n blockList.committedBlocks?.map((block) => ({\n size: block.size,\n })) ?? [];\n\n return blocks;\n } catch (error) {\n if (\n error &&\n typeof error === \"object\" &&\n \"statusCode\" in error &&\n error.statusCode === 404\n ) {\n return [];\n }\n throw error;\n }\n },\n catch: (error) =>\n UploadistaError.fromCode(\"UPLOAD_ID_NOT_FOUND\", {\n cause: error as Error,\n }),\n });\n };\n\n /**\n * Removes cached data for a given file\n */\n const clearCache = (id: string) => {\n return Effect.gen(function* () {\n yield* Effect.logInfo(\"Removing cached data\").pipe(\n Effect.annotateLogs({\n upload_id: id,\n }),\n );\n yield* kvStore.delete(id);\n });\n };\n\n /**\n * Creates a blob placeholder in Azure and stores metadata\n */\n const create = (upload: UploadFile) => {\n return Effect.gen(function* () {\n yield* uploadRequestsTotal(Effect.succeed(1));\n yield* activeUploadsGauge(Effect.succeed(1));\n yield* fileSizeHistogram(Effect.succeed(upload.size || 0));\n\n yield* Effect.logInfo(\"Initializing Azure blob upload\").pipe(\n Effect.annotateLogs({\n upload_id: upload.id,\n }),\n );\n\n upload.creationDate = new Date().toISOString();\n upload.storage = {\n id: upload.storage.id,\n type: upload.storage.type,\n path: upload.id,\n bucket: containerName,\n };\n upload.url = `${deliveryUrl}/${upload.id}`;\n\n yield* kvStore.set(upload.id, upload);\n yield* Effect.logInfo(\"Azure blob upload initialized\").pipe(\n Effect.annotateLogs({\n upload_id: upload.id,\n }),\n );\n\n return upload;\n });\n };\n\n /**\n * Internal helper to get raw Azure stream (for backward compatibility).\n */\n const getAzureStream = (\n id: string,\n ): Effect.Effect<ReadableStream | Blob, UploadistaError> => {\n return Effect.tryPromise({\n try: async () => {\n const blobClient = containerClient.getBlockBlobClient(id);\n const response = await blobClient.download();\n if (response.blobBody) {\n return response.blobBody;\n }\n if (response.readableStreamBody) {\n return response.readableStreamBody as unknown as ReadableStream;\n }\n throw new Error(\"No blob body or readable stream body\");\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n }),\n });\n };\n\n /**\n * Reads file content as a stream of chunks for memory-efficient processing.\n * Uses Azure BlobClient.download and converts to an Effect Stream.\n *\n * @param id - The unique identifier of the file to read\n * @param config - Optional streaming configuration (chunk size)\n * @returns An Effect that resolves to a Stream of byte chunks\n */\n const readStream = (id: string, config?: StreamingConfig) =>\n Effect.gen(function* () {\n // Merge config with defaults\n const effectiveConfig = {\n ...DEFAULT_STREAMING_CONFIG,\n ...config,\n };\n\n const azureStream = yield* getAzureStream(id);\n\n // Handle Blob type (browser environment)\n if (azureStream instanceof Blob) {\n const arrayBuffer = yield* Effect.promise(() =>\n azureStream.arrayBuffer(),\n );\n const bytes = new Uint8Array(arrayBuffer as ArrayBuffer);\n\n // Convert to chunked 
stream\n const chunkSize = effectiveConfig.chunkSize;\n const chunks: Uint8Array[] = [];\n for (let i = 0; i < bytes.length; i += chunkSize) {\n chunks.push(bytes.slice(i, Math.min(i + chunkSize, bytes.length)));\n }\n return Stream.fromIterable(chunks);\n }\n\n // Handle ReadableStream type\n return Stream.async<Uint8Array, UploadistaError>((emit) => {\n const reader = azureStream.getReader();\n const chunkSize = effectiveConfig.chunkSize;\n let buffer = new Uint8Array(0);\n\n const processChunk = async () => {\n try {\n while (true) {\n const { done, value } = await reader.read();\n\n if (done) {\n // Emit any remaining data in buffer\n if (buffer.length > 0) {\n emit.single(buffer);\n }\n emit.end();\n return;\n }\n\n if (value) {\n // Combine buffer with new value\n const combined = new Uint8Array(buffer.length + value.length);\n combined.set(buffer);\n combined.set(value, buffer.length);\n buffer = combined;\n\n // Emit chunks of the configured size\n while (buffer.length >= chunkSize) {\n const chunk = buffer.slice(0, chunkSize);\n buffer = buffer.slice(chunkSize);\n emit.single(chunk);\n }\n }\n }\n } catch (error) {\n emit.fail(\n new UploadistaError({\n code: \"FILE_READ_ERROR\",\n status: 500,\n body: \"Failed to read Azure blob stream\",\n details: `Azure stream read failed: ${String(error)}`,\n }),\n );\n }\n };\n\n // Start processing\n processChunk();\n\n // Cleanup function\n return Effect.sync(() => {\n reader.releaseLock();\n });\n });\n });\n\n const read = (id: string): Effect.Effect<Uint8Array, UploadistaError> => {\n return Effect.gen(function* () {\n const stream = yield* readStream(id);\n\n // Collect all chunks from the Effect Stream\n const chunks: Uint8Array[] = [];\n yield* Stream.runForEach(stream, (chunk) =>\n Effect.sync(() => {\n chunks.push(chunk);\n }),\n );\n\n // Concatenate all chunks\n const totalLength = chunks.reduce(\n (acc, chunk) => acc + chunk.length,\n 0,\n );\n const result = new Uint8Array(totalLength);\n let offset = 0;\n for (const chunk of chunks) {\n result.set(chunk, offset);\n offset += chunk.length;\n }\n\n return result;\n });\n };\n\n const prepareUpload = (\n file_id: string,\n initialOffset: number,\n initialData: Stream.Stream<Uint8Array, UploadistaError>,\n ) => {\n return Effect.gen(function* () {\n const uploadFile = yield* kvStore.get(file_id);\n\n const blocks = yield* retrieveBlocks(file_id);\n\n const blockNumber = blocks.length;\n const nextBlockNumber = blockNumber + 1;\n\n const incompleteBlock = yield* downloadIncompleteBlock(file_id);\n\n if (incompleteBlock) {\n yield* deleteIncompleteBlock(file_id);\n const offset = initialOffset - incompleteBlock.size;\n const data = incompleteBlock.stream.pipe(Stream.concat(initialData));\n return {\n uploadFile,\n nextBlockNumber: nextBlockNumber - 1,\n offset,\n incompleteBlockSize: incompleteBlock.size,\n data,\n };\n } else {\n return {\n uploadFile,\n nextBlockNumber,\n offset: initialOffset,\n incompleteBlockSize: 0,\n data: initialData,\n };\n }\n });\n };\n\n /**\n * Write to the file, starting at the provided offset\n */\n const write = (\n options: DataStoreWriteOptions,\n dependencies: {\n onProgress?: (chunkSize: number) => void;\n },\n ) => {\n return withUploadMetrics(\n options.file_id,\n withTimingMetrics(\n uploadDurationHistogram,\n Effect.gen(function* () {\n const startTime = Date.now();\n const {\n stream: initialData,\n file_id,\n offset: initialOffset,\n } = options;\n const { onProgress } = dependencies;\n\n const prepareResult = yield* prepareUpload(\n 
file_id,\n initialOffset,\n initialData,\n );\n\n const { uploadFile, nextBlockNumber, offset, data } = prepareResult;\n\n const { bytesUploaded, blockIds } = yield* uploadBlocks(\n uploadFile,\n data,\n nextBlockNumber,\n offset,\n onProgress,\n );\n\n const newOffset = offset + bytesUploaded;\n\n if (uploadFile.size === newOffset) {\n try {\n // Commit all blocks to finalize the blob\n yield* commitBlocks(uploadFile, blockIds);\n\n // Update the upload file with the final offset in the KV store\n yield* kvStore.set(file_id, {\n ...uploadFile,\n offset: newOffset,\n });\n\n // Log completion with observability\n yield* logAzureUploadCompletion(file_id, {\n fileSize: uploadFile.size || 0,\n totalDurationMs: Date.now() - startTime,\n partsCount: blockIds.length,\n averagePartSize: uploadFile.size,\n throughputBps: uploadFile.size / (Date.now() - startTime),\n retryCount: 0,\n });\n\n yield* uploadSuccessTotal(Effect.succeed(1));\n yield* activeUploadsGauge(Effect.succeed(-1));\n } catch (error) {\n yield* Effect.logError(\"Failed to finish upload\").pipe(\n Effect.annotateLogs({\n upload_id: file_id,\n error: JSON.stringify(error),\n }),\n );\n yield* uploadErrorsTotal(Effect.succeed(1));\n Effect.runSync(\n trackAzureError(\"write\", error, {\n upload_id: file_id,\n operation: \"commit\",\n blocks: blockIds.length,\n }),\n );\n throw error;\n }\n }\n\n return newOffset;\n }),\n ),\n );\n };\n\n const getUpload = (id: string) => {\n return Effect.gen(function* () {\n const uploadFile = yield* kvStore.get(id);\n\n let offset = 0;\n\n try {\n const blocks = yield* retrieveBlocks(id);\n offset = calcOffsetFromBlocks(blocks);\n } catch (error) {\n // Check if the error is caused by the blob not being found\n if (\n typeof error === \"object\" &&\n error !== null &&\n \"statusCode\" in error &&\n error.statusCode === 404\n ) {\n return {\n ...uploadFile,\n offset: uploadFile.size as number,\n size: uploadFile.size,\n metadata: uploadFile.metadata,\n storage: uploadFile.storage,\n };\n }\n\n yield* Effect.logError(\"Error on get upload\").pipe(\n Effect.annotateLogs({\n upload_id: id,\n error: JSON.stringify(error),\n }),\n );\n throw error;\n }\n\n const incompleteBlockSize = yield* getIncompleteBlockSize(id);\n\n return {\n ...uploadFile,\n offset: offset + (incompleteBlockSize ?? 
0),\n size: uploadFile.size,\n storage: uploadFile.storage,\n };\n });\n };\n\n const remove = (id: string) => {\n return Effect.gen(function* () {\n try {\n const blobClient = containerClient.getBlockBlobClient(id);\n yield* Effect.promise(() => blobClient.deleteIfExists());\n\n // Also delete incomplete block if it exists\n yield* deleteIncompleteBlock(id);\n } catch (error) {\n if (\n typeof error === \"object\" &&\n error !== null &&\n \"statusCode\" in error &&\n error.statusCode === 404\n ) {\n yield* Effect.logError(\"No file found\").pipe(\n Effect.annotateLogs({\n upload_id: id,\n }),\n );\n return yield* Effect.fail(\n UploadistaError.fromCode(\"FILE_NOT_FOUND\"),\n );\n }\n Effect.runSync(\n trackAzureError(\"remove\", error, {\n upload_id: id,\n }),\n );\n throw error;\n }\n\n yield* clearCache(id);\n yield* activeUploadsGauge(Effect.succeed(-1));\n });\n };\n\n const getExpiration = () => {\n return expirationPeriodInMilliseconds;\n };\n\n const getExpirationDate = (created_at: string) => {\n const date = new Date(created_at);\n return new Date(date.getTime() + getExpiration());\n };\n\n const deleteExpired = (): Effect.Effect<number, UploadistaError> => {\n return Effect.tryPromise({\n try: async (): Promise<number> => {\n if (getExpiration() === 0) {\n return 0;\n }\n\n let deleted = 0;\n\n const response = containerClient.listBlobsFlat({\n includeMetadata: true,\n });\n\n const expiredBlobs: string[] = [];\n\n for await (const blob of response) {\n if (blob.metadata?.creationDate) {\n const creationDate = new Date(blob.metadata.creationDate);\n if (\n Date.now() >\n getExpirationDate(creationDate.toISOString()).getTime()\n ) {\n expiredBlobs.push(blob.name);\n }\n }\n }\n\n // Delete expired blobs\n for (const blobName of expiredBlobs) {\n await containerClient.deleteBlob(blobName);\n deleted++;\n }\n\n return deleted;\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n }),\n });\n };\n\n const getCapabilities = (): DataStoreCapabilities => {\n return {\n supportsParallelUploads: true,\n supportsConcatenation: false, // Azure doesn't have native concatenation like GCS\n supportsDeferredLength: true,\n supportsResumableUploads: true,\n supportsTransactionalUploads: true,\n supportsStreamingRead: true, // Supports streaming reads via BlobClient.download\n supportsStreamingWrite: true, // Supports streaming writes via block staging\n maxConcurrentUploads: maxConcurrentBlockUploads,\n minChunkSize: minBlockSize,\n maxChunkSize: 4000 * 1024 * 1024, // 4000MB Azure limit\n maxParts: maxBlocks,\n optimalChunkSize: preferredBlockSize,\n requiresOrderedChunks: false,\n requiresMimeTypeValidation: true,\n maxValidationSize: undefined, // no size limit\n };\n };\n\n const getChunkerConstraints = () => {\n return {\n minChunkSize: minBlockSize,\n maxChunkSize: 4000 * 1024 * 1024, // 4000MB Azure limit\n optimalChunkSize: preferredBlockSize,\n requiresOrderedChunks: false,\n };\n };\n\n const validateUploadStrategy = (\n strategy: UploadStrategy,\n ): Effect.Effect<boolean, never> => {\n const capabilities = getCapabilities();\n\n const result = (() => {\n switch (strategy) {\n case \"parallel\":\n return capabilities.supportsParallelUploads;\n case \"single\":\n return true;\n default:\n return false;\n }\n })();\n\n return Effect.succeed(result);\n };\n\n /**\n * Writes file content from a stream without knowing the final size upfront.\n * Uses Azure block blob staging to stream content as blocks are buffered.\n *\n * @param fileId - 
The unique identifier for the file\n * @param options - Stream write options including the Effect Stream\n * @returns StreamWriteResult with final size after stream completes\n */\n const writeStream = (\n fileId: string,\n options: StreamWriteOptions,\n ): Effect.Effect<StreamWriteResult, UploadistaError> =>\n withTimingMetrics(\n uploadDurationHistogram,\n Effect.gen(function* () {\n const startTime = Date.now();\n\n yield* Effect.logInfo(\"Starting streaming write to Azure\").pipe(\n Effect.annotateLogs({\n upload_id: fileId,\n container: containerName,\n size_hint: options.sizeHint,\n }),\n );\n\n yield* uploadRequestsTotal(Effect.succeed(1));\n yield* activeUploadsGauge(Effect.succeed(1));\n\n // Calculate optimal block size based on size hint or use default\n const uploadBlockSize = calcOptimalBlockSize(options.sizeHint);\n\n // Track blocks and total bytes\n const blockIdsRef = yield* Ref.make<string[]>([]);\n const totalBytesRef = yield* Ref.make(0);\n const blockNumberRef = yield* Ref.make(1);\n const bufferRef = yield* Ref.make(new Uint8Array(0));\n\n // Helper to stage a block\n const stageBlock = (data: Uint8Array, isFinalBlock: boolean) =>\n Effect.gen(function* () {\n if (data.length === 0) {\n return;\n }\n\n // Only stage if we have enough data or it's the final block\n if (data.length < minBlockSize && !isFinalBlock) {\n return;\n }\n\n const blockNumber = yield* Ref.getAndUpdate(\n blockNumberRef,\n (n) => n + 1,\n );\n\n // Generate block ID (base64 encoded, must be consistent length)\n const blockId = bufferFrom(\n `stream-block-${blockNumber.toString().padStart(6, \"0\")}`,\n ).toString(\"base64\");\n\n yield* Effect.logDebug(\"Staging block from stream\").pipe(\n Effect.annotateLogs({\n upload_id: fileId,\n block_number: blockNumber,\n block_size: data.length,\n is_final_block: isFinalBlock,\n }),\n );\n\n const blobClient = containerClient.getBlockBlobClient(fileId);\n yield* Effect.tryPromise({\n try: () => blobClient.stageBlock(blockId, data, data.length),\n catch: (error) => {\n Effect.runSync(\n trackAzureError(\"writeStream\", error, {\n upload_id: fileId,\n block_number: blockNumber,\n block_size: data.length,\n }),\n );\n return UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n });\n },\n });\n\n yield* Ref.update(blockIdsRef, (ids) => [...ids, blockId]);\n yield* uploadPartsTotal(Effect.succeed(1));\n yield* partSizeHistogram(Effect.succeed(data.length));\n });\n\n // Process stream chunks\n yield* options.stream.pipe(\n Stream.runForEach((chunk) =>\n Effect.gen(function* () {\n // Update total bytes\n yield* Ref.update(totalBytesRef, (total) => total + chunk.length);\n\n // Get current buffer and append new chunk\n const currentBuffer = yield* Ref.get(bufferRef);\n const combined = new Uint8Array(\n currentBuffer.length + chunk.length,\n );\n combined.set(currentBuffer);\n combined.set(chunk, currentBuffer.length);\n\n // Extract full blocks and keep remainder in buffer\n let offset = 0;\n while (combined.length - offset >= uploadBlockSize) {\n const blockData = combined.slice(offset, offset + uploadBlockSize);\n yield* stageBlock(blockData, false);\n offset += uploadBlockSize;\n }\n\n // Store remaining data in buffer\n yield* Ref.set(bufferRef, combined.slice(offset));\n }),\n ),\n );\n\n // Stage any remaining data as final block\n const remainingBuffer = yield* Ref.get(bufferRef);\n if (remainingBuffer.length > 0) {\n yield* stageBlock(remainingBuffer, true);\n }\n\n // Get all block IDs and commit the block list\n const 
blockIds = yield* Ref.get(blockIdsRef);\n const totalBytes = yield* Ref.get(totalBytesRef);\n\n if (blockIds.length === 0) {\n // No blocks staged (empty stream) - fail\n yield* activeUploadsGauge(Effect.succeed(-1));\n return yield* Effect.fail(\n new UploadistaError({\n code: \"FILE_WRITE_ERROR\",\n status: 400,\n body: \"Cannot complete upload with no data\",\n details: \"The stream provided no data to upload\",\n }),\n );\n }\n\n // Commit block list\n const blobClient = containerClient.getBlockBlobClient(fileId);\n yield* Effect.tryPromise({\n try: () =>\n blobClient.commitBlockList(blockIds, {\n blobHTTPHeaders: {\n blobContentType: options.contentType,\n },\n }),\n catch: (error) => {\n Effect.runSync(\n trackAzureError(\"writeStream\", error, {\n upload_id: fileId,\n operation: \"commit\",\n blocks: blockIds.length,\n }),\n );\n return UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n });\n },\n });\n\n // Log completion metrics\n const endTime = Date.now();\n const totalDurationMs = endTime - startTime;\n const throughputBps =\n totalDurationMs > 0 ? (totalBytes * 1000) / totalDurationMs : 0;\n const averageBlockSize =\n blockIds.length > 0 ? totalBytes / blockIds.length : undefined;\n\n yield* logAzureUploadCompletion(fileId, {\n fileSize: totalBytes,\n totalDurationMs,\n partsCount: blockIds.length,\n averagePartSize: averageBlockSize,\n throughputBps,\n retryCount: 0,\n });\n\n yield* uploadSuccessTotal(Effect.succeed(1));\n yield* activeUploadsGauge(Effect.succeed(-1));\n yield* fileSizeHistogram(Effect.succeed(totalBytes));\n\n yield* Effect.logInfo(\"Streaming write to Azure completed\").pipe(\n Effect.annotateLogs({\n upload_id: fileId,\n total_bytes: totalBytes,\n blocks_count: blockIds.length,\n duration_ms: totalDurationMs,\n }),\n );\n\n return {\n id: fileId,\n size: totalBytes,\n path: fileId,\n bucket: containerName,\n } satisfies StreamWriteResult;\n }).pipe(\n Effect.catchAll((error) =>\n Effect.gen(function* () {\n yield* uploadErrorsTotal(Effect.succeed(1));\n yield* activeUploadsGauge(Effect.succeed(-1));\n return yield* Effect.fail(error);\n }),\n ),\n ),\n );\n\n return {\n bucket: containerName,\n create,\n remove,\n write,\n getUpload,\n read,\n readStream,\n writeStream,\n deleteExpired: deleteExpired(),\n getCapabilities,\n getChunkerConstraints,\n validateUploadStrategy,\n } as DataStore<UploadFile>;\n 
});\n}\n"],"mappings":"isBAuCA,MAAM,EAAc,GAEd,OAAO,WAAe,KAAe,WAAY,WAC3C,WAAmB,OAAO,KAAK,EAAI,CAGtC,IAAI,WAAW,MAAM,KAAK,EAAM,GAAM,EAAE,WAAW,EAAE,CAAC,CAAC,CAsDhE,SAAS,EAAqB,EAAkC,CAC9D,OAAO,GAAU,EAAO,OAAS,EAC7B,EAAO,QAAQ,EAAG,IAAM,GAAK,GAAG,MAAQ,GAAI,EAAE,CAC9C,EAiBN,SAAgB,EAAW,CACzB,cACA,YACA,eAAe,KACf,YAAY,IACZ,4BAA4B,GAC5B,iCAAiC,IAAO,GAAK,GAAK,GAAK,EACvD,mBACA,SACA,aACA,cACA,aACA,iBACoB,CACpB,OAAO,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAU,MAAO,EACjB,EAAqB,GAAa,EAAI,KAAO,KAI/CA,EAEJ,GAAI,EAEF,EAAoBC,EAAY,qBAAqB,EAAiB,SAC7D,EAET,EAAoB,IAAIA,EAAY,EAAO,SAClC,EAAY,CAErB,IAAM,EAAa,EACf,WAAW,EAAY,wBACvB,GAAQ,MAAM,IAAI,CAAC,IAAM,GAC7B,GAAI,CAAC,EACH,MAAU,MACR,2HACD,CAEH,EAAoB,IAAIA,EAAY,EAAY,EAAW,SAClD,GAAe,EAGxB,GAAI,CACF,IAAM,EAAsB,IAAI,EAC9B,EACA,EACD,CACD,EAAoB,IAAIA,EACtB,WAAW,EAAY,wBACvB,EACD,OACM,EAAO,CACd,MAAU,MACR,4JAEqB,IACtB,MAGH,MAAU,MACR,kIAED,CAGH,IAAMC,EACJ,EAAkB,mBAAmB,EAAc,CAE/C,EAAqB,GAClB,GAAG,EAAG,aAGT,IACJ,EACA,EACA,IAEOC,EACLC,EACA,EAAO,IAAI,WAAa,CACtB,MAAO,EAAO,QAAQ,kBAAkB,CAAC,KACvC,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,SAAU,EACV,WAAYC,EAAW,OACxB,CAAC,CACH,CAED,MAAOC,EAAiB,EAAO,QAAQ,EAAE,CAAC,CAC1C,MAAOC,EAAkB,EAAO,QAAQF,EAAW,OAAO,CAAC,CAE3D,GAAI,CACF,IAAM,EAAa,EAAgB,mBACjC,EAAW,GACZ,CACD,MAAO,EAAO,WAAW,CACvB,IAAK,SAAY,CACf,MAAM,EAAW,WACf,EACAA,EACAA,EAAW,OACZ,EAEH,MAAQ,IACN,EAAO,QACL,EAAgB,cAAe,EAAO,CACpC,UAAW,EAAW,GACtB,SAAU,EACV,WAAYA,EAAW,OACxB,CAAC,CACH,CACM,EAAgB,SAAS,mBAAoB,CAClD,MAAO,EACR,CAAC,EAEL,CAAC,CAEF,MAAO,EAAO,QAAQ,2BAA2B,CAAC,KAChD,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,SAAU,EACV,WAAYA,EAAW,OACxB,CAAC,CACH,OACM,EAAO,CAQd,MAPA,EAAO,QACL,EAAgB,cAAe,EAAO,CACpC,UAAW,EAAW,GACtB,SAAU,EACV,WAAYA,EAAW,OACxB,CAAC,CACH,CACK,IAER,CACH,CAGG,IAAyB,EAAY,IAClC,EAAO,WAAW,CACvB,IAAK,SAAY,CAIf,MAHmB,EAAgB,mBACjC,EAAkB,EAAG,CACtB,CACgB,OAAOA,EAAYA,EAAW,OAAO,EAExD,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAC3C,MAAO,EACR,CAAC,CACL,CAAC,CAAC,KACD,EAAO,QACL,EAAO,QAAQ,sCAAsC,CAAC,KACpD,EAAO,aAAa,CAClB,UAAW,EACZ,CAAC,CACH,CACF,CACF,CAGG,EAAsB,GACnB,EAAO,WAAW,CACvB,IAAK,SAAY,CACf,GAAI,CAKF,OADiB,MAHE,EAAgB,mBACjC,EAAkB,EAAG,CACtB,CACiC,UAAU,EAC5B,yBACT,EAAO,CACd,GACE,GACA,OAAO,GAAU,UACjB,eAAgB,GAChB,EAAM,aAAe,IAErB,OAEF,MAAM,IAGV,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAC3C,MAAO,EACR,CAAC,CACL,CAAC,CAGE,GAA0B,GACvB,EAAO,WAAW,CACvB,IAAK,SAAY,CACf,GAAI,CAKF,OADmB,MAHA,EAAgB,mBACjC,EAAkB,EAAG,CACtB,CACmC,eAAe,EACjC,oBACX,EAAO,CACd,GACE,GACA,OAAO,GAAU,UACjB,eAAgB,GAChB,EAAM,aAAe,IAErB,OAEF,MAAM,IAGV,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAC3C,MAAO,EACR,CAAC,CACL,CAAC,CAGE,EAAyB,GACtB,EAAO,WAAW,CACvB,IAAK,SAAY,CAIf,MAHmB,EAAgB,mBACjC,EAAkB,EAAG,CACtB,CACgB,gBAAgB,EAEnC,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAC3C,MAAO,EACR,CAAC,CACL,CAAC,CAGE,GAA2B,GACxB,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAkB,MAAO,EAAmB,EAAG,CAErD,GAAI,CAAC,EACH,OAIF,IAAM,EAAS,EAAgB,WAAW,CACpCG,EAAuB,EAAE,CAC3B,EAAsB,EAE1B,GAAI,CACF,OAAa,CACX,IAAM,EAAS,MAAO,EAAO,YAAc,EAAO,MAAM,CAAC,CACzD,GAAI,EAAO,KAAM,MACjB,EAAO,KAAK,EAAO,MAAM,CACzB,GAAuB,EAAO,MAAM,eAE9B,CACR,EAAO,aAAa,CAItB,IAAM,EAAS,EAAO,aAAa,EAAO,CAE1C,MAAO,CACL,KAAM,EACN,SACD,EACD,CAGE,EAAwB,GAA8B,CAC1D,IAAM,EAAO,GAAY,cACrBC,EAEJ,AAME,EANE,GAAQ,EACS,EACV,GAAQ,EAAqB,EACnB,EAGA,KAAK,KAAK,EAAO,EAAU,CAIhD,IAAM,EAAiB,KAAK,IAAI,EAAkB,EAAa,CAG/D,OAAO,KAAK,KAAK,EAAiB,KAAK,CAAG,MAKtC,EACH,GAEC,GAEO,EAAO,MAAqB,GAAS,CAC1C,IAAI,EAAS,IAAI,WACb,EAAc,EACd,EAAsB,EAEpB,GAAa,EAAkB,EAAe,KAAU,CAE5D,EAAO,QACL,EAAO,QAAQ,iBAAiB,CAAC,KAC/B,EAAO,aAAa,CAClB,aAAc,EACd,WAAY,EAAK,OACjB,cAAe,EACf,eAAgB,EAChB,sBAAuB,EAAsB,EAAK,OACnD,CAAC,CACH,CACF,CACD,EAAK,OAAO,CACV,YAAa,IACb,OACA,KAAM,EAAK,OACZ,CAAC,EAGE,EAAgB,GAAwB,CAE5C,IAAM,EAAW,IAAI,WAAW,EAAO,OAAS,EAAQ,OAAO,CAQ/D
,IAPA,EAAS,IAAI,EAAO,CACpB,EAAS,IAAI,EAAS,EAAO,OAAO,CACpC,EAAS,EACT,GAAuB,EAAQ,OAIxB,EAAO,QAAU,GAAW,CACjC,IAAM,EAAQ,EAAO,MAAM,EAAG,EAAU,CACxC,EAAS,EAAO,MAAM,EAAU,CAChC,EAAU,EAAO,GAAM,GAK3B,EAAO,QACL,EAAO,KACL,EAAO,WAAY,GACjB,EAAO,SAAW,EAAa,EAAM,CAAC,CACvC,CACD,EAAO,YACL,EAAO,SAAW,CAGZ,EAAO,OAAS,GAClB,EAAU,EAAQ,GAAK,CAEzB,EAAK,KAAK,EACV,CACH,CACD,EAAO,SAAU,GAAU,EAAO,SAAW,EAAK,KAAK,EAAM,CAAC,CAAC,CAChE,CACF,EACD,CAOA,GACH,EAA2C,EAAgB,IACrD,GACA,EAEE,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAyB,MAAO,EAAI,KAAK,EAAc,CAE7D,OAAO,EAAO,KACZ,EAAO,IAAK,GACV,EAAO,IAAI,WAAa,CAKtB,EAJiB,MAAO,EAAI,aAC1B,EACC,GAAU,EAAQ,EAAM,OAC1B,CACmB,EACpB,CACH,CACF,EACD,CAAC,KAAK,EAAO,OAAO,CAhBE,EAsBtB,GACJ,EACA,EACA,EACA,EACA,IAEO,EAAO,IAAI,WAAa,CAC7B,MAAO,EAAO,QAAQ,mBAAmB,CAAC,KACxC,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,YAAa,EACb,UAAW,EAAW,KACvB,CAAC,CACH,CAED,IAAM,EAAO,EAAW,KAElB,EAAkB,EAAqB,EAAK,CAClD,MAAO,EAAO,QAAQ,aAAa,CAAC,KAClC,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,WAAY,EACb,CAAC,CACH,CAOD,IAAM,EAAcJ,EAAW,KAE7B,EAAyB,EAAY,EAAW,CAEhD,EAAoB,EAAgB,CACrC,CAGK,EAAsB,MAAO,EAAI,KAAK,EAAW,CACjD,EAAwB,MAAO,EAAI,KAAK,EAAE,CAC1C,EAAc,MAAO,EAAI,KAAe,EAAE,CAAC,CAE3C,EAAe,GACnB,EAAO,IAAI,WAAa,CAEtB,IAAM,EAAmB,MAAO,EAAI,aAClC,EACC,GAAW,EAAS,EAAU,KAChC,CACK,EAAe,IAAqB,EAAW,MAAQ,GAE7D,MAAO,EAAO,SAAS,mBAAmB,CAAC,KACzC,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,kBAAmB,EACnB,UAAW,EAAW,KACtB,WAAY,EAAU,KACtB,eAAgB,EACjB,CAAC,CACH,CAED,IAAM,EACJ,EAAyB,EAAU,YAAc,EAcnD,GAZI,EAAU,KAAO,IACnB,MAAO,EAAO,KACZ,EAAgB,SAAS,mBAAoB,CAC3C,MAAW,MACT,cAAc,EAAU,KAAK,6BAA6B,IAC3D,CACF,CAAC,CACH,EAKC,EAAU,MAAQ,GAAgB,EAAc,CAClD,MAAO,EAAO,SAAS,4BAA4B,CAAC,KAClD,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,aAAc,EACd,WAAY,EAAU,KACtB,eAAgB,EAChB,eAAgB,EACjB,CAAC,CACH,CAED,IAAM,EAAU,EACd,SAAS,EAAkB,UAAU,CAAC,SAAS,EAAG,IAAI,GACvD,CAAC,SAAS,SAAS,CACpB,MAAO,GAAY,EAAY,EAAU,KAAM,EAAQ,CACvD,MAAO,EAAI,OAAO,EAAc,GAAQ,CAAC,GAAG,EAAK,EAAQ,CAAC,CAC1D,MAAOE,EAAkB,EAAO,QAAQ,EAAU,KAAK,CAAC,MAGxD,MAAO,GAAsB,EAAW,GAAI,EAAU,KAAK,CAG7D,MAAO,EAAI,OACT,EACC,GAAU,EAAQ,EAAU,KAC9B,EAKD,CAQJ,OALA,MAAO,EAAY,KACjB,EAAO,WAAY,GAAc,EAAY,EAAU,CAAC,CACxD,EAAO,gBAAgB,EAA0B,CAClD,CAEM,CACL,cAAe,MAAO,EAAI,IAAI,EAAsB,CACpD,SAAU,MAAO,EAAI,IAAI,EAAY,CACtC,EACD,CAME,GAAgB,EAAwB,IACrC,EAAO,WAAW,CACvB,IAAK,SAAY,CAEf,MADmB,EAAgB,mBAAmB,EAAW,GAAG,CACnD,gBAAgB,EAAU,CACzC,gBAAiB,CACf,gBAAiB,EAAW,UAAU,aAAa,UAAU,CAC7D,iBAAkB,EAAW,UAAU,cAAc,UAAU,CAChE,CACF,CAAC,EAEJ,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAC3C,MAAO,EACR,CAAC,CACL,CAAC,CAME,EAAkB,GACf,EAAO,WAAW,CACvB,IAAK,SAAY,CACf,GAAI,CASF,OAPkB,MADC,EAAgB,mBAAmB,EAAG,CACtB,aAAa,YAAY,EAGhD,iBAAiB,IAAK,IAAW,CACzC,KAAM,EAAM,KACb,EAAE,EAAI,EAAE,OAGJ,EAAO,CACd,GACE,GACA,OAAO,GAAU,UACjB,eAAgB,GAChB,EAAM,aAAe,IAErB,MAAO,EAAE,CAEX,MAAM,IAGV,MAAQ,GACN,EAAgB,SAAS,sBAAuB,CAC9C,MAAO,EACR,CAAC,CACL,CAAC,CAME,EAAc,GACX,EAAO,IAAI,WAAa,CAC7B,MAAO,EAAO,QAAQ,uBAAuB,CAAC,KAC5C,EAAO,aAAa,CAClB,UAAW,EACZ,CAAC,CACH,CACD,MAAO,EAAQ,OAAO,EAAG,EACzB,CAME,EAAU,GACP,EAAO,IAAI,WAAa,CA2B7B,OA1BA,MAAOG,EAAoB,EAAO,QAAQ,EAAE,CAAC,CAC7C,MAAOC,EAAmB,EAAO,QAAQ,EAAE,CAAC,CAC5C,MAAOC,EAAkB,EAAO,QAAQ,EAAO,MAAQ,EAAE,CAAC,CAE1D,MAAO,EAAO,QAAQ,iCAAiC,CAAC,KACtD,EAAO,aAAa,CAClB,UAAW,EAAO,GACnB,CAAC,CACH,CAED,EAAO,aAAe,IAAI,MAAM,CAAC,aAAa,CAC9C,EAAO,QAAU,CACf,GAAI,EAAO,QAAQ,GACnB,KAAM,EAAO,QAAQ,KACrB,KAAM,EAAO,GACb,OAAQ,EACT,CACD,EAAO,IAAM,GAAG,EAAY,GAAG,EAAO,KAEtC,MAAO,EAAQ,IAAI,EAAO,GAAI,EAAO,CACrC,MAAO,EAAO,QAAQ,gCAAgC,CAAC,KACrD,EAAO,aAAa,CAClB,UAAW,EAAO,GACnB,CAAC,CACH,CAEM,GACP,CAME,GACJ,GAEO,EAAO,WAAW,CACvB,IAAK,SAAY,CAEf,IAAM,EAAW,MADE,EAAgB,mBAAmB,EAAG,CACvB,UAAU,CAC5C,GAAI,EAAS,SACX,OAAO,EAAS,SAElB,GAAI,EAAS,mBACX,OAAO,EAAS,mBAElB,MAAU,MAAM,uCA
AuC,EAEzD,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAC3C,MAAO,EACR,CAAC,CACL,CAAC,CAWE,GAAc,EAAY,IAC9B,EAAO,IAAI,WAAa,CAEtB,IAAM,EAAkB,CACtB,GAAG,EACH,GAAG,EACJ,CAEK,EAAc,MAAO,GAAe,EAAG,CAG7C,GAAI,aAAuB,KAAM,CAC/B,IAAM,EAAc,MAAO,EAAO,YAChC,EAAY,aAAa,CAC1B,CACK,EAAQ,IAAI,WAAW,EAA2B,CAGlD,EAAY,EAAgB,UAC5BJ,EAAuB,EAAE,CAC/B,IAAK,IAAI,EAAI,EAAG,EAAI,EAAM,OAAQ,GAAK,EACrC,EAAO,KAAK,EAAM,MAAM,EAAG,KAAK,IAAI,EAAI,EAAW,EAAM,OAAO,CAAC,CAAC,CAEpE,OAAO,EAAO,aAAa,EAAO,CAIpC,OAAO,EAAO,MAAoC,GAAS,CACzD,IAAM,EAAS,EAAY,WAAW,CAChC,EAAY,EAAgB,UAC9B,EAAS,IAAI,WA+CjB,OA7CqB,SAAY,CAC/B,GAAI,CACF,OAAa,CACX,GAAM,CAAE,OAAM,SAAU,MAAM,EAAO,MAAM,CAE3C,GAAI,EAAM,CAEJ,EAAO,OAAS,GAClB,EAAK,OAAO,EAAO,CAErB,EAAK,KAAK,CACV,OAGF,GAAI,EAAO,CAET,IAAM,EAAW,IAAI,WAAW,EAAO,OAAS,EAAM,OAAO,CAM7D,IALA,EAAS,IAAI,EAAO,CACpB,EAAS,IAAI,EAAO,EAAO,OAAO,CAClC,EAAS,EAGF,EAAO,QAAU,GAAW,CACjC,IAAM,EAAQ,EAAO,MAAM,EAAG,EAAU,CACxC,EAAS,EAAO,MAAM,EAAU,CAChC,EAAK,OAAO,EAAM,UAIjB,EAAO,CACd,EAAK,KACH,IAAI,EAAgB,CAClB,KAAM,kBACN,OAAQ,IACR,KAAM,mCACN,QAAS,6BAA6B,OAAO,EAAM,GACpD,CAAC,CACH,KAKS,CAGP,EAAO,SAAW,CACvB,EAAO,aAAa,EACpB,EACF,EACF,CAEE,GAAQ,GACL,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAS,MAAO,EAAW,EAAG,CAG9BA,EAAuB,EAAE,CAC/B,MAAO,EAAO,WAAW,EAAS,GAChC,EAAO,SAAW,CAChB,EAAO,KAAK,EAAM,EAClB,CACH,CAGD,IAAM,EAAc,EAAO,QACxB,EAAK,IAAU,EAAM,EAAM,OAC5B,EACD,CACK,EAAS,IAAI,WAAW,EAAY,CACtC,EAAS,EACb,IAAK,IAAM,KAAS,EAClB,EAAO,IAAI,EAAO,EAAO,CACzB,GAAU,EAAM,OAGlB,OAAO,GACP,CAGE,IACJ,EACA,EACA,IAEO,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAa,MAAO,EAAQ,IAAI,EAAQ,CAKxC,GAHS,MAAO,EAAe,EAAQ,EAElB,OACW,EAEhC,EAAkB,MAAO,GAAwB,EAAQ,CAE/D,GAAI,EAAiB,CACnB,MAAO,EAAsB,EAAQ,CACrC,IAAM,EAAS,EAAgB,EAAgB,KACzC,EAAO,EAAgB,OAAO,KAAK,EAAO,OAAO,EAAY,CAAC,CACpE,MAAO,CACL,aACA,gBAAiB,EAAkB,EACnC,SACA,oBAAqB,EAAgB,KACrC,OACD,MAED,MAAO,CACL,aACA,kBACA,OAAQ,EACR,oBAAqB,EACrB,KAAM,EACP,EAEH,CAME,GACJ,EACA,IAIOK,EACL,EAAQ,QACRV,EACEW,EACA,EAAO,IAAI,WAAa,CACtB,IAAM,EAAY,KAAK,KAAK,CACtB,CACJ,OAAQ,EACR,UACA,OAAQ,GACN,EACE,CAAE,cAAe,EAQjB,CAAE,aAAY,kBAAiB,SAAQ,QANvB,MAAO,GAC3B,EACA,EACA,EACD,CAIK,CAAE,gBAAe,YAAa,MAAO,EACzC,EACA,EACA,EACA,EACA,EACD,CAEK,EAAY,EAAS,EAE3B,GAAI,EAAW,OAAS,EACtB,GAAI,CAEF,MAAO,EAAa,EAAY,EAAS,CAGzC,MAAO,EAAQ,IAAI,EAAS,CAC1B,GAAG,EACH,OAAQ,EACT,CAAC,CAGF,MAAO,EAAyB,EAAS,CACvC,SAAU,EAAW,MAAQ,EAC7B,gBAAiB,KAAK,KAAK,CAAG,EAC9B,WAAY,EAAS,OACrB,gBAAiB,EAAW,KAC5B,cAAe,EAAW,MAAQ,KAAK,KAAK,CAAG,GAC/C,WAAY,EACb,CAAC,CAEF,MAAOC,EAAmB,EAAO,QAAQ,EAAE,CAAC,CAC5C,MAAOJ,EAAmB,EAAO,QAAQ,GAAG,CAAC,OACtC,EAAO,CAed,MAdA,MAAO,EAAO,SAAS,0BAA0B,CAAC,KAChD,EAAO,aAAa,CAClB,UAAW,EACX,MAAO,KAAK,UAAU,EAAM,CAC7B,CAAC,CACH,CACD,MAAOK,EAAkB,EAAO,QAAQ,EAAE,CAAC,CAC3C,EAAO,QACL,EAAgB,QAAS,EAAO,CAC9B,UAAW,EACX,UAAW,SACX,OAAQ,EAAS,OAClB,CAAC,CACH,CACK,EAIV,OAAO,GACP,CACH,CACF,CAGG,GAAa,GACV,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAa,MAAO,EAAQ,IAAI,EAAG,CAErC,EAAS,EAEb,GAAI,CAEF,EAAS,EADM,MAAO,EAAe,EAAG,CACH,OAC9B,EAAO,CAEd,GACE,OAAO,GAAU,UACjB,GACA,eAAgB,GAChB,EAAM,aAAe,IAErB,MAAO,CACL,GAAG,EACH,OAAQ,EAAW,KACnB,KAAM,EAAW,KACjB,SAAU,EAAW,SACrB,QAAS,EAAW,QACrB,CASH,MANA,MAAO,EAAO,SAAS,sBAAsB,CAAC,KAC5C,EAAO,aAAa,CAClB,UAAW,EACX,MAAO,KAAK,UAAU,EAAM,CAC7B,CAAC,CACH,CACK,EAGR,IAAM,EAAsB,MAAO,GAAuB,EAAG,CAE7D,MAAO,CACL,GAAG,EACH,OAAQ,GAAU,GAAuB,GACzC,KAAM,EAAW,KACjB,QAAS,EAAW,QACrB,EACD,CAGE,GAAU,GACP,EAAO,IAAI,WAAa,CAC7B,GAAI,CACF,IAAM,EAAa,EAAgB,mBAAmB,EAAG,CACzD,MAAO,EAAO,YAAc,EAAW,gBAAgB,CAAC,CAGxD,MAAO,EAAsB,EAAG,OACzB,EAAO,CACd,GACE,OAAO,GAAU,UACjB,GACA,eAAgB,GAChB,EAAM,aAAe,IAOrB,OALA,MAAO,EAAO,SAAS,gBAAgB,CAAC,KACtC,EAAO,aAAa,CAClB,UAAW,EACZ,CAAC,CACH,CACM,MAAO,EAAO,KACnB,EAAgB,SAAS,iBAAiB,CAC3C,CAOH,MALA,EAAO,QACL,EAAgB,SAAU,EAAO,CAC/B,UAAW
,EACZ,CAAC,CACH,CACK,EAGR,MAAO,EAAW,EAAG,CACrB,MAAOL,EAAmB,EAAO,QAAQ,GAAG,CAAC,EAC7C,CAGE,MACG,EAGH,GAAqB,GAAuB,CAChD,IAAM,EAAO,IAAI,KAAK,EAAW,CACjC,OAAO,IAAI,KAAK,EAAK,SAAS,CAAG,GAAe,CAAC,EAG7C,OACG,EAAO,WAAW,CACvB,IAAK,SAA6B,CAChC,GAAI,GAAe,GAAK,EACtB,MAAO,GAGT,IAAI,EAAU,EAER,EAAW,EAAgB,cAAc,CAC7C,gBAAiB,GAClB,CAAC,CAEIM,EAAyB,EAAE,CAEjC,UAAW,IAAM,KAAQ,EACvB,GAAI,EAAK,UAAU,aAAc,CAC/B,IAAM,EAAe,IAAI,KAAK,EAAK,SAAS,aAAa,CAEvD,KAAK,KAAK,CACV,GAAkB,EAAa,aAAa,CAAC,CAAC,SAAS,EAEvD,EAAa,KAAK,EAAK,KAAK,CAMlC,IAAK,IAAM,KAAY,EACrB,MAAM,EAAgB,WAAW,EAAS,CAC1C,IAGF,OAAO,GAET,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAC3C,MAAO,EACR,CAAC,CACL,CAAC,CAGE,OACG,CACL,wBAAyB,GACzB,sBAAuB,GACvB,uBAAwB,GACxB,yBAA0B,GAC1B,6BAA8B,GAC9B,sBAAuB,GACvB,uBAAwB,GACxB,qBAAsB,EACtB,aAAc,EACd,aAAc,IAAO,KAAO,KAC5B,SAAU,EACV,iBAAkB,EAClB,sBAAuB,GACvB,2BAA4B,GAC5B,kBAAmB,IAAA,GACpB,EAmPH,MAAO,CACL,OAAQ,EACR,SACA,UACA,QACA,aACA,QACA,aACA,aAnNA,EACA,IAEAd,EACEW,EACA,EAAO,IAAI,WAAa,CACtB,IAAM,EAAY,KAAK,KAAK,CAE5B,MAAO,EAAO,QAAQ,oCAAoC,CAAC,KACzD,EAAO,aAAa,CAClB,UAAW,EACX,UAAW,EACX,UAAW,EAAQ,SACpB,CAAC,CACH,CAED,MAAOJ,EAAoB,EAAO,QAAQ,EAAE,CAAC,CAC7C,MAAOC,EAAmB,EAAO,QAAQ,EAAE,CAAC,CAG5C,IAAM,EAAkB,EAAqB,EAAQ,SAAS,CAGxD,EAAc,MAAO,EAAI,KAAe,EAAE,CAAC,CAC3C,EAAgB,MAAO,EAAI,KAAK,EAAE,CAClC,EAAiB,MAAO,EAAI,KAAK,EAAE,CACnC,EAAY,MAAO,EAAI,KAAK,IAAI,WAAc,CAG9C,GAAc,EAAkB,IACpC,EAAO,IAAI,WAAa,CAMtB,GALI,EAAK,SAAW,GAKhB,EAAK,OAAS,GAAgB,CAAC,EACjC,OAGF,IAAM,EAAc,MAAO,EAAI,aAC7B,EACC,GAAM,EAAI,EACZ,CAGK,EAAU,EACd,gBAAgB,EAAY,UAAU,CAAC,SAAS,EAAG,IAAI,GACxD,CAAC,SAAS,SAAS,CAEpB,MAAO,EAAO,SAAS,4BAA4B,CAAC,KAClD,EAAO,aAAa,CAClB,UAAW,EACX,aAAc,EACd,WAAY,EAAK,OACjB,eAAgB,EACjB,CAAC,CACH,CAED,IAAMO,EAAa,EAAgB,mBAAmB,EAAO,CAC7D,MAAO,EAAO,WAAW,CACvB,QAAWA,EAAW,WAAW,EAAS,EAAM,EAAK,OAAO,CAC5D,MAAQ,IACN,EAAO,QACL,EAAgB,cAAe,EAAO,CACpC,UAAW,EACX,aAAc,EACd,WAAY,EAAK,OAClB,CAAC,CACH,CACM,EAAgB,SAAS,mBAAoB,CAClD,MAAO,EACR,CAAC,EAEL,CAAC,CAEF,MAAO,EAAI,OAAO,EAAc,GAAQ,CAAC,GAAG,EAAK,EAAQ,CAAC,CAC1D,MAAOZ,EAAiB,EAAO,QAAQ,EAAE,CAAC,CAC1C,MAAOC,EAAkB,EAAO,QAAQ,EAAK,OAAO,CAAC,EACrD,CAGJ,MAAO,EAAQ,OAAO,KACpB,EAAO,WAAY,GACjB,EAAO,IAAI,WAAa,CAEtB,MAAO,EAAI,OAAO,EAAgB,GAAU,EAAQ,EAAM,OAAO,CAGjE,IAAM,EAAgB,MAAO,EAAI,IAAI,EAAU,CACzC,EAAW,IAAI,WACnB,EAAc,OAAS,EAAM,OAC9B,CACD,EAAS,IAAI,EAAc,CAC3B,EAAS,IAAI,EAAO,EAAc,OAAO,CAGzC,IAAI,EAAS,EACb,KAAO,EAAS,OAAS,GAAU,GAEjC,MAAO,EADW,EAAS,MAAM,EAAQ,EAAS,EAAgB,CACrC,GAAM,CACnC,GAAU,EAIZ,MAAO,EAAI,IAAI,EAAW,EAAS,MAAM,EAAO,CAAC,EACjD,CACH,CACF,CAGD,IAAM,EAAkB,MAAO,EAAI,IAAI,EAAU,CAC7C,EAAgB,OAAS,IAC3B,MAAO,EAAW,EAAiB,GAAK,EAI1C,IAAM,EAAW,MAAO,EAAI,IAAI,EAAY,CACtC,EAAa,MAAO,EAAI,IAAI,EAAc,CAEhD,GAAI,EAAS,SAAW,EAGtB,OADA,MAAOI,EAAmB,EAAO,QAAQ,GAAG,CAAC,CACtC,MAAO,EAAO,KACnB,IAAI,EAAgB,CAClB,KAAM,mBACN,OAAQ,IACR,KAAM,sCACN,QAAS,wCACV,CAAC,CACH,CAIH,IAAM,EAAa,EAAgB,mBAAmB,EAAO,CAC7D,MAAO,EAAO,WAAW,CACvB,QACE,EAAW,gBAAgB,EAAU,CACnC,gBAAiB,CACf,gBAAiB,EAAQ,YAC1B,CACF,CAAC,CACJ,MAAQ,IACN,EAAO,QACL,EAAgB,cAAe,EAAO,CACpC,UAAW,EACX,UAAW,SACX,OAAQ,EAAS,OAClB,CAAC,CACH,CACM,EAAgB,SAAS,mBAAoB,CAClD,MAAO,EACR,CAAC,EAEL,CAAC,CAIF,IAAM,EADU,KAAK,KAAK,CACQ,EAC5B,EACJ,EAAkB,EAAK,EAAa,IAAQ,EAAkB,EAC1D,EACJ,EAAS,OAAS,EAAI,EAAa,EAAS,OAAS,IAAA,GAwBvD,OAtBA,MAAO,EAAyB,EAAQ,CACtC,SAAU,EACV,kBACA,WAAY,EAAS,OACrB,gBAAiB,EACjB,gBACA,WAAY,EACb,CAAC,CAEF,MAAOI,EAAmB,EAAO,QAAQ,EAAE,CAAC,CAC5C,MAAOJ,EAAmB,EAAO,QAAQ,GAAG,CAAC,CAC7C,MAAOC,EAAkB,EAAO,QAAQ,EAAW,CAAC,CAEpD,MAAO,EAAO,QAAQ,qCAAqC,CAAC,KAC1D,EAAO,aAAa,CAClB,UAAW,EACX,YAAa,EACb,aAAc,EAAS,OACvB,YAAa,EACd,CAAC,CACH,CAEM,CACL,GAAI,EACJ,KAAM,EACN,KAAM,EACN,OAAQ,EACT,EACD,CAAC,KACD,EAAO,SAAU,GACf,EAAO,IAAI,WAAa,CAGtB,OAF
A,MAAOI,EAAkB,EAAO,QAAQ,EAAE,CAAC,CAC3C,MAAOL,EAAmB,EAAO,QAAQ,GAAG,CAAC,CACtC,MAAO,EAAO,KAAK,EAAM,EAChC,CACH,CACF,CACF,CAWD,cAAe,IAAe,CAC9B,kBACA,2BA1PO,CACL,aAAc,EACd,aAAc,IAAO,KAAO,KAC5B,iBAAkB,EAClB,sBAAuB,GACxB,EAsPD,uBAlPA,GACkC,CAClC,IAAM,EAAe,GAAiB,CAEhC,OAAgB,CACpB,OAAQ,EAAR,CACE,IAAK,WACH,OAAO,EAAa,wBACtB,IAAK,SACH,MAAO,GACT,QACE,MAAO,OAET,CAEJ,OAAO,EAAO,QAAQ,EAAO,EAoO9B,EACD"}
1
+ {"version":3,"file":"index.mjs","names":["blobServiceClient: BlobServiceClient","BlobService","containerClient: ContainerClient","withTimingMetrics","partUploadDurationHistogram","readStream","uploadPartsTotal","partSizeHistogram","chunks: Uint8Array[]","optimalBlockSize: number","uploadRequestsTotal","activeUploadsGauge","fileSizeHistogram","withUploadMetrics","uploadDurationHistogram","uploadSuccessTotal","uploadErrorsTotal","expiredBlobs: string[]","blobClient"],"sources":["../src/azure-store.ts"],"sourcesContent":["import type { TokenCredential } from \"@azure/core-auth\";\nimport {\n BlobServiceClient as BlobService,\n type BlobServiceClient,\n type ContainerClient,\n StorageSharedKeyCredential,\n} from \"@azure/storage-blob\";\nimport { UploadistaError } from \"@uploadista/core/errors\";\n\nimport {\n type DataStore,\n type DataStoreCapabilities,\n type DataStoreWriteOptions,\n DEFAULT_STREAMING_CONFIG,\n type StreamingConfig,\n type StreamWriteOptions,\n type StreamWriteResult,\n type UploadFile,\n UploadFileKVStore,\n type UploadStrategy,\n} from \"@uploadista/core/types\";\nimport {\n azureActiveUploadsGauge as activeUploadsGauge,\n azureFileSizeHistogram as fileSizeHistogram,\n logAzureUploadCompletion,\n azurePartSizeHistogram as partSizeHistogram,\n azurePartUploadDurationHistogram as partUploadDurationHistogram,\n trackAzureError,\n azureUploadDurationHistogram as uploadDurationHistogram,\n azureUploadErrorsTotal as uploadErrorsTotal,\n azureUploadPartsTotal as uploadPartsTotal,\n azureUploadRequestsTotal as uploadRequestsTotal,\n azureUploadSuccessTotal as uploadSuccessTotal,\n withAzureTimingMetrics as withTimingMetrics,\n withAzureUploadMetrics as withUploadMetrics,\n} from \"@uploadista/observability\";\nimport { Effect, Ref, Stream } from \"effect\";\n\n// Using base64 encoding that works in both Node.js and browser\nconst bufferFrom = (str: string) => {\n // Use global Buffer if available, otherwise fallback to btoa\n if (typeof globalThis !== \"undefined\" && \"Buffer\" in globalThis) {\n return (globalThis as any).Buffer.from(str);\n }\n // Fallback for browser environments\n return new Uint8Array(Array.from(str, (c) => c.charCodeAt(0)));\n};\n\nexport type ChunkInfo = {\n blockNumber: number;\n data: Uint8Array;\n size: number;\n isFinalPart?: boolean;\n};\n\nexport type AzureStoreOptions = {\n deliveryUrl: string;\n /**\n * The preferred block size for blocks sent to Azure. Can not be lower than 1 byte or more than 4000MiB.\n * The server calculates the optimal block size, which takes this size into account,\n * but may increase it to not exceed the Azure 50K blocks limit.\n */\n blockSize?: number;\n /**\n * The minimal block size for blocks.\n * Can be used to ensure that all non-trailing blocks are exactly the same size.\n * Can not be lower than 1 byte or more than 4000MiB.\n */\n minBlockSize?: number;\n /**\n * The maximum number of blocks allowed in a block blob upload. 
Defaults to 50,000.\n */\n maxBlocks?: number;\n maxConcurrentBlockUploads?: number;\n expirationPeriodInMilliseconds?: number;\n // Azure authentication options (choose one)\n connectionString?: string;\n /**\n * SAS URL for the storage account (works in all environments including browsers)\n * Format: https://<account>.blob.core.windows.net?<sas-token>\n */\n sasUrl?: string;\n /**\n * TokenCredential for OAuth authentication (e.g., DefaultAzureCredential)\n * Works in all environments and is the recommended approach for production\n */\n credential?: TokenCredential;\n /**\n * Account name and key for shared key authentication (Node.js only)\n * @deprecated Use sasUrl or credential instead for cross-platform compatibility\n */\n accountName?: string;\n /**\n * @deprecated Use sasUrl or credential instead for cross-platform compatibility\n */\n accountKey?: string;\n containerName: string;\n};\n\nfunction calcOffsetFromBlocks(blocks?: Array<{ size: number }>) {\n return blocks && blocks.length > 0\n ? blocks.reduce((a, b) => a + (b?.size ?? 0), 0)\n : 0;\n}\n\nexport type AzureStore = DataStore<UploadFile> & {\n getUpload: (id: string) => Effect.Effect<UploadFile, UploadistaError>;\n readStream: (\n id: string,\n config?: StreamingConfig,\n ) => Effect.Effect<\n Stream.Stream<Uint8Array, UploadistaError>,\n UploadistaError\n >;\n getChunkerConstraints: () => {\n minChunkSize: number;\n maxChunkSize: number;\n optimalChunkSize: number;\n requiresOrderedChunks: boolean;\n };\n};\n\nexport function azureStore({\n deliveryUrl,\n blockSize,\n minBlockSize = 1024, // 1KB minimum\n maxBlocks = 50_000,\n maxConcurrentBlockUploads = 60,\n expirationPeriodInMilliseconds = 1000 * 60 * 60 * 24 * 7, // 1 week\n connectionString,\n sasUrl,\n credential,\n accountName,\n accountKey,\n containerName,\n}: AzureStoreOptions) {\n return Effect.gen(function* () {\n const kvStore = yield* UploadFileKVStore;\n const preferredBlockSize = blockSize || 8 * 1024 * 1024; // 8MB default\n const maxUploadSize = 5_497_558_138_880 as const; // 5TiB (Azure Block Blob limit)\n\n // Initialize Azure Blob Service Client with cross-platform authentication\n let blobServiceClient: BlobServiceClient;\n\n if (connectionString) {\n // Connection string (works in all environments)\n blobServiceClient = BlobService.fromConnectionString(connectionString);\n } else if (sasUrl) {\n // SAS URL (works in all environments including browsers)\n blobServiceClient = new BlobService(sasUrl);\n } else if (credential) {\n // OAuth token credential (works in all environments, recommended for production)\n const accountUrl = accountName\n ? `https://${accountName}.blob.core.windows.net`\n : sasUrl?.split(\"?\")[0] || \"\";\n if (!accountUrl) {\n throw new Error(\n \"When using credential authentication, either accountName or a valid sasUrl must be provided to determine the account URL\",\n );\n }\n blobServiceClient = new BlobService(accountUrl, credential);\n } else if (accountName && accountKey) {\n // Legacy shared key authentication (Node.js only)\n // This will fail in browser/edge environments\n try {\n const sharedKeyCredential = new StorageSharedKeyCredential(\n accountName,\n accountKey,\n );\n blobServiceClient = new BlobService(\n `https://${accountName}.blob.core.windows.net`,\n sharedKeyCredential,\n );\n } catch (error) {\n throw new Error(\n \"StorageSharedKeyCredential is only available in Node.js environments. \" +\n \"Use sasUrl or credential options for cross-platform compatibility. 
\" +\n `Original error: ${error}`,\n );\n }\n } else {\n throw new Error(\n \"Azure authentication required. Provide one of: \" +\n \"connectionString, sasUrl, credential, or accountName + accountKey (Node.js only)\",\n );\n }\n\n const containerClient: ContainerClient =\n blobServiceClient.getContainerClient(containerName);\n\n const incompletePartKey = (id: string) => {\n return `${id}.incomplete`;\n };\n\n const uploadBlock = (\n uploadFile: UploadFile,\n readStream: Uint8Array,\n blockId: string,\n ) => {\n return withTimingMetrics(\n partUploadDurationHistogram,\n Effect.gen(function* () {\n yield* Effect.logInfo(\"Uploading block\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n block_id: blockId,\n block_size: readStream.length,\n }),\n );\n\n yield* uploadPartsTotal(Effect.succeed(1));\n yield* partSizeHistogram(Effect.succeed(readStream.length));\n\n try {\n const blobClient = containerClient.getBlockBlobClient(\n uploadFile.id,\n );\n yield* Effect.tryPromise({\n try: async () => {\n await blobClient.stageBlock(\n blockId,\n readStream,\n readStream.length,\n );\n },\n catch: (error) => {\n Effect.runSync(\n trackAzureError(\"uploadBlock\", error, {\n upload_id: uploadFile.id,\n block_id: blockId,\n block_size: readStream.length,\n }),\n );\n return UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n });\n },\n });\n\n yield* Effect.logInfo(\"Finished uploading block\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n block_id: blockId,\n block_size: readStream.length,\n }),\n );\n } catch (error) {\n Effect.runSync(\n trackAzureError(\"uploadBlock\", error, {\n upload_id: uploadFile.id,\n block_id: blockId,\n block_size: readStream.length,\n }),\n );\n throw error;\n }\n }),\n );\n };\n\n const uploadIncompleteBlock = (id: string, readStream: Uint8Array) => {\n return Effect.tryPromise({\n try: async () => {\n const blobClient = containerClient.getBlockBlobClient(\n incompletePartKey(id),\n );\n await blobClient.upload(readStream, readStream.length);\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n }),\n }).pipe(\n Effect.tap(() =>\n Effect.logInfo(\"Finished uploading incomplete block\").pipe(\n Effect.annotateLogs({\n upload_id: id,\n }),\n ),\n ),\n );\n };\n\n const getIncompleteBlock = (id: string) => {\n return Effect.tryPromise({\n try: async () => {\n try {\n const blobClient = containerClient.getBlockBlobClient(\n incompletePartKey(id),\n );\n const response = await blobClient.download();\n return response.readableStreamBody as unknown as ReadableStream;\n } catch (error) {\n if (\n error &&\n typeof error === \"object\" &&\n \"statusCode\" in error &&\n error.statusCode === 404\n ) {\n return undefined;\n }\n throw error;\n }\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n }),\n });\n };\n\n const getIncompleteBlockSize = (id: string) => {\n return Effect.tryPromise({\n try: async () => {\n try {\n const blobClient = containerClient.getBlockBlobClient(\n incompletePartKey(id),\n );\n const properties = await blobClient.getProperties();\n return properties.contentLength;\n } catch (error) {\n if (\n error &&\n typeof error === \"object\" &&\n \"statusCode\" in error &&\n error.statusCode === 404\n ) {\n return undefined;\n }\n throw error;\n }\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n }),\n });\n };\n\n const deleteIncompleteBlock = (id: string) => {\n 
return Effect.tryPromise({\n try: async () => {\n const blobClient = containerClient.getBlockBlobClient(\n incompletePartKey(id),\n );\n await blobClient.deleteIfExists();\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n }),\n });\n };\n\n const downloadIncompleteBlock = (id: string) => {\n return Effect.gen(function* () {\n const incompleteBlock = yield* getIncompleteBlock(id);\n\n if (!incompleteBlock) {\n return;\n }\n\n // Read the stream and collect all chunks to calculate size\n const reader = incompleteBlock.getReader();\n const chunks: Uint8Array[] = [];\n let incompleteBlockSize = 0;\n\n try {\n while (true) {\n const result = yield* Effect.promise(() => reader.read());\n if (result.done) break;\n chunks.push(result.value);\n incompleteBlockSize += result.value.length;\n }\n } finally {\n reader.releaseLock();\n }\n\n // Create a new readable stream from the chunks\n const stream = Stream.fromIterable(chunks);\n\n return {\n size: incompleteBlockSize,\n stream,\n };\n });\n };\n\n const calcOptimalBlockSize = (initSize?: number): number => {\n const size = initSize ?? maxUploadSize;\n let optimalBlockSize: number;\n\n if (size <= preferredBlockSize) {\n optimalBlockSize = size;\n } else if (size <= preferredBlockSize * maxBlocks) {\n optimalBlockSize = preferredBlockSize;\n } else {\n // Calculate the minimum block size needed to fit within the max blocks limit\n optimalBlockSize = Math.ceil(size / maxBlocks);\n }\n\n // Ensure the block size respects the minimum and is aligned properly\n const finalBlockSize = Math.max(optimalBlockSize, minBlockSize);\n\n // Round up to ensure consistent block sizes\n return Math.ceil(finalBlockSize / 1024) * 1024; // Align to 1KB boundaries\n };\n\n // Proper single-pass chunking using Effect's async stream constructor\n // Ensures all parts except the final part are exactly the same size (S3 requirement)\n const createChunkedStream =\n (chunkSize: number) =>\n <E>(\n stream: Stream.Stream<Uint8Array, E>,\n ): Stream.Stream<ChunkInfo, E> => {\n return Stream.async<ChunkInfo, E>((emit) => {\n let buffer = new Uint8Array(0);\n let blockNumber = 1;\n let totalBytesProcessed = 0;\n\n const emitChunk = (data: Uint8Array, isFinalChunk = false) => {\n // Log chunk information for debugging - use INFO level to see in logs\n Effect.runSync(\n Effect.logInfo(\"Creating chunk\").pipe(\n Effect.annotateLogs({\n block_number: blockNumber,\n chunk_size: data.length,\n expected_size: chunkSize,\n is_final_chunk: isFinalChunk,\n total_bytes_processed: totalBytesProcessed + data.length,\n }),\n ),\n );\n emit.single({\n blockNumber: blockNumber++,\n data,\n size: data.length,\n });\n };\n\n const processChunk = (newData: Uint8Array) => {\n // Combine buffer with new data\n const combined = new Uint8Array(buffer.length + newData.length);\n combined.set(buffer);\n combined.set(newData, buffer.length);\n buffer = combined;\n totalBytesProcessed += newData.length;\n\n // Emit full chunks of exactly chunkSize bytes\n // This ensures S3 multipart upload rule: all parts except last must be same size\n while (buffer.length >= chunkSize) {\n const chunk = buffer.slice(0, chunkSize);\n buffer = buffer.slice(chunkSize);\n emitChunk(chunk, false);\n }\n };\n\n // Process the stream\n Effect.runFork(\n stream.pipe(\n Stream.runForEach((chunk) =>\n Effect.sync(() => processChunk(chunk)),\n ),\n Effect.andThen(() =>\n Effect.sync(() => {\n // Emit final chunk if there's remaining data\n // The final chunk can be any size < 
chunkSize (S3 allows this)\n if (buffer.length > 0) {\n emitChunk(buffer, true);\n }\n emit.end();\n }),\n ),\n Effect.catchAll((error) => Effect.sync(() => emit.fail(error))),\n ),\n );\n });\n };\n\n // Byte-level progress tracking during streaming\n // This provides smooth, immediate progress feedback by tracking bytes as they\n // flow through the stream, before they reach S3. This solves the issue where\n // small files (< 5MB) would jump from 0% to 100% instantly.\n const withByteProgressTracking =\n (onProgress?: (totalBytes: number) => void, initialOffset = 0) =>\n <E, R>(stream: Stream.Stream<Uint8Array, E, R>) => {\n if (!onProgress) return stream;\n\n return Effect.gen(function* () {\n const totalBytesProcessedRef = yield* Ref.make(initialOffset);\n\n return stream.pipe(\n Stream.tap((chunk) =>\n Effect.gen(function* () {\n const newTotal = yield* Ref.updateAndGet(\n totalBytesProcessedRef,\n (total) => total + chunk.length,\n );\n onProgress(newTotal);\n }),\n ),\n );\n }).pipe(Stream.unwrap);\n };\n\n /**\n * Uploads a stream to Azure using multiple blocks\n */\n const uploadBlocks = (\n uploadFile: UploadFile,\n readStream: Stream.Stream<Uint8Array, UploadistaError>,\n initCurrentBlockNumber: number,\n initOffset: number,\n onProgress?: (newOffset: number) => void,\n ) => {\n return Effect.gen(function* () {\n yield* Effect.logInfo(\"Uploading blocks\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n init_offset: initOffset,\n file_size: uploadFile.size,\n }),\n );\n\n const size = uploadFile.size;\n\n const uploadBlockSize = calcOptimalBlockSize(size);\n yield* Effect.logInfo(\"Block size\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n block_size: uploadBlockSize,\n }),\n );\n // Enhanced Progress Tracking Strategy:\n // 1. Byte-level progress during streaming - provides immediate, smooth feedback\n // as data flows through the pipeline (even for small files)\n // 2. This tracks progress BEFORE S3 upload, giving users immediate feedback\n // 3. For large files with multiple parts, this provides granular updates\n // 4. 
For small files (single part), this prevents 0%->100% jumps\n const chunkStream = readStream.pipe(\n // Add byte-level progress tracking during streaming (immediate feedback)\n withByteProgressTracking(onProgress, initOffset),\n // Create chunks for S3 multipart upload with uniform part sizes\n createChunkedStream(uploadBlockSize),\n );\n\n // Track cumulative offset and total bytes with Effect Refs\n const cumulativeOffsetRef = yield* Ref.make(initOffset);\n const totalBytesUploadedRef = yield* Ref.make(0);\n const blockIdsRef = yield* Ref.make<string[]>([]);\n // Create a chunk upload function for the sink\n const uploadChunk = (chunkInfo: ChunkInfo) =>\n Effect.gen(function* () {\n // Calculate cumulative bytes to determine if this is the final block\n const cumulativeOffset = yield* Ref.updateAndGet(\n cumulativeOffsetRef,\n (offset) => offset + chunkInfo.size,\n );\n const isFinalBlock = cumulativeOffset >= (uploadFile.size || 0);\n\n yield* Effect.logDebug(\"Processing chunk\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n cumulative_offset: cumulativeOffset,\n file_size: uploadFile.size,\n chunk_size: chunkInfo.size,\n is_final_block: isFinalBlock,\n }),\n );\n\n const actualBlockNumber =\n initCurrentBlockNumber + chunkInfo.blockNumber - 1;\n\n if (chunkInfo.size > uploadBlockSize) {\n yield* Effect.fail(\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: new Error(\n `Block size ${chunkInfo.size} exceeds upload block size ${uploadBlockSize}`,\n ),\n }),\n );\n }\n\n // For parts that meet the minimum part size (5MB) or are the final part,\n // upload them as regular multipart parts\n if (chunkInfo.size >= minBlockSize || isFinalBlock) {\n yield* Effect.logDebug(\"Uploading multipart chunk\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n block_number: actualBlockNumber,\n chunk_size: chunkInfo.size,\n min_block_size: minBlockSize,\n is_final_block: isFinalBlock,\n }),\n );\n // Generate block ID (base64 encoded, must be consistent)\n const blockId = bufferFrom(\n `block-${actualBlockNumber.toString().padStart(6, \"0\")}`,\n ).toString(\"base64\");\n yield* uploadBlock(uploadFile, chunkInfo.data, blockId);\n yield* Ref.update(blockIdsRef, (ids) => [...ids, blockId]);\n yield* partSizeHistogram(Effect.succeed(chunkInfo.size));\n } else {\n // Only upload as incomplete part if it's smaller than minimum and not final\n yield* uploadIncompleteBlock(uploadFile.id, chunkInfo.data);\n }\n\n yield* Ref.update(\n totalBytesUploadedRef,\n (total) => total + chunkInfo.size,\n );\n\n // Note: Byte-level progress is now tracked during streaming phase\n // This ensures smooth progress updates regardless of part size\n // Azure upload completion is tracked via totalBytesUploadedRef for accuracy\n });\n\n // Process chunks concurrently with controlled concurrency\n yield* chunkStream.pipe(\n Stream.runForEach((chunkInfo) => uploadChunk(chunkInfo)),\n Effect.withConcurrency(maxConcurrentBlockUploads),\n );\n\n return {\n bytesUploaded: yield* Ref.get(totalBytesUploadedRef),\n blockIds: yield* Ref.get(blockIdsRef),\n };\n });\n };\n\n /**\n * Commits all staged blocks to create the final blob\n */\n const commitBlocks = (uploadFile: UploadFile, blockIds: string[]) => {\n return Effect.tryPromise({\n try: async () => {\n const blobClient = containerClient.getBlockBlobClient(uploadFile.id);\n await blobClient.commitBlockList(blockIds, {\n blobHTTPHeaders: {\n blobContentType: uploadFile.metadata?.contentType?.toString(),\n blobCacheControl: 
uploadFile.metadata?.cacheControl?.toString(),\n },\n });\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n }),\n });\n };\n\n /**\n * Gets the committed blocks for a blob\n */\n const retrieveBlocks = (id: string) => {\n return Effect.tryPromise({\n try: async () => {\n try {\n const blobClient = containerClient.getBlockBlobClient(id);\n const blockList = await blobClient.getBlockList(\"committed\");\n\n const blocks =\n blockList.committedBlocks?.map((block) => ({\n size: block.size,\n })) ?? [];\n\n return blocks;\n } catch (error) {\n if (\n error &&\n typeof error === \"object\" &&\n \"statusCode\" in error &&\n error.statusCode === 404\n ) {\n return [];\n }\n throw error;\n }\n },\n catch: (error) =>\n UploadistaError.fromCode(\"UPLOAD_ID_NOT_FOUND\", {\n cause: error as Error,\n }),\n });\n };\n\n /**\n * Removes cached data for a given file\n */\n const clearCache = (id: string) => {\n return Effect.gen(function* () {\n yield* Effect.logInfo(\"Removing cached data\").pipe(\n Effect.annotateLogs({\n upload_id: id,\n }),\n );\n yield* kvStore.delete(id);\n });\n };\n\n /**\n * Creates a blob placeholder in Azure and stores metadata\n */\n const create = (upload: UploadFile) => {\n return Effect.gen(function* () {\n yield* uploadRequestsTotal(Effect.succeed(1));\n yield* activeUploadsGauge(Effect.succeed(1));\n yield* fileSizeHistogram(Effect.succeed(upload.size || 0));\n\n yield* Effect.logInfo(\"Initializing Azure blob upload\").pipe(\n Effect.annotateLogs({\n upload_id: upload.id,\n }),\n );\n\n upload.creationDate = new Date().toISOString();\n upload.storage = {\n id: upload.storage.id,\n type: upload.storage.type,\n path: upload.id,\n bucket: containerName,\n };\n upload.url = `${deliveryUrl}/${upload.id}`;\n\n yield* kvStore.set(upload.id, upload);\n yield* Effect.logInfo(\"Azure blob upload initialized\").pipe(\n Effect.annotateLogs({\n upload_id: upload.id,\n }),\n );\n\n return upload;\n });\n };\n\n /**\n * Internal helper to get raw Azure stream (for backward compatibility).\n */\n const getAzureStream = (\n id: string,\n ): Effect.Effect<ReadableStream | Blob, UploadistaError> => {\n return Effect.tryPromise({\n try: async () => {\n const blobClient = containerClient.getBlockBlobClient(id);\n const response = await blobClient.download();\n if (response.blobBody) {\n return response.blobBody;\n }\n if (response.readableStreamBody) {\n return response.readableStreamBody as unknown as ReadableStream;\n }\n throw new Error(\"No blob body or readable stream body\");\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n }),\n });\n };\n\n /**\n * Reads file content as a stream of chunks for memory-efficient processing.\n * Uses Azure BlobClient.download and converts to an Effect Stream.\n *\n * @param id - The unique identifier of the file to read\n * @param config - Optional streaming configuration (chunk size)\n * @returns An Effect that resolves to a Stream of byte chunks\n */\n const readStream = (id: string, config?: StreamingConfig) =>\n Effect.gen(function* () {\n // Merge config with defaults\n const effectiveConfig = {\n ...DEFAULT_STREAMING_CONFIG,\n ...config,\n };\n\n const azureStream = yield* getAzureStream(id);\n\n // Handle Blob type (browser environment)\n if (azureStream instanceof Blob) {\n const arrayBuffer = yield* Effect.promise(() =>\n azureStream.arrayBuffer(),\n );\n const bytes = new Uint8Array(arrayBuffer as ArrayBuffer);\n\n // Convert to chunked 
stream\n const chunkSize = effectiveConfig.chunkSize;\n const chunks: Uint8Array[] = [];\n for (let i = 0; i < bytes.length; i += chunkSize) {\n chunks.push(bytes.slice(i, Math.min(i + chunkSize, bytes.length)));\n }\n return Stream.fromIterable(chunks);\n }\n\n // Handle ReadableStream type\n return Stream.async<Uint8Array, UploadistaError>((emit) => {\n const reader = azureStream.getReader();\n const chunkSize = effectiveConfig.chunkSize;\n let buffer = new Uint8Array(0);\n\n const processChunk = async () => {\n try {\n while (true) {\n const { done, value } = await reader.read();\n\n if (done) {\n // Emit any remaining data in buffer\n if (buffer.length > 0) {\n emit.single(buffer);\n }\n emit.end();\n return;\n }\n\n if (value) {\n // Combine buffer with new value\n const combined = new Uint8Array(buffer.length + value.length);\n combined.set(buffer);\n combined.set(value, buffer.length);\n buffer = combined;\n\n // Emit chunks of the configured size\n while (buffer.length >= chunkSize) {\n const chunk = buffer.slice(0, chunkSize);\n buffer = buffer.slice(chunkSize);\n emit.single(chunk);\n }\n }\n }\n } catch (error) {\n emit.fail(\n new UploadistaError({\n code: \"FILE_READ_ERROR\",\n status: 500,\n body: \"Failed to read Azure blob stream\",\n details: `Azure stream read failed: ${String(error)}`,\n }),\n );\n }\n };\n\n // Start processing\n processChunk();\n\n // Cleanup function\n return Effect.sync(() => {\n reader.releaseLock();\n });\n });\n });\n\n const read = (id: string): Effect.Effect<Uint8Array, UploadistaError> => {\n return Effect.gen(function* () {\n const stream = yield* readStream(id);\n\n // Collect all chunks from the Effect Stream\n const chunks: Uint8Array[] = [];\n yield* Stream.runForEach(stream, (chunk) =>\n Effect.sync(() => {\n chunks.push(chunk);\n }),\n );\n\n // Concatenate all chunks\n const totalLength = chunks.reduce(\n (acc, chunk) => acc + chunk.length,\n 0,\n );\n const result = new Uint8Array(totalLength);\n let offset = 0;\n for (const chunk of chunks) {\n result.set(chunk, offset);\n offset += chunk.length;\n }\n\n return result;\n });\n };\n\n const prepareUpload = (\n file_id: string,\n initialOffset: number,\n initialData: Stream.Stream<Uint8Array, UploadistaError>,\n ) => {\n return Effect.gen(function* () {\n const uploadFile = yield* kvStore.get(file_id);\n\n const blocks = yield* retrieveBlocks(file_id);\n\n const blockNumber = blocks.length;\n const nextBlockNumber = blockNumber + 1;\n\n const incompleteBlock = yield* downloadIncompleteBlock(file_id);\n\n if (incompleteBlock) {\n yield* deleteIncompleteBlock(file_id);\n const offset = initialOffset - incompleteBlock.size;\n const data = incompleteBlock.stream.pipe(Stream.concat(initialData));\n return {\n uploadFile,\n nextBlockNumber: nextBlockNumber - 1,\n offset,\n incompleteBlockSize: incompleteBlock.size,\n data,\n };\n } else {\n return {\n uploadFile,\n nextBlockNumber,\n offset: initialOffset,\n incompleteBlockSize: 0,\n data: initialData,\n };\n }\n });\n };\n\n /**\n * Write to the file, starting at the provided offset\n */\n const write = (\n options: DataStoreWriteOptions,\n dependencies: {\n onProgress?: (chunkSize: number) => void;\n },\n ) => {\n return withUploadMetrics(\n options.file_id,\n withTimingMetrics(\n uploadDurationHistogram,\n Effect.gen(function* () {\n const startTime = Date.now();\n const {\n stream: initialData,\n file_id,\n offset: initialOffset,\n } = options;\n const { onProgress } = dependencies;\n\n const prepareResult = yield* prepareUpload(\n 
file_id,\n initialOffset,\n initialData,\n );\n\n const { uploadFile, nextBlockNumber, offset, data } = prepareResult;\n\n const { bytesUploaded, blockIds } = yield* uploadBlocks(\n uploadFile,\n data,\n nextBlockNumber,\n offset,\n onProgress,\n );\n\n const newOffset = offset + bytesUploaded;\n\n if (uploadFile.size === newOffset) {\n try {\n // Commit all blocks to finalize the blob\n yield* commitBlocks(uploadFile, blockIds);\n\n // Update the upload file with the final offset in the KV store\n yield* kvStore.set(file_id, {\n ...uploadFile,\n offset: newOffset,\n });\n\n // Log completion with observability\n yield* logAzureUploadCompletion(file_id, {\n fileSize: uploadFile.size || 0,\n totalDurationMs: Date.now() - startTime,\n partsCount: blockIds.length,\n averagePartSize: uploadFile.size,\n throughputBps: uploadFile.size / (Date.now() - startTime),\n retryCount: 0,\n });\n\n yield* uploadSuccessTotal(Effect.succeed(1));\n yield* activeUploadsGauge(Effect.succeed(-1));\n } catch (error) {\n yield* Effect.logError(\"Failed to finish upload\").pipe(\n Effect.annotateLogs({\n upload_id: file_id,\n error: JSON.stringify(error),\n }),\n );\n yield* uploadErrorsTotal(Effect.succeed(1));\n Effect.runSync(\n trackAzureError(\"write\", error, {\n upload_id: file_id,\n operation: \"commit\",\n blocks: blockIds.length,\n }),\n );\n throw error;\n }\n }\n\n return newOffset;\n }),\n ),\n );\n };\n\n const getUpload = (id: string) => {\n return Effect.gen(function* () {\n const uploadFile = yield* kvStore.get(id);\n\n let offset = 0;\n\n try {\n const blocks = yield* retrieveBlocks(id);\n offset = calcOffsetFromBlocks(blocks);\n } catch (error) {\n // Check if the error is caused by the blob not being found\n if (\n typeof error === \"object\" &&\n error !== null &&\n \"statusCode\" in error &&\n error.statusCode === 404\n ) {\n return {\n ...uploadFile,\n offset: uploadFile.size as number,\n size: uploadFile.size,\n metadata: uploadFile.metadata,\n storage: uploadFile.storage,\n };\n }\n\n yield* Effect.logError(\"Error on get upload\").pipe(\n Effect.annotateLogs({\n upload_id: id,\n error: JSON.stringify(error),\n }),\n );\n throw error;\n }\n\n const incompleteBlockSize = yield* getIncompleteBlockSize(id);\n\n return {\n ...uploadFile,\n offset: offset + (incompleteBlockSize ?? 
0),\n size: uploadFile.size,\n storage: uploadFile.storage,\n };\n });\n };\n\n const remove = (id: string) => {\n return Effect.gen(function* () {\n try {\n const blobClient = containerClient.getBlockBlobClient(id);\n yield* Effect.promise(() => blobClient.deleteIfExists());\n\n // Also delete incomplete block if it exists\n yield* deleteIncompleteBlock(id);\n } catch (error) {\n if (\n typeof error === \"object\" &&\n error !== null &&\n \"statusCode\" in error &&\n error.statusCode === 404\n ) {\n yield* Effect.logError(\"No file found\").pipe(\n Effect.annotateLogs({\n upload_id: id,\n }),\n );\n return yield* Effect.fail(\n UploadistaError.fromCode(\"FILE_NOT_FOUND\"),\n );\n }\n Effect.runSync(\n trackAzureError(\"remove\", error, {\n upload_id: id,\n }),\n );\n throw error;\n }\n\n yield* clearCache(id);\n yield* activeUploadsGauge(Effect.succeed(-1));\n });\n };\n\n const getExpiration = () => {\n return expirationPeriodInMilliseconds;\n };\n\n const getExpirationDate = (created_at: string) => {\n const date = new Date(created_at);\n return new Date(date.getTime() + getExpiration());\n };\n\n const deleteExpired = (): Effect.Effect<number, UploadistaError> => {\n return Effect.tryPromise({\n try: async (): Promise<number> => {\n if (getExpiration() === 0) {\n return 0;\n }\n\n let deleted = 0;\n\n const response = containerClient.listBlobsFlat({\n includeMetadata: true,\n });\n\n const expiredBlobs: string[] = [];\n\n for await (const blob of response) {\n if (blob.metadata?.creationDate) {\n const creationDate = new Date(blob.metadata.creationDate);\n if (\n Date.now() >\n getExpirationDate(creationDate.toISOString()).getTime()\n ) {\n expiredBlobs.push(blob.name);\n }\n }\n }\n\n // Delete expired blobs\n for (const blobName of expiredBlobs) {\n await containerClient.deleteBlob(blobName);\n deleted++;\n }\n\n return deleted;\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n }),\n });\n };\n\n const getCapabilities = (): DataStoreCapabilities => {\n return {\n supportsParallelUploads: true,\n supportsConcatenation: false, // Azure doesn't have native concatenation like GCS\n supportsDeferredLength: true,\n supportsResumableUploads: true,\n supportsTransactionalUploads: true,\n supportsStreamingRead: true, // Supports streaming reads via BlobClient.download\n supportsStreamingWrite: true, // Supports streaming writes via block staging\n maxConcurrentUploads: maxConcurrentBlockUploads,\n minChunkSize: minBlockSize,\n maxChunkSize: 4000 * 1024 * 1024, // 4000MB Azure limit\n maxParts: maxBlocks,\n optimalChunkSize: preferredBlockSize,\n requiresOrderedChunks: false,\n requiresMimeTypeValidation: true,\n maxValidationSize: undefined, // no size limit\n };\n };\n\n const getChunkerConstraints = () => {\n return {\n minChunkSize: minBlockSize,\n maxChunkSize: 4000 * 1024 * 1024, // 4000MB Azure limit\n optimalChunkSize: preferredBlockSize,\n requiresOrderedChunks: false,\n };\n };\n\n const validateUploadStrategy = (\n strategy: UploadStrategy,\n ): Effect.Effect<boolean, never> => {\n const capabilities = getCapabilities();\n\n const result = (() => {\n switch (strategy) {\n case \"parallel\":\n return capabilities.supportsParallelUploads;\n case \"single\":\n return true;\n default:\n return false;\n }\n })();\n\n return Effect.succeed(result);\n };\n\n /**\n * Writes file content from a stream without knowing the final size upfront.\n * Uses Azure block blob staging to stream content as blocks are buffered.\n *\n * @param fileId - 
The unique identifier for the file\n * @param options - Stream write options including the Effect Stream\n * @returns StreamWriteResult with final size after stream completes\n */\n const writeStream = (\n fileId: string,\n options: StreamWriteOptions,\n ): Effect.Effect<StreamWriteResult, UploadistaError> =>\n withTimingMetrics(\n uploadDurationHistogram,\n Effect.gen(function* () {\n const startTime = Date.now();\n\n yield* Effect.logInfo(\"Starting streaming write to Azure\").pipe(\n Effect.annotateLogs({\n upload_id: fileId,\n container: containerName,\n size_hint: options.sizeHint,\n }),\n );\n\n yield* uploadRequestsTotal(Effect.succeed(1));\n yield* activeUploadsGauge(Effect.succeed(1));\n\n // Calculate optimal block size based on size hint or use default\n const uploadBlockSize = calcOptimalBlockSize(options.sizeHint);\n\n // Track blocks and total bytes\n const blockIdsRef = yield* Ref.make<string[]>([]);\n const totalBytesRef = yield* Ref.make(0);\n const blockNumberRef = yield* Ref.make(1);\n const bufferRef = yield* Ref.make(new Uint8Array(0));\n\n // Helper to stage a block\n const stageBlock = (data: Uint8Array, isFinalBlock: boolean) =>\n Effect.gen(function* () {\n if (data.length === 0) {\n return;\n }\n\n // Only stage if we have enough data or it's the final block\n if (data.length < minBlockSize && !isFinalBlock) {\n return;\n }\n\n const blockNumber = yield* Ref.getAndUpdate(\n blockNumberRef,\n (n) => n + 1,\n );\n\n // Generate block ID (base64 encoded, must be consistent length)\n const blockId = bufferFrom(\n `stream-block-${blockNumber.toString().padStart(6, \"0\")}`,\n ).toString(\"base64\");\n\n yield* Effect.logDebug(\"Staging block from stream\").pipe(\n Effect.annotateLogs({\n upload_id: fileId,\n block_number: blockNumber,\n block_size: data.length,\n is_final_block: isFinalBlock,\n }),\n );\n\n const blobClient = containerClient.getBlockBlobClient(fileId);\n yield* Effect.tryPromise({\n try: () => blobClient.stageBlock(blockId, data, data.length),\n catch: (error) => {\n Effect.runSync(\n trackAzureError(\"writeStream\", error, {\n upload_id: fileId,\n block_number: blockNumber,\n block_size: data.length,\n }),\n );\n return UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n });\n },\n });\n\n yield* Ref.update(blockIdsRef, (ids) => [...ids, blockId]);\n yield* uploadPartsTotal(Effect.succeed(1));\n yield* partSizeHistogram(Effect.succeed(data.length));\n });\n\n // Process stream chunks\n yield* options.stream.pipe(\n Stream.runForEach((chunk) =>\n Effect.gen(function* () {\n // Update total bytes\n yield* Ref.update(\n totalBytesRef,\n (total) => total + chunk.length,\n );\n\n // Get current buffer and append new chunk\n const currentBuffer = yield* Ref.get(bufferRef);\n const combined = new Uint8Array(\n currentBuffer.length + chunk.length,\n );\n combined.set(currentBuffer);\n combined.set(chunk, currentBuffer.length);\n\n // Extract full blocks and keep remainder in buffer\n let offset = 0;\n while (combined.length - offset >= uploadBlockSize) {\n const blockData = combined.slice(\n offset,\n offset + uploadBlockSize,\n );\n yield* stageBlock(blockData, false);\n offset += uploadBlockSize;\n }\n\n // Store remaining data in buffer\n yield* Ref.set(bufferRef, combined.slice(offset));\n }),\n ),\n );\n\n // Stage any remaining data as final block\n const remainingBuffer = yield* Ref.get(bufferRef);\n if (remainingBuffer.length > 0) {\n yield* stageBlock(remainingBuffer, true);\n }\n\n // Get all block IDs and commit the block 
list\n const blockIds = yield* Ref.get(blockIdsRef);\n const totalBytes = yield* Ref.get(totalBytesRef);\n\n if (blockIds.length === 0) {\n // No blocks staged (empty stream) - fail\n yield* activeUploadsGauge(Effect.succeed(-1));\n return yield* Effect.fail(\n new UploadistaError({\n code: \"FILE_WRITE_ERROR\",\n status: 400,\n body: \"Cannot complete upload with no data\",\n details: \"The stream provided no data to upload\",\n }),\n );\n }\n\n // Commit block list\n const blobClient = containerClient.getBlockBlobClient(fileId);\n yield* Effect.tryPromise({\n try: () =>\n blobClient.commitBlockList(blockIds, {\n blobHTTPHeaders: {\n blobContentType: options.contentType,\n },\n }),\n catch: (error) => {\n Effect.runSync(\n trackAzureError(\"writeStream\", error, {\n upload_id: fileId,\n operation: \"commit\",\n blocks: blockIds.length,\n }),\n );\n return UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n });\n },\n });\n\n // Log completion metrics\n const endTime = Date.now();\n const totalDurationMs = endTime - startTime;\n const throughputBps =\n totalDurationMs > 0 ? (totalBytes * 1000) / totalDurationMs : 0;\n const averageBlockSize =\n blockIds.length > 0 ? totalBytes / blockIds.length : undefined;\n\n yield* logAzureUploadCompletion(fileId, {\n fileSize: totalBytes,\n totalDurationMs,\n partsCount: blockIds.length,\n averagePartSize: averageBlockSize,\n throughputBps,\n retryCount: 0,\n });\n\n yield* uploadSuccessTotal(Effect.succeed(1));\n yield* activeUploadsGauge(Effect.succeed(-1));\n yield* fileSizeHistogram(Effect.succeed(totalBytes));\n\n yield* Effect.logInfo(\"Streaming write to Azure completed\").pipe(\n Effect.annotateLogs({\n upload_id: fileId,\n total_bytes: totalBytes,\n blocks_count: blockIds.length,\n duration_ms: totalDurationMs,\n }),\n );\n\n return {\n id: fileId,\n size: totalBytes,\n path: fileId,\n bucket: containerName,\n } satisfies StreamWriteResult;\n }).pipe(\n Effect.catchAll((error) =>\n Effect.gen(function* () {\n yield* uploadErrorsTotal(Effect.succeed(1));\n yield* activeUploadsGauge(Effect.succeed(-1));\n return yield* Effect.fail(error);\n }),\n ),\n ),\n );\n\n return {\n bucket: containerName,\n create,\n remove,\n write,\n getUpload,\n read,\n readStream,\n writeStream,\n deleteExpired,\n getCapabilities,\n getChunkerConstraints,\n validateUploadStrategy,\n } as DataStore<UploadFile>;\n 
});\n}\n"],"mappings":"isBAuCA,MAAM,EAAc,GAEd,OAAO,WAAe,KAAe,WAAY,WAC3C,WAAmB,OAAO,KAAK,EAAI,CAGtC,IAAI,WAAW,MAAM,KAAK,EAAM,GAAM,EAAE,WAAW,EAAE,CAAC,CAAC,CAsDhE,SAAS,EAAqB,EAAkC,CAC9D,OAAO,GAAU,EAAO,OAAS,EAC7B,EAAO,QAAQ,EAAG,IAAM,GAAK,GAAG,MAAQ,GAAI,EAAE,CAC9C,EAoBN,SAAgB,EAAW,CACzB,cACA,YACA,eAAe,KACf,YAAY,IACZ,4BAA4B,GAC5B,iCAAiC,IAAO,GAAK,GAAK,GAAK,EACvD,mBACA,SACA,aACA,cACA,aACA,iBACoB,CACpB,OAAO,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAU,MAAO,EACjB,EAAqB,GAAa,EAAI,KAAO,KAI/CA,EAEJ,GAAI,EAEF,EAAoBC,EAAY,qBAAqB,EAAiB,SAC7D,EAET,EAAoB,IAAIA,EAAY,EAAO,SAClC,EAAY,CAErB,IAAM,EAAa,EACf,WAAW,EAAY,wBACvB,GAAQ,MAAM,IAAI,CAAC,IAAM,GAC7B,GAAI,CAAC,EACH,MAAU,MACR,2HACD,CAEH,EAAoB,IAAIA,EAAY,EAAY,EAAW,SAClD,GAAe,EAGxB,GAAI,CACF,IAAM,EAAsB,IAAI,EAC9B,EACA,EACD,CACD,EAAoB,IAAIA,EACtB,WAAW,EAAY,wBACvB,EACD,OACM,EAAO,CACd,MAAU,MACR,4JAEqB,IACtB,MAGH,MAAU,MACR,kIAED,CAGH,IAAMC,EACJ,EAAkB,mBAAmB,EAAc,CAE/C,EAAqB,GAClB,GAAG,EAAG,aAGT,IACJ,EACA,EACA,IAEOC,EACLC,EACA,EAAO,IAAI,WAAa,CACtB,MAAO,EAAO,QAAQ,kBAAkB,CAAC,KACvC,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,SAAU,EACV,WAAYC,EAAW,OACxB,CAAC,CACH,CAED,MAAOC,EAAiB,EAAO,QAAQ,EAAE,CAAC,CAC1C,MAAOC,EAAkB,EAAO,QAAQF,EAAW,OAAO,CAAC,CAE3D,GAAI,CACF,IAAM,EAAa,EAAgB,mBACjC,EAAW,GACZ,CACD,MAAO,EAAO,WAAW,CACvB,IAAK,SAAY,CACf,MAAM,EAAW,WACf,EACAA,EACAA,EAAW,OACZ,EAEH,MAAQ,IACN,EAAO,QACL,EAAgB,cAAe,EAAO,CACpC,UAAW,EAAW,GACtB,SAAU,EACV,WAAYA,EAAW,OACxB,CAAC,CACH,CACM,EAAgB,SAAS,mBAAoB,CAClD,MAAO,EACR,CAAC,EAEL,CAAC,CAEF,MAAO,EAAO,QAAQ,2BAA2B,CAAC,KAChD,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,SAAU,EACV,WAAYA,EAAW,OACxB,CAAC,CACH,OACM,EAAO,CAQd,MAPA,EAAO,QACL,EAAgB,cAAe,EAAO,CACpC,UAAW,EAAW,GACtB,SAAU,EACV,WAAYA,EAAW,OACxB,CAAC,CACH,CACK,IAER,CACH,CAGG,IAAyB,EAAY,IAClC,EAAO,WAAW,CACvB,IAAK,SAAY,CAIf,MAHmB,EAAgB,mBACjC,EAAkB,EAAG,CACtB,CACgB,OAAOA,EAAYA,EAAW,OAAO,EAExD,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAC3C,MAAO,EACR,CAAC,CACL,CAAC,CAAC,KACD,EAAO,QACL,EAAO,QAAQ,sCAAsC,CAAC,KACpD,EAAO,aAAa,CAClB,UAAW,EACZ,CAAC,CACH,CACF,CACF,CAGG,EAAsB,GACnB,EAAO,WAAW,CACvB,IAAK,SAAY,CACf,GAAI,CAKF,OADiB,MAHE,EAAgB,mBACjC,EAAkB,EAAG,CACtB,CACiC,UAAU,EAC5B,yBACT,EAAO,CACd,GACE,GACA,OAAO,GAAU,UACjB,eAAgB,GAChB,EAAM,aAAe,IAErB,OAEF,MAAM,IAGV,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAC3C,MAAO,EACR,CAAC,CACL,CAAC,CAGE,GAA0B,GACvB,EAAO,WAAW,CACvB,IAAK,SAAY,CACf,GAAI,CAKF,OADmB,MAHA,EAAgB,mBACjC,EAAkB,EAAG,CACtB,CACmC,eAAe,EACjC,oBACX,EAAO,CACd,GACE,GACA,OAAO,GAAU,UACjB,eAAgB,GAChB,EAAM,aAAe,IAErB,OAEF,MAAM,IAGV,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAC3C,MAAO,EACR,CAAC,CACL,CAAC,CAGE,EAAyB,GACtB,EAAO,WAAW,CACvB,IAAK,SAAY,CAIf,MAHmB,EAAgB,mBACjC,EAAkB,EAAG,CACtB,CACgB,gBAAgB,EAEnC,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAC3C,MAAO,EACR,CAAC,CACL,CAAC,CAGE,GAA2B,GACxB,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAkB,MAAO,EAAmB,EAAG,CAErD,GAAI,CAAC,EACH,OAIF,IAAM,EAAS,EAAgB,WAAW,CACpCG,EAAuB,EAAE,CAC3B,EAAsB,EAE1B,GAAI,CACF,OAAa,CACX,IAAM,EAAS,MAAO,EAAO,YAAc,EAAO,MAAM,CAAC,CACzD,GAAI,EAAO,KAAM,MACjB,EAAO,KAAK,EAAO,MAAM,CACzB,GAAuB,EAAO,MAAM,eAE9B,CACR,EAAO,aAAa,CAItB,IAAM,EAAS,EAAO,aAAa,EAAO,CAE1C,MAAO,CACL,KAAM,EACN,SACD,EACD,CAGE,EAAwB,GAA8B,CAC1D,IAAM,EAAO,GAAY,cACrBC,EAEJ,AAME,EANE,GAAQ,EACS,EACV,GAAQ,EAAqB,EACnB,EAGA,KAAK,KAAK,EAAO,EAAU,CAIhD,IAAM,EAAiB,KAAK,IAAI,EAAkB,EAAa,CAG/D,OAAO,KAAK,KAAK,EAAiB,KAAK,CAAG,MAKtC,EACH,GAEC,GAEO,EAAO,MAAqB,GAAS,CAC1C,IAAI,EAAS,IAAI,WACb,EAAc,EACd,EAAsB,EAEpB,GAAa,EAAkB,EAAe,KAAU,CAE5D,EAAO,QACL,EAAO,QAAQ,iBAAiB,CAAC,KAC/B,EAAO,aAAa,CAClB,aAAc,EACd,WAAY,EAAK,OACjB,cAAe,EACf,eAAgB,EAChB,sBAAuB,EAAsB,EAAK,OACnD,CAAC,CACH,CACF,CACD,EAAK,OAAO,CACV,YAAa,IACb,OACA,KAAM,EAAK,OACZ,CAAC,EAGE,EAAgB,GAAwB,CAE5C,IAAM,EAAW,IAAI,WAAW,EAAO,OAAS,EAAQ,OAAO,CAQ/D
,IAPA,EAAS,IAAI,EAAO,CACpB,EAAS,IAAI,EAAS,EAAO,OAAO,CACpC,EAAS,EACT,GAAuB,EAAQ,OAIxB,EAAO,QAAU,GAAW,CACjC,IAAM,EAAQ,EAAO,MAAM,EAAG,EAAU,CACxC,EAAS,EAAO,MAAM,EAAU,CAChC,EAAU,EAAO,GAAM,GAK3B,EAAO,QACL,EAAO,KACL,EAAO,WAAY,GACjB,EAAO,SAAW,EAAa,EAAM,CAAC,CACvC,CACD,EAAO,YACL,EAAO,SAAW,CAGZ,EAAO,OAAS,GAClB,EAAU,EAAQ,GAAK,CAEzB,EAAK,KAAK,EACV,CACH,CACD,EAAO,SAAU,GAAU,EAAO,SAAW,EAAK,KAAK,EAAM,CAAC,CAAC,CAChE,CACF,EACD,CAOA,GACH,EAA2C,EAAgB,IACrD,GACA,EAEE,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAyB,MAAO,EAAI,KAAK,EAAc,CAE7D,OAAO,EAAO,KACZ,EAAO,IAAK,GACV,EAAO,IAAI,WAAa,CAKtB,EAJiB,MAAO,EAAI,aAC1B,EACC,GAAU,EAAQ,EAAM,OAC1B,CACmB,EACpB,CACH,CACF,EACD,CAAC,KAAK,EAAO,OAAO,CAhBE,EAsBtB,GACJ,EACA,EACA,EACA,EACA,IAEO,EAAO,IAAI,WAAa,CAC7B,MAAO,EAAO,QAAQ,mBAAmB,CAAC,KACxC,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,YAAa,EACb,UAAW,EAAW,KACvB,CAAC,CACH,CAED,IAAM,EAAO,EAAW,KAElB,EAAkB,EAAqB,EAAK,CAClD,MAAO,EAAO,QAAQ,aAAa,CAAC,KAClC,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,WAAY,EACb,CAAC,CACH,CAOD,IAAM,EAAcJ,EAAW,KAE7B,EAAyB,EAAY,EAAW,CAEhD,EAAoB,EAAgB,CACrC,CAGK,EAAsB,MAAO,EAAI,KAAK,EAAW,CACjD,EAAwB,MAAO,EAAI,KAAK,EAAE,CAC1C,EAAc,MAAO,EAAI,KAAe,EAAE,CAAC,CAE3C,EAAe,GACnB,EAAO,IAAI,WAAa,CAEtB,IAAM,EAAmB,MAAO,EAAI,aAClC,EACC,GAAW,EAAS,EAAU,KAChC,CACK,EAAe,IAAqB,EAAW,MAAQ,GAE7D,MAAO,EAAO,SAAS,mBAAmB,CAAC,KACzC,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,kBAAmB,EACnB,UAAW,EAAW,KACtB,WAAY,EAAU,KACtB,eAAgB,EACjB,CAAC,CACH,CAED,IAAM,EACJ,EAAyB,EAAU,YAAc,EAcnD,GAZI,EAAU,KAAO,IACnB,MAAO,EAAO,KACZ,EAAgB,SAAS,mBAAoB,CAC3C,MAAW,MACT,cAAc,EAAU,KAAK,6BAA6B,IAC3D,CACF,CAAC,CACH,EAKC,EAAU,MAAQ,GAAgB,EAAc,CAClD,MAAO,EAAO,SAAS,4BAA4B,CAAC,KAClD,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,aAAc,EACd,WAAY,EAAU,KACtB,eAAgB,EAChB,eAAgB,EACjB,CAAC,CACH,CAED,IAAM,EAAU,EACd,SAAS,EAAkB,UAAU,CAAC,SAAS,EAAG,IAAI,GACvD,CAAC,SAAS,SAAS,CACpB,MAAO,GAAY,EAAY,EAAU,KAAM,EAAQ,CACvD,MAAO,EAAI,OAAO,EAAc,GAAQ,CAAC,GAAG,EAAK,EAAQ,CAAC,CAC1D,MAAOE,EAAkB,EAAO,QAAQ,EAAU,KAAK,CAAC,MAGxD,MAAO,GAAsB,EAAW,GAAI,EAAU,KAAK,CAG7D,MAAO,EAAI,OACT,EACC,GAAU,EAAQ,EAAU,KAC9B,EAKD,CAQJ,OALA,MAAO,EAAY,KACjB,EAAO,WAAY,GAAc,EAAY,EAAU,CAAC,CACxD,EAAO,gBAAgB,EAA0B,CAClD,CAEM,CACL,cAAe,MAAO,EAAI,IAAI,EAAsB,CACpD,SAAU,MAAO,EAAI,IAAI,EAAY,CACtC,EACD,CAME,GAAgB,EAAwB,IACrC,EAAO,WAAW,CACvB,IAAK,SAAY,CAEf,MADmB,EAAgB,mBAAmB,EAAW,GAAG,CACnD,gBAAgB,EAAU,CACzC,gBAAiB,CACf,gBAAiB,EAAW,UAAU,aAAa,UAAU,CAC7D,iBAAkB,EAAW,UAAU,cAAc,UAAU,CAChE,CACF,CAAC,EAEJ,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAC3C,MAAO,EACR,CAAC,CACL,CAAC,CAME,EAAkB,GACf,EAAO,WAAW,CACvB,IAAK,SAAY,CACf,GAAI,CASF,OAPkB,MADC,EAAgB,mBAAmB,EAAG,CACtB,aAAa,YAAY,EAGhD,iBAAiB,IAAK,IAAW,CACzC,KAAM,EAAM,KACb,EAAE,EAAI,EAAE,OAGJ,EAAO,CACd,GACE,GACA,OAAO,GAAU,UACjB,eAAgB,GAChB,EAAM,aAAe,IAErB,MAAO,EAAE,CAEX,MAAM,IAGV,MAAQ,GACN,EAAgB,SAAS,sBAAuB,CAC9C,MAAO,EACR,CAAC,CACL,CAAC,CAME,EAAc,GACX,EAAO,IAAI,WAAa,CAC7B,MAAO,EAAO,QAAQ,uBAAuB,CAAC,KAC5C,EAAO,aAAa,CAClB,UAAW,EACZ,CAAC,CACH,CACD,MAAO,EAAQ,OAAO,EAAG,EACzB,CAME,EAAU,GACP,EAAO,IAAI,WAAa,CA2B7B,OA1BA,MAAOG,EAAoB,EAAO,QAAQ,EAAE,CAAC,CAC7C,MAAOC,EAAmB,EAAO,QAAQ,EAAE,CAAC,CAC5C,MAAOC,EAAkB,EAAO,QAAQ,EAAO,MAAQ,EAAE,CAAC,CAE1D,MAAO,EAAO,QAAQ,iCAAiC,CAAC,KACtD,EAAO,aAAa,CAClB,UAAW,EAAO,GACnB,CAAC,CACH,CAED,EAAO,aAAe,IAAI,MAAM,CAAC,aAAa,CAC9C,EAAO,QAAU,CACf,GAAI,EAAO,QAAQ,GACnB,KAAM,EAAO,QAAQ,KACrB,KAAM,EAAO,GACb,OAAQ,EACT,CACD,EAAO,IAAM,GAAG,EAAY,GAAG,EAAO,KAEtC,MAAO,EAAQ,IAAI,EAAO,GAAI,EAAO,CACrC,MAAO,EAAO,QAAQ,gCAAgC,CAAC,KACrD,EAAO,aAAa,CAClB,UAAW,EAAO,GACnB,CAAC,CACH,CAEM,GACP,CAME,GACJ,GAEO,EAAO,WAAW,CACvB,IAAK,SAAY,CAEf,IAAM,EAAW,MADE,EAAgB,mBAAmB,EAAG,CACvB,UAAU,CAC5C,GAAI,EAAS,SACX,OAAO,EAAS,SAElB,GAAI,EAAS,mBACX,OAAO,EAAS,mBAElB,MAAU,MAAM,uCA
AuC,EAEzD,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAC3C,MAAO,EACR,CAAC,CACL,CAAC,CAWE,GAAc,EAAY,IAC9B,EAAO,IAAI,WAAa,CAEtB,IAAM,EAAkB,CACtB,GAAG,EACH,GAAG,EACJ,CAEK,EAAc,MAAO,GAAe,EAAG,CAG7C,GAAI,aAAuB,KAAM,CAC/B,IAAM,EAAc,MAAO,EAAO,YAChC,EAAY,aAAa,CAC1B,CACK,EAAQ,IAAI,WAAW,EAA2B,CAGlD,EAAY,EAAgB,UAC5BJ,EAAuB,EAAE,CAC/B,IAAK,IAAI,EAAI,EAAG,EAAI,EAAM,OAAQ,GAAK,EACrC,EAAO,KAAK,EAAM,MAAM,EAAG,KAAK,IAAI,EAAI,EAAW,EAAM,OAAO,CAAC,CAAC,CAEpE,OAAO,EAAO,aAAa,EAAO,CAIpC,OAAO,EAAO,MAAoC,GAAS,CACzD,IAAM,EAAS,EAAY,WAAW,CAChC,EAAY,EAAgB,UAC9B,EAAS,IAAI,WA+CjB,OA7CqB,SAAY,CAC/B,GAAI,CACF,OAAa,CACX,GAAM,CAAE,OAAM,SAAU,MAAM,EAAO,MAAM,CAE3C,GAAI,EAAM,CAEJ,EAAO,OAAS,GAClB,EAAK,OAAO,EAAO,CAErB,EAAK,KAAK,CACV,OAGF,GAAI,EAAO,CAET,IAAM,EAAW,IAAI,WAAW,EAAO,OAAS,EAAM,OAAO,CAM7D,IALA,EAAS,IAAI,EAAO,CACpB,EAAS,IAAI,EAAO,EAAO,OAAO,CAClC,EAAS,EAGF,EAAO,QAAU,GAAW,CACjC,IAAM,EAAQ,EAAO,MAAM,EAAG,EAAU,CACxC,EAAS,EAAO,MAAM,EAAU,CAChC,EAAK,OAAO,EAAM,UAIjB,EAAO,CACd,EAAK,KACH,IAAI,EAAgB,CAClB,KAAM,kBACN,OAAQ,IACR,KAAM,mCACN,QAAS,6BAA6B,OAAO,EAAM,GACpD,CAAC,CACH,KAKS,CAGP,EAAO,SAAW,CACvB,EAAO,aAAa,EACpB,EACF,EACF,CAEE,GAAQ,GACL,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAS,MAAO,EAAW,EAAG,CAG9BA,EAAuB,EAAE,CAC/B,MAAO,EAAO,WAAW,EAAS,GAChC,EAAO,SAAW,CAChB,EAAO,KAAK,EAAM,EAClB,CACH,CAGD,IAAM,EAAc,EAAO,QACxB,EAAK,IAAU,EAAM,EAAM,OAC5B,EACD,CACK,EAAS,IAAI,WAAW,EAAY,CACtC,EAAS,EACb,IAAK,IAAM,KAAS,EAClB,EAAO,IAAI,EAAO,EAAO,CACzB,GAAU,EAAM,OAGlB,OAAO,GACP,CAGE,IACJ,EACA,EACA,IAEO,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAa,MAAO,EAAQ,IAAI,EAAQ,CAKxC,GAHS,MAAO,EAAe,EAAQ,EAElB,OACW,EAEhC,EAAkB,MAAO,GAAwB,EAAQ,CAE/D,GAAI,EAAiB,CACnB,MAAO,EAAsB,EAAQ,CACrC,IAAM,EAAS,EAAgB,EAAgB,KACzC,EAAO,EAAgB,OAAO,KAAK,EAAO,OAAO,EAAY,CAAC,CACpE,MAAO,CACL,aACA,gBAAiB,EAAkB,EACnC,SACA,oBAAqB,EAAgB,KACrC,OACD,MAED,MAAO,CACL,aACA,kBACA,OAAQ,EACR,oBAAqB,EACrB,KAAM,EACP,EAEH,CAME,GACJ,EACA,IAIOK,EACL,EAAQ,QACRV,EACEW,EACA,EAAO,IAAI,WAAa,CACtB,IAAM,EAAY,KAAK,KAAK,CACtB,CACJ,OAAQ,EACR,UACA,OAAQ,GACN,EACE,CAAE,cAAe,EAQjB,CAAE,aAAY,kBAAiB,SAAQ,QANvB,MAAO,GAC3B,EACA,EACA,EACD,CAIK,CAAE,gBAAe,YAAa,MAAO,EACzC,EACA,EACA,EACA,EACA,EACD,CAEK,EAAY,EAAS,EAE3B,GAAI,EAAW,OAAS,EACtB,GAAI,CAEF,MAAO,EAAa,EAAY,EAAS,CAGzC,MAAO,EAAQ,IAAI,EAAS,CAC1B,GAAG,EACH,OAAQ,EACT,CAAC,CAGF,MAAO,EAAyB,EAAS,CACvC,SAAU,EAAW,MAAQ,EAC7B,gBAAiB,KAAK,KAAK,CAAG,EAC9B,WAAY,EAAS,OACrB,gBAAiB,EAAW,KAC5B,cAAe,EAAW,MAAQ,KAAK,KAAK,CAAG,GAC/C,WAAY,EACb,CAAC,CAEF,MAAOC,EAAmB,EAAO,QAAQ,EAAE,CAAC,CAC5C,MAAOJ,EAAmB,EAAO,QAAQ,GAAG,CAAC,OACtC,EAAO,CAed,MAdA,MAAO,EAAO,SAAS,0BAA0B,CAAC,KAChD,EAAO,aAAa,CAClB,UAAW,EACX,MAAO,KAAK,UAAU,EAAM,CAC7B,CAAC,CACH,CACD,MAAOK,EAAkB,EAAO,QAAQ,EAAE,CAAC,CAC3C,EAAO,QACL,EAAgB,QAAS,EAAO,CAC9B,UAAW,EACX,UAAW,SACX,OAAQ,EAAS,OAClB,CAAC,CACH,CACK,EAIV,OAAO,GACP,CACH,CACF,CAGG,GAAa,GACV,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAa,MAAO,EAAQ,IAAI,EAAG,CAErC,EAAS,EAEb,GAAI,CAEF,EAAS,EADM,MAAO,EAAe,EAAG,CACH,OAC9B,EAAO,CAEd,GACE,OAAO,GAAU,UACjB,GACA,eAAgB,GAChB,EAAM,aAAe,IAErB,MAAO,CACL,GAAG,EACH,OAAQ,EAAW,KACnB,KAAM,EAAW,KACjB,SAAU,EAAW,SACrB,QAAS,EAAW,QACrB,CASH,MANA,MAAO,EAAO,SAAS,sBAAsB,CAAC,KAC5C,EAAO,aAAa,CAClB,UAAW,EACX,MAAO,KAAK,UAAU,EAAM,CAC7B,CAAC,CACH,CACK,EAGR,IAAM,EAAsB,MAAO,GAAuB,EAAG,CAE7D,MAAO,CACL,GAAG,EACH,OAAQ,GAAU,GAAuB,GACzC,KAAM,EAAW,KACjB,QAAS,EAAW,QACrB,EACD,CAGE,GAAU,GACP,EAAO,IAAI,WAAa,CAC7B,GAAI,CACF,IAAM,EAAa,EAAgB,mBAAmB,EAAG,CACzD,MAAO,EAAO,YAAc,EAAW,gBAAgB,CAAC,CAGxD,MAAO,EAAsB,EAAG,OACzB,EAAO,CACd,GACE,OAAO,GAAU,UACjB,GACA,eAAgB,GAChB,EAAM,aAAe,IAOrB,OALA,MAAO,EAAO,SAAS,gBAAgB,CAAC,KACtC,EAAO,aAAa,CAClB,UAAW,EACZ,CAAC,CACH,CACM,MAAO,EAAO,KACnB,EAAgB,SAAS,iBAAiB,CAC3C,CAOH,MALA,EAAO,QACL,EAAgB,SAAU,EAAO,CAC/B,UAAW
,EACZ,CAAC,CACH,CACK,EAGR,MAAO,EAAW,EAAG,CACrB,MAAOL,EAAmB,EAAO,QAAQ,GAAG,CAAC,EAC7C,CAGE,MACG,EAGH,GAAqB,GAAuB,CAChD,IAAM,EAAO,IAAI,KAAK,EAAW,CACjC,OAAO,IAAI,KAAK,EAAK,SAAS,CAAG,GAAe,CAAC,EAG7C,OACG,EAAO,WAAW,CACvB,IAAK,SAA6B,CAChC,GAAI,GAAe,GAAK,EACtB,MAAO,GAGT,IAAI,EAAU,EAER,EAAW,EAAgB,cAAc,CAC7C,gBAAiB,GAClB,CAAC,CAEIM,EAAyB,EAAE,CAEjC,UAAW,IAAM,KAAQ,EACvB,GAAI,EAAK,UAAU,aAAc,CAC/B,IAAM,EAAe,IAAI,KAAK,EAAK,SAAS,aAAa,CAEvD,KAAK,KAAK,CACV,GAAkB,EAAa,aAAa,CAAC,CAAC,SAAS,EAEvD,EAAa,KAAK,EAAK,KAAK,CAMlC,IAAK,IAAM,KAAY,EACrB,MAAM,EAAgB,WAAW,EAAS,CAC1C,IAGF,OAAO,GAET,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAC3C,MAAO,EACR,CAAC,CACL,CAAC,CAGE,OACG,CACL,wBAAyB,GACzB,sBAAuB,GACvB,uBAAwB,GACxB,yBAA0B,GAC1B,6BAA8B,GAC9B,sBAAuB,GACvB,uBAAwB,GACxB,qBAAsB,EACtB,aAAc,EACd,aAAc,IAAO,KAAO,KAC5B,SAAU,EACV,iBAAkB,EAClB,sBAAuB,GACvB,2BAA4B,GAC5B,kBAAmB,IAAA,GACpB,EAyPH,MAAO,CACL,OAAQ,EACR,SACA,UACA,QACA,aACA,QACA,aACA,aAzNA,EACA,IAEAd,EACEW,EACA,EAAO,IAAI,WAAa,CACtB,IAAM,EAAY,KAAK,KAAK,CAE5B,MAAO,EAAO,QAAQ,oCAAoC,CAAC,KACzD,EAAO,aAAa,CAClB,UAAW,EACX,UAAW,EACX,UAAW,EAAQ,SACpB,CAAC,CACH,CAED,MAAOJ,EAAoB,EAAO,QAAQ,EAAE,CAAC,CAC7C,MAAOC,EAAmB,EAAO,QAAQ,EAAE,CAAC,CAG5C,IAAM,EAAkB,EAAqB,EAAQ,SAAS,CAGxD,EAAc,MAAO,EAAI,KAAe,EAAE,CAAC,CAC3C,EAAgB,MAAO,EAAI,KAAK,EAAE,CAClC,EAAiB,MAAO,EAAI,KAAK,EAAE,CACnC,EAAY,MAAO,EAAI,KAAK,IAAI,WAAc,CAG9C,GAAc,EAAkB,IACpC,EAAO,IAAI,WAAa,CAMtB,GALI,EAAK,SAAW,GAKhB,EAAK,OAAS,GAAgB,CAAC,EACjC,OAGF,IAAM,EAAc,MAAO,EAAI,aAC7B,EACC,GAAM,EAAI,EACZ,CAGK,EAAU,EACd,gBAAgB,EAAY,UAAU,CAAC,SAAS,EAAG,IAAI,GACxD,CAAC,SAAS,SAAS,CAEpB,MAAO,EAAO,SAAS,4BAA4B,CAAC,KAClD,EAAO,aAAa,CAClB,UAAW,EACX,aAAc,EACd,WAAY,EAAK,OACjB,eAAgB,EACjB,CAAC,CACH,CAED,IAAMO,EAAa,EAAgB,mBAAmB,EAAO,CAC7D,MAAO,EAAO,WAAW,CACvB,QAAWA,EAAW,WAAW,EAAS,EAAM,EAAK,OAAO,CAC5D,MAAQ,IACN,EAAO,QACL,EAAgB,cAAe,EAAO,CACpC,UAAW,EACX,aAAc,EACd,WAAY,EAAK,OAClB,CAAC,CACH,CACM,EAAgB,SAAS,mBAAoB,CAClD,MAAO,EACR,CAAC,EAEL,CAAC,CAEF,MAAO,EAAI,OAAO,EAAc,GAAQ,CAAC,GAAG,EAAK,EAAQ,CAAC,CAC1D,MAAOZ,EAAiB,EAAO,QAAQ,EAAE,CAAC,CAC1C,MAAOC,EAAkB,EAAO,QAAQ,EAAK,OAAO,CAAC,EACrD,CAGJ,MAAO,EAAQ,OAAO,KACpB,EAAO,WAAY,GACjB,EAAO,IAAI,WAAa,CAEtB,MAAO,EAAI,OACT,EACC,GAAU,EAAQ,EAAM,OAC1B,CAGD,IAAM,EAAgB,MAAO,EAAI,IAAI,EAAU,CACzC,EAAW,IAAI,WACnB,EAAc,OAAS,EAAM,OAC9B,CACD,EAAS,IAAI,EAAc,CAC3B,EAAS,IAAI,EAAO,EAAc,OAAO,CAGzC,IAAI,EAAS,EACb,KAAO,EAAS,OAAS,GAAU,GAKjC,MAAO,EAJW,EAAS,MACzB,EACA,EAAS,EACV,CAC4B,GAAM,CACnC,GAAU,EAIZ,MAAO,EAAI,IAAI,EAAW,EAAS,MAAM,EAAO,CAAC,EACjD,CACH,CACF,CAGD,IAAM,EAAkB,MAAO,EAAI,IAAI,EAAU,CAC7C,EAAgB,OAAS,IAC3B,MAAO,EAAW,EAAiB,GAAK,EAI1C,IAAM,EAAW,MAAO,EAAI,IAAI,EAAY,CACtC,EAAa,MAAO,EAAI,IAAI,EAAc,CAEhD,GAAI,EAAS,SAAW,EAGtB,OADA,MAAOI,EAAmB,EAAO,QAAQ,GAAG,CAAC,CACtC,MAAO,EAAO,KACnB,IAAI,EAAgB,CAClB,KAAM,mBACN,OAAQ,IACR,KAAM,sCACN,QAAS,wCACV,CAAC,CACH,CAIH,IAAM,EAAa,EAAgB,mBAAmB,EAAO,CAC7D,MAAO,EAAO,WAAW,CACvB,QACE,EAAW,gBAAgB,EAAU,CACnC,gBAAiB,CACf,gBAAiB,EAAQ,YAC1B,CACF,CAAC,CACJ,MAAQ,IACN,EAAO,QACL,EAAgB,cAAe,EAAO,CACpC,UAAW,EACX,UAAW,SACX,OAAQ,EAAS,OAClB,CAAC,CACH,CACM,EAAgB,SAAS,mBAAoB,CAClD,MAAO,EACR,CAAC,EAEL,CAAC,CAIF,IAAM,EADU,KAAK,KAAK,CACQ,EAC5B,EACJ,EAAkB,EAAK,EAAa,IAAQ,EAAkB,EAC1D,EACJ,EAAS,OAAS,EAAI,EAAa,EAAS,OAAS,IAAA,GAwBvD,OAtBA,MAAO,EAAyB,EAAQ,CACtC,SAAU,EACV,kBACA,WAAY,EAAS,OACrB,gBAAiB,EACjB,gBACA,WAAY,EACb,CAAC,CAEF,MAAOI,EAAmB,EAAO,QAAQ,EAAE,CAAC,CAC5C,MAAOJ,EAAmB,EAAO,QAAQ,GAAG,CAAC,CAC7C,MAAOC,EAAkB,EAAO,QAAQ,EAAW,CAAC,CAEpD,MAAO,EAAO,QAAQ,qCAAqC,CAAC,KAC1D,EAAO,aAAa,CAClB,UAAW,EACX,YAAa,EACb,aAAc,EAAS,OACvB,YAAa,EACd,CAAC,CACH,CAEM,CACL,GAAI,EACJ,KAAM,EACN,KAAM,EACN,OAAQ,EACT,EACD,CAAC,KACD,EAAO,SAAU,GACf,EAAO,IAAI,WAAa,CAGtB,OAFA
,MAAOI,EAAkB,EAAO,QAAQ,EAAE,CAAC,CAC3C,MAAOL,EAAmB,EAAO,QAAQ,GAAG,CAAC,CACtC,MAAO,EAAO,KAAK,EAAM,EAChC,CACH,CACF,CACF,CAWD,iBACA,kBACA,2BAhQO,CACL,aAAc,EACd,aAAc,IAAO,KAAO,KAC5B,iBAAkB,EAClB,sBAAuB,GACxB,EA4PD,uBAxPA,GACkC,CAClC,IAAM,EAAe,GAAiB,CAEhC,OAAgB,CACpB,OAAQ,EAAR,CACE,IAAK,WACH,OAAO,EAAa,wBACtB,IAAK,SACH,MAAO,GACT,QACE,MAAO,OAET,CAEJ,OAAO,EAAO,QAAQ,EAAO,EA0O9B,EACD"}
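The embedded azure-store.ts source above stages each part with BlockBlobClient.stageBlock and finalizes the blob with commitBlockList, using deterministic base64-encoded block IDs. A minimal sketch of that ID scheme, assuming a Node.js runtime where Buffer is available (blockIdFor is an illustrative helper, not a package export). Azure requires every block ID within a blob to have the same encoded length, which is why the counter is zero-padded before encoding:

import { Buffer } from "node:buffer";

// Mirrors the `block-` + zero-padded counter pattern in the embedded source.
const blockIdFor = (blockNumber: number): string =>
  Buffer.from(`block-${blockNumber.toString().padStart(6, "0")}`).toString("base64");

blockIdFor(1);   // "YmxvY2stMDAwMDAx"
blockIdFor(250); // "YmxvY2stMDAwMjUw"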
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@uploadista/data-store-azure",
3
3
  "type": "module",
4
- "version": "0.0.20-beta.6",
4
+ "version": "0.0.20-beta.8",
5
5
  "description": "Azure Blob Storage data store for Uploadista",
6
6
  "license": "MIT",
7
7
  "author": "Uploadista",
@@ -16,22 +16,22 @@
16
16
  "dependencies": {
17
17
  "@azure/core-auth": "^1.8.0",
18
18
  "@azure/storage-blob": "12.29.1",
19
- "@uploadista/core": "0.0.20-beta.6",
20
- "@uploadista/kv-store-memory": "0.0.20-beta.6",
21
- "@uploadista/observability": "0.0.20-beta.6"
19
+ "@uploadista/core": "0.0.20-beta.8",
20
+ "@uploadista/kv-store-memory": "0.0.20-beta.8",
21
+ "@uploadista/observability": "0.0.20-beta.8"
22
22
  },
23
23
  "peerDependencies": {
24
24
  "effect": "^3.0.0"
25
25
  },
26
26
  "devDependencies": {
27
27
  "@effect/vitest": "0.27.0",
28
- "effect": "3.19.10",
29
- "tsdown": "0.17.2",
28
+ "effect": "3.19.12",
29
+ "tsdown": "0.18.0",
30
30
  "vitest": "4.0.15",
31
- "@uploadista/typescript-config": "0.0.20-beta.6"
31
+ "@uploadista/typescript-config": "0.0.20-beta.8"
32
32
  },
33
33
  "scripts": {
34
- "build": "tsdown",
34
+ "build": "tsc --noEmit && tsdown",
35
35
  "check": "biome check --write ./src",
36
36
  "format": "biome format --write ./src",
37
37
  "lint": "biome lint --write ./src",
@@ -108,7 +108,10 @@ export type AzureStore = DataStore<UploadFile> & {
108
108
  readStream: (
109
109
  id: string,
110
110
  config?: StreamingConfig,
111
- ) => Effect.Effect<Stream.Stream<Uint8Array, UploadistaError>, UploadistaError>;
111
+ ) => Effect.Effect<
112
+ Stream.Stream<Uint8Array, UploadistaError>,
113
+ UploadistaError
114
+ >;
112
115
  getChunkerConstraints: () => {
113
116
  minChunkSize: number;
114
117
  maxChunkSize: number;
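This hunk only re-wraps the readStream signature in the AzureStore type; the API itself is unchanged. A hedged usage sketch that drains the chunk stream the same way the store's own read helper does (the AzureStore type import and the store value are assumed to be available from the package entry point):

import { Effect, Stream } from "effect";
import type { AzureStore } from "@uploadista/data-store-azure";

// Counts the bytes of a stored blob by draining its chunk stream.
// An optional StreamingConfig (e.g. a custom chunkSize) could be passed as the
// second argument to readStream; the defaults from the source are used here.
const countBytes = (store: AzureStore, id: string) =>
  Effect.gen(function* () {
    const stream = yield* store.readStream(id);
    const chunks: Uint8Array[] = [];
    yield* Stream.runForEach(stream, (chunk) =>
      Effect.sync(() => {
        chunks.push(chunk);
      }),
    );
    return chunks.reduce((total, chunk) => total + chunk.length, 0);
  });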
@@ -1277,7 +1280,10 @@ export function azureStore({
1277
1280
  Stream.runForEach((chunk) =>
1278
1281
  Effect.gen(function* () {
1279
1282
  // Update total bytes
1280
- yield* Ref.update(totalBytesRef, (total) => total + chunk.length);
1283
+ yield* Ref.update(
1284
+ totalBytesRef,
1285
+ (total) => total + chunk.length,
1286
+ );
1281
1287
 
1282
1288
  // Get current buffer and append new chunk
1283
1289
  const currentBuffer = yield* Ref.get(bufferRef);
@@ -1290,7 +1296,10 @@ export function azureStore({
1290
1296
  // Extract full blocks and keep remainder in buffer
1291
1297
  let offset = 0;
1292
1298
  while (combined.length - offset >= uploadBlockSize) {
1293
- const blockData = combined.slice(offset, offset + uploadBlockSize);
1299
+ const blockData = combined.slice(
1300
+ offset,
1301
+ offset + uploadBlockSize,
1302
+ );
1294
1303
  yield* stageBlock(blockData, false);
1295
1304
  offset += uploadBlockSize;
1296
1305
  }
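The two hunks above only re-wrap the block-buffering code inside writeStream; the behaviour is unchanged. As a standalone sketch of that pattern (makeBlockBuffer and flush are illustrative names, not package exports): incoming chunks are appended to a carry-over buffer, every full block of blockSize bytes is flushed, and the remainder is kept for the next chunk.

// Illustrative buffering helper; in the package, the flush step stages an Azure block.
const makeBlockBuffer = (
  blockSize: number,
  flush: (block: Uint8Array) => void,
) => {
  let buffer = new Uint8Array(0);
  return (chunk: Uint8Array) => {
    // Append the new chunk to whatever was left over from the previous call.
    const combined = new Uint8Array(buffer.length + chunk.length);
    combined.set(buffer);
    combined.set(chunk, buffer.length);
    // Flush every full block, then keep the remainder for the next call.
    let offset = 0;
    while (combined.length - offset >= blockSize) {
      flush(combined.slice(offset, offset + blockSize));
      offset += blockSize;
    }
    buffer = combined.slice(offset);
  };
};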
@@ -1403,7 +1412,7 @@ export function azureStore({
1403
1412
  read,
1404
1413
  readStream,
1405
1414
  writeStream,
1406
- deleteExpired: deleteExpired(),
1415
+ deleteExpired,
1407
1416
  getCapabilities,
1408
1417
  getChunkerConstraints,
1409
1418
  validateUploadStrategy,
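Unlike the formatting-only hunks above, this last hunk changes behaviour: in beta.6 the returned store assigned the result of calling deleteExpired() (an already-built Effect value) to the deleteExpired property, while beta.8 passes the function itself, matching the other members of the returned DataStore. A hedged illustration of the difference, using a stand-in deleteExpired with the same shape as the store's internal helper:

import { Effect } from "effect";

// Stand-in with the same shape as the store's internal deleteExpired helper.
const deleteExpired = (): Effect.Effect<number> => Effect.succeed(0);

const storeBefore = { deleteExpired: deleteExpired() }; // property holds an Effect value
const storeAfter = { deleteExpired };                   // property holds the function itself

// (storeBefore.deleteExpired as unknown as () => unknown)(); // would throw: not a function
// void Effect.runPromise(storeAfter.deleteExpired());        // builds and runs the Effect (0 here)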