@uploadista/data-store-azure 0.0.3 → 0.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,22 @@
 
  
- > @uploadista/data-store-azure@0.0.2 build /Users/denislaboureyras/Documents/uploadista/dev/uploadista-workspace/uploadista-sdk/packages/data-stores/azure
- > tsc -b
+ > @uploadista/data-store-azure@0.0.3 build /Users/denislaboureyras/Documents/uploadista/dev/uploadista-workspace/uploadista-sdk/packages/data-stores/azure
+ > tsdown
 
+ ℹ tsdown v0.15.9 powered by rolldown v1.0.0-beta.44
+ ℹ Using tsdown config: /Users/denislaboureyras/Documents/uploadista/dev/uploadista-workspace/uploadista-sdk/packages/data-stores/azure/tsdown.config.ts
+ ℹ entry: src/index.ts
+ ℹ tsconfig: tsconfig.json
+ ℹ Build start
+ ℹ Cleaning 7 files
+ ℹ [CJS] dist/index.cjs 11.85 kB │ gzip: 3.94 kB
+ ℹ [CJS] 1 files, total: 11.85 kB
+ ℹ [ESM] dist/index.js 10.42 kB │ gzip: 3.73 kB
+ ℹ [ESM] dist/index.js.map 46.57 kB │ gzip: 11.51 kB
+ ℹ [ESM] dist/index.d.ts.map 0.65 kB │ gzip: 0.33 kB
+ ℹ [ESM] dist/index.d.ts 2.68 kB │ gzip: 1.09 kB
+ ℹ [ESM] 4 files, total: 60.33 kB
+ ℹ [CJS] dist/index.d.cts.map 0.65 kB │ gzip: 0.34 kB
+ ℹ [CJS] dist/index.d.cts 2.68 kB │ gzip: 1.10 kB
+ ℹ [CJS] 2 files, total: 3.34 kB
+ ✔ Build complete in 6413ms
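The log above shows the build moving from `tsc -b` to tsdown, which now emits dual ESM/CJS bundles, bundled `.d.ts`/`.d.cts` declarations, and source maps from a single `src/index.ts` entry. The actual `tsdown.config.ts` is not part of this diff; the sketch below is only a plausible configuration consistent with that output, using standard tsdown options.

// Hypothetical tsdown.config.ts (not included in the published package);
// option names are standard tsdown settings, assumed here from the log output.
import { defineConfig } from "tsdown";

export default defineConfig({
  entry: ["src/index.ts"],   // matches "entry: src/index.ts" in the log
  format: ["esm", "cjs"],    // produces dist/index.js and dist/index.cjs
  dts: true,                 // produces dist/index.d.ts / dist/index.d.cts
  sourcemap: true,           // produces the *.map files listed above
  clean: true,               // matches "Cleaning 7 files"
  tsconfig: "tsconfig.json",
});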
package/dist/index.cjs ADDED
@@ -0,0 +1 @@
+ var e=Object.create,t=Object.defineProperty,n=Object.getOwnPropertyDescriptor,r=Object.getOwnPropertyNames,i=Object.getPrototypeOf,a=Object.prototype.hasOwnProperty,o=(e,i,o,s)=>{if(i&&typeof i==`object`||typeof i==`function`)for(var c=r(i),l=0,u=c.length,d;l<u;l++)d=c[l],!a.call(e,d)&&d!==o&&t(e,d,{get:(e=>i[e]).bind(null,d),enumerable:!(s=n(i,d))||s.enumerable});return e},s=(n,r,a)=>(a=n==null?{}:e(i(n)),o(r||!n||!n.__esModule?t(a,`default`,{value:n,enumerable:!0}):a,n));let c=require(`@azure/storage-blob`);c=s(c);let l=require(`@uploadista/core/errors`);l=s(l);let u=require(`@uploadista/observability`);u=s(u);let d=require(`effect`);d=s(d);const f=e=>typeof globalThis<`u`&&`Buffer`in globalThis?globalThis.Buffer.from(e):new Uint8Array(Array.from(e,e=>e.charCodeAt(0)));function p(e){return e&&e.length>0?e.reduce((e,t)=>e+(t?.size??0),0):0}function m({deliveryUrl:e,blockSize:t,minBlockSize:n=1024,maxBlocks:r=5e4,kvStore:i,maxConcurrentBlockUploads:a=60,expirationPeriodInMilliseconds:o=1e3*60*60*24*7,connectionString:s,sasUrl:m,credential:h,accountName:g,accountKey:_,containerName:v}){let y=t||8*1024*1024,b;if(s)b=c.BlobServiceClient.fromConnectionString(s);else if(m)b=new c.BlobServiceClient(m);else if(h){let e=g?`https://${g}.blob.core.windows.net`:m?.split(`?`)[0]||``;if(!e)throw Error(`When using credential authentication, either accountName or a valid sasUrl must be provided to determine the account URL`);b=new c.BlobServiceClient(e,h)}else if(g&&_)try{let e=new c.StorageSharedKeyCredential(g,_);b=new c.BlobServiceClient(`https://${g}.blob.core.windows.net`,e)}catch(e){throw Error(`StorageSharedKeyCredential is only available in Node.js environments. Use sasUrl or credential options for cross-platform compatibility. Original error: ${e}`)}else throw Error(`Azure authentication required. 
Provide one of: connectionString, sasUrl, credential, or accountName + accountKey (Node.js only)`);let x=b.getContainerClient(v),S=e=>`${e}.incomplete`,C=(e,t,n)=>(0,u.withAzureTimingMetrics)(u.azurePartUploadDurationHistogram,d.Effect.gen(function*(){yield*d.Effect.logInfo(`Uploading block`).pipe(d.Effect.annotateLogs({upload_id:e.id,block_id:n,block_size:t.length})),yield*(0,u.azureUploadPartsTotal)(d.Effect.succeed(1)),yield*(0,u.azurePartSizeHistogram)(d.Effect.succeed(t.length));try{let r=x.getBlockBlobClient(e.id);yield*d.Effect.tryPromise({try:async()=>{await r.stageBlock(n,t,t.length)},catch:r=>(d.Effect.runSync((0,u.trackAzureError)(`uploadBlock`,r,{upload_id:e.id,block_id:n,block_size:t.length})),l.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:r}))}),yield*d.Effect.logInfo(`Finished uploading block`).pipe(d.Effect.annotateLogs({upload_id:e.id,block_id:n,block_size:t.length}))}catch(r){throw d.Effect.runSync((0,u.trackAzureError)(`uploadBlock`,r,{upload_id:e.id,block_id:n,block_size:t.length})),r}})),w=(e,t)=>d.Effect.tryPromise({try:async()=>{await x.getBlockBlobClient(S(e)).upload(t,t.length)},catch:e=>l.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}).pipe(d.Effect.tap(()=>d.Effect.logInfo(`Finished uploading incomplete block`).pipe(d.Effect.annotateLogs({upload_id:e})))),T=e=>d.Effect.tryPromise({try:async()=>{try{return(await x.getBlockBlobClient(S(e)).download()).readableStreamBody}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return;throw e}},catch:e=>l.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),E=e=>d.Effect.tryPromise({try:async()=>{try{return(await x.getBlockBlobClient(S(e)).getProperties()).contentLength}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return;throw e}},catch:e=>l.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),D=e=>d.Effect.tryPromise({try:async()=>{await x.getBlockBlobClient(S(e)).deleteIfExists()},catch:e=>l.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),O=e=>d.Effect.gen(function*(){let t=yield*T(e);if(!t)return;let n=t.getReader(),r=[],i=0;try{for(;;){let e=yield*d.Effect.promise(()=>n.read());if(e.done)break;r.push(e.value),i+=e.value.length}}finally{n.releaseLock()}let a=d.Stream.fromIterable(r);return{size:i,stream:a}}),k=e=>{let t=e??5497558138880,i;i=t<=y?t:t<=y*r?y:Math.ceil(t/r);let a=Math.max(i,n);return Math.ceil(a/1024)*1024},A=e=>t=>d.Stream.async(n=>{let r=new Uint8Array,i=1,a=0,o=(t,r=!1)=>{d.Effect.runSync(d.Effect.logInfo(`Creating chunk`).pipe(d.Effect.annotateLogs({block_number:i,chunk_size:t.length,expected_size:e,is_final_chunk:r,total_bytes_processed:a+t.length}))),n.single({blockNumber:i++,data:t,size:t.length})},s=t=>{let n=new Uint8Array(r.length+t.length);for(n.set(r),n.set(t,r.length),r=n,a+=t.length;r.length>=e;){let t=r.slice(0,e);r=r.slice(e),o(t,!1)}};d.Effect.runFork(t.pipe(d.Stream.runForEach(e=>d.Effect.sync(()=>s(e))),d.Effect.andThen(()=>d.Effect.sync(()=>{r.length>0&&o(r,!0),n.end()})),d.Effect.catchAll(e=>d.Effect.sync(()=>n.fail(e)))))}),j=(e,t=0)=>n=>e?d.Effect.gen(function*(){let r=yield*d.Ref.make(t);return n.pipe(d.Stream.tap(t=>d.Effect.gen(function*(){e(yield*d.Ref.updateAndGet(r,e=>e+t.length))})))}).pipe(d.Stream.unwrap):n,M=(e,t,r,i,o)=>d.Effect.gen(function*(){yield*d.Effect.logInfo(`Uploading blocks`).pipe(d.Effect.annotateLogs({upload_id:e.id,init_offset:i,file_size:e.size}));let s=e.size,c=k(s);yield*d.Effect.logInfo(`Block size`).pipe(d.Effect.annotateLogs({upload_id:e.id,block_size:c}));let 
p=t.pipe(j(o,i),A(c)),m=yield*d.Ref.make(i),h=yield*d.Ref.make(0),g=yield*d.Ref.make([]),_=t=>d.Effect.gen(function*(){let i=yield*d.Ref.updateAndGet(m,e=>e+t.size),a=i>=(e.size||0);yield*d.Effect.logDebug(`Processing chunk`).pipe(d.Effect.annotateLogs({upload_id:e.id,cumulative_offset:i,file_size:e.size,chunk_size:t.size,is_final_block:a}));let o=r+t.blockNumber-1;if(t.size>c&&(yield*d.Effect.fail(l.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:Error(`Block size ${t.size} exceeds upload block size ${c}`)}))),t.size>=n||a){yield*d.Effect.logDebug(`Uploading multipart chunk`).pipe(d.Effect.annotateLogs({upload_id:e.id,block_number:o,chunk_size:t.size,min_block_size:n,is_final_block:a}));let r=f(`block-${o.toString().padStart(6,`0`)}`).toString(`base64`);yield*C(e,t.data,r),yield*d.Ref.update(g,e=>[...e,r]),yield*(0,u.azurePartSizeHistogram)(d.Effect.succeed(t.size))}else yield*w(e.id,t.data);yield*d.Ref.update(h,e=>e+t.size)});return yield*p.pipe(d.Stream.runForEach(e=>_(e)),d.Effect.withConcurrency(a)),{bytesUploaded:yield*d.Ref.get(h),blockIds:yield*d.Ref.get(g)}}),N=(e,t)=>d.Effect.tryPromise({try:async()=>{await x.getBlockBlobClient(e.id).commitBlockList(t,{blobHTTPHeaders:{blobContentType:e.metadata?.contentType?.toString(),blobCacheControl:e.metadata?.cacheControl?.toString()}})},catch:e=>l.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),P=e=>d.Effect.tryPromise({try:async()=>{try{return(await x.getBlockBlobClient(e).getBlockList(`committed`)).committedBlocks?.map(e=>({size:e.size}))??[]}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return[];throw e}},catch:e=>l.UploadistaError.fromCode(`UPLOAD_ID_NOT_FOUND`,{cause:e})}),F=e=>d.Effect.gen(function*(){yield*d.Effect.logInfo(`Removing cached data`).pipe(d.Effect.annotateLogs({upload_id:e})),yield*i.delete(e)}),I=t=>d.Effect.gen(function*(){return yield*(0,u.azureUploadRequestsTotal)(d.Effect.succeed(1)),yield*(0,u.azureActiveUploadsGauge)(d.Effect.succeed(1)),yield*(0,u.azureFileSizeHistogram)(d.Effect.succeed(t.size||0)),yield*d.Effect.logInfo(`Initializing Azure blob upload`).pipe(d.Effect.annotateLogs({upload_id:t.id})),t.creationDate=new Date().toISOString(),t.storage={id:t.storage.id,type:t.storage.type,path:t.id,bucket:v},t.url=`${e}/${t.id}`,yield*i.set(t.id,t),yield*d.Effect.logInfo(`Azure blob upload initialized`).pipe(d.Effect.annotateLogs({upload_id:t.id})),t}),L=e=>d.Effect.tryPromise({try:async()=>{let t=await x.getBlockBlobClient(e).download();if(t.blobBody)return t.blobBody;if(t.readableStreamBody)return t.readableStreamBody;throw Error(`No blob body or readable stream body`)},catch:e=>l.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),R=e=>d.Effect.gen(function*(){let t=yield*L(e);if(t instanceof Blob){let e=yield*d.Effect.promise(()=>t.arrayBuffer());return new Uint8Array(e)}let n=t.getReader(),r=[];try{for(;;){let e=yield*d.Effect.promise(()=>n.read());if(e.done)break;r.push(e.value)}}finally{n.releaseLock()}let i=r.reduce((e,t)=>e+t.length,0),a=new Uint8Array(i),o=0;for(let e of r)a.set(e,o),o+=e.length;return a}),z=(e,t,n)=>d.Effect.gen(function*(){let r=yield*i.get(e),a=(yield*P(e)).length+1,o=yield*O(e);if(o){yield*D(e);let i=t-o.size,s=o.stream.pipe(d.Stream.concat(n));return{uploadFile:r,nextBlockNumber:a-1,offset:i,incompleteBlockSize:o.size,data:s}}else 
return{uploadFile:r,nextBlockNumber:a,offset:t,incompleteBlockSize:0,data:n}}),B=(e,t)=>(0,u.withAzureUploadMetrics)(e.file_id,(0,u.withAzureTimingMetrics)(u.azureUploadDurationHistogram,d.Effect.gen(function*(){let n=Date.now(),{stream:r,file_id:i,offset:a}=e,{onProgress:o}=t,{uploadFile:s,nextBlockNumber:c,offset:l,data:f}=yield*z(i,a,r),{bytesUploaded:p,blockIds:m}=yield*M(s,f,c,l,o),h=l+p;if(s.size===h)try{yield*N(s,m),yield*F(i),yield*(0,u.logAzureUploadCompletion)(i,{fileSize:s.size||0,totalDurationMs:Date.now()-n,partsCount:m.length,averagePartSize:s.size,throughputBps:s.size/(Date.now()-n),retryCount:0}),yield*(0,u.azureUploadSuccessTotal)(d.Effect.succeed(1)),yield*(0,u.azureActiveUploadsGauge)(d.Effect.succeed(-1))}catch(e){throw yield*d.Effect.logError(`Failed to finish upload`).pipe(d.Effect.annotateLogs({upload_id:i,error:JSON.stringify(e)})),yield*(0,u.azureUploadErrorsTotal)(d.Effect.succeed(1)),d.Effect.runSync((0,u.trackAzureError)(`write`,e,{upload_id:i,operation:`commit`,blocks:m.length})),e}return h}))),V=e=>d.Effect.gen(function*(){let t=yield*i.get(e),n=0;try{n=p(yield*P(e))}catch(n){if(typeof n==`object`&&n&&`statusCode`in n&&n.statusCode===404)return{...t,offset:t.size,size:t.size,metadata:t.metadata,storage:t.storage};throw yield*d.Effect.logError(`Error on get upload`).pipe(d.Effect.annotateLogs({upload_id:e,error:JSON.stringify(n)})),n}let r=yield*E(e);return{...t,offset:n+(r??0),size:t.size,storage:t.storage}}),H=e=>d.Effect.gen(function*(){try{let t=x.getBlockBlobClient(e);yield*d.Effect.promise(()=>t.deleteIfExists()),yield*D(e)}catch(t){if(typeof t==`object`&&t&&`statusCode`in t&&t.statusCode===404)return yield*d.Effect.logError(`No file found`).pipe(d.Effect.annotateLogs({upload_id:e})),yield*d.Effect.fail(l.UploadistaError.fromCode(`FILE_NOT_FOUND`));throw d.Effect.runSync((0,u.trackAzureError)(`remove`,t,{upload_id:e})),t}yield*F(e),yield*(0,u.azureActiveUploadsGauge)(d.Effect.succeed(-1))}),U=()=>o,W=e=>{let t=new Date(e);return new Date(t.getTime()+U())},G=()=>d.Effect.tryPromise({try:async()=>{if(U()===0)return 0;let e=0,t=x.listBlobsFlat({includeMetadata:!0}),n=[];for await(let e of t)if(e.metadata?.creationDate){let t=new Date(e.metadata.creationDate);Date.now()>W(t.toISOString()).getTime()&&n.push(e.name)}for(let t of n)await x.deleteBlob(t),e++;return e},catch:e=>l.UploadistaError.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),K=()=>({supportsParallelUploads:!0,supportsConcatenation:!1,supportsDeferredLength:!0,supportsResumableUploads:!0,supportsTransactionalUploads:!0,maxConcurrentUploads:a,minChunkSize:n,maxChunkSize:4e3*1024*1024,maxParts:r,optimalChunkSize:y,requiresOrderedChunks:!1,requiresMimeTypeValidation:!0,maxValidationSize:void 0});return{bucket:v,create:I,remove:H,write:B,getUpload:V,read:R,readStream:L,deleteExpired:G(),getCapabilities:K,getChunkerConstraints:()=>({minChunkSize:n,maxChunkSize:4e3*1024*1024,optimalChunkSize:y,requiresOrderedChunks:!1}),validateUploadStrategy:e=>{let t=K(),n=(()=>{switch(e){case`parallel`:return t.supportsParallelUploads;case`single`:return!0;default:return!1}})();return d.Effect.succeed(n)}}}exports.azureStore=m;
package/dist/index.d.cts ADDED
@@ -0,0 +1,85 @@
+ import { TokenCredential } from "@azure/core-auth";
+ import { UploadistaError } from "@uploadista/core/errors";
+ import { DataStore, KvStore, UploadFile } from "@uploadista/core/types";
+ import { Effect } from "effect";
+
+ //#region src/azure-store.d.ts
+ type ChunkInfo = {
+ blockNumber: number;
+ data: Uint8Array;
+ size: number;
+ isFinalPart?: boolean;
+ };
+ type AzureStoreOptions = {
+ deliveryUrl: string;
+ /**
+ * The preferred block size for blocks sent to Azure. Can not be lower than 1 byte or more than 4000MiB.
+ * The server calculates the optimal block size, which takes this size into account,
+ * but may increase it to not exceed the Azure 50K blocks limit.
+ */
+ blockSize?: number;
+ /**
+ * The minimal block size for blocks.
+ * Can be used to ensure that all non-trailing blocks are exactly the same size.
+ * Can not be lower than 1 byte or more than 4000MiB.
+ */
+ minBlockSize?: number;
+ /**
+ * The maximum number of blocks allowed in a block blob upload. Defaults to 50,000.
+ */
+ maxBlocks?: number;
+ maxConcurrentBlockUploads?: number;
+ kvStore: KvStore<UploadFile>;
+ expirationPeriodInMilliseconds?: number;
+ connectionString?: string;
+ /**
+ * SAS URL for the storage account (works in all environments including browsers)
+ * Format: https://<account>.blob.core.windows.net?<sas-token>
+ */
+ sasUrl?: string;
+ /**
+ * TokenCredential for OAuth authentication (e.g., DefaultAzureCredential)
+ * Works in all environments and is the recommended approach for production
+ */
+ credential?: TokenCredential;
+ /**
+ * Account name and key for shared key authentication (Node.js only)
+ * @deprecated Use sasUrl or credential instead for cross-platform compatibility
+ */
+ accountName?: string;
+ /**
+ * @deprecated Use sasUrl or credential instead for cross-platform compatibility
+ */
+ accountKey?: string;
+ containerName: string;
+ };
+ type AzureStore = DataStore<UploadFile> & {
+ getUpload: (id: string) => Effect.Effect<UploadFile, UploadistaError>;
+ readStream: (id: string) => Effect.Effect<ReadableStream | Blob, UploadistaError>;
+ getChunkerConstraints: () => {
+ minChunkSize: number;
+ maxChunkSize: number;
+ optimalChunkSize: number;
+ requiresOrderedChunks: boolean;
+ };
+ };
+ declare function azureStore({
+ deliveryUrl,
+ blockSize,
+ minBlockSize,
+ // 1KB minimum
+ maxBlocks,
+ kvStore,
+ maxConcurrentBlockUploads,
+ expirationPeriodInMilliseconds,
+ // 1 week
+ connectionString,
+ sasUrl,
+ credential,
+ accountName,
+ accountKey,
+ containerName
+ }: AzureStoreOptions): AzureStore;
+ //#endregion
+ export { AzureStore, AzureStoreOptions, ChunkInfo, azureStore };
+ //# sourceMappingURL=index.d.cts.map
package/dist/index.d.cts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.cts","names":[],"sources":["../src/azure-store.ts"],"sourcesContent":[],"mappings":";;;;;;KA4CY,SAAA;;EAAA,IAAA,EAEJ,UAFa;EAOT,IAAA,EAAA,MAAA;EAmBO,WAAA,CAAA,EAAA,OAAA;CAAR;AAaI,KAhCH,iBAAA,GAgCG;EAAe,WAAA,EAAA,MAAA;EAmBlB;;;;;EACiB,SAAO,CAAA,EAAA,MAAA;EAGf;;;;;EASL,YAAA,CAAU,EAAA,MAAA;EACxB;;;EAGA,SAAA,CAAA,EAAA,MAAA;EACA,yBAAA,CAAA,EAAA,MAAA;EACA,OAAA,EAnDS,OAmDT,CAnDiB,UAmDjB,CAAA;EACA,8BAAA,CAAA,EAAA,MAAA;EACA,gBAAA,CAAA,EAAA,MAAA;EACA;;;;EAIA,MAAA,CAAA,EAAA,MAAA;EACC;;;;eA9CY;;;;;;;;;;;;KAmBH,UAAA,GAAa,UAAU;6BACN,MAAA,CAAO,OAAO,YAAY;8BAGhD,MAAA,CAAO,OAAO,iBAAiB,MAAM;;;;;;;;iBAS5B,UAAA;;;;;;;;;;;;;;;;GAcb,oBAAoB"}
package/dist/index.d.ts CHANGED
@@ -1,2 +1,85 @@
- export * from "./azure-store";
+ import { UploadistaError } from "@uploadista/core/errors";
+ import { Effect } from "effect";
+ import { TokenCredential } from "@azure/core-auth";
+ import { DataStore, KvStore, UploadFile } from "@uploadista/core/types";
+
+ //#region src/azure-store.d.ts
+ type ChunkInfo = {
+ blockNumber: number;
+ data: Uint8Array;
+ size: number;
+ isFinalPart?: boolean;
+ };
+ type AzureStoreOptions = {
+ deliveryUrl: string;
+ /**
+ * The preferred block size for blocks sent to Azure. Can not be lower than 1 byte or more than 4000MiB.
+ * The server calculates the optimal block size, which takes this size into account,
+ * but may increase it to not exceed the Azure 50K blocks limit.
+ */
+ blockSize?: number;
+ /**
+ * The minimal block size for blocks.
+ * Can be used to ensure that all non-trailing blocks are exactly the same size.
+ * Can not be lower than 1 byte or more than 4000MiB.
+ */
+ minBlockSize?: number;
+ /**
+ * The maximum number of blocks allowed in a block blob upload. Defaults to 50,000.
+ */
+ maxBlocks?: number;
+ maxConcurrentBlockUploads?: number;
+ kvStore: KvStore<UploadFile>;
+ expirationPeriodInMilliseconds?: number;
+ connectionString?: string;
+ /**
+ * SAS URL for the storage account (works in all environments including browsers)
+ * Format: https://<account>.blob.core.windows.net?<sas-token>
+ */
+ sasUrl?: string;
+ /**
+ * TokenCredential for OAuth authentication (e.g., DefaultAzureCredential)
+ * Works in all environments and is the recommended approach for production
+ */
+ credential?: TokenCredential;
+ /**
+ * Account name and key for shared key authentication (Node.js only)
+ * @deprecated Use sasUrl or credential instead for cross-platform compatibility
+ */
+ accountName?: string;
+ /**
+ * @deprecated Use sasUrl or credential instead for cross-platform compatibility
+ */
+ accountKey?: string;
+ containerName: string;
+ };
+ type AzureStore = DataStore<UploadFile> & {
+ getUpload: (id: string) => Effect.Effect<UploadFile, UploadistaError>;
+ readStream: (id: string) => Effect.Effect<ReadableStream | Blob, UploadistaError>;
+ getChunkerConstraints: () => {
+ minChunkSize: number;
+ maxChunkSize: number;
+ optimalChunkSize: number;
+ requiresOrderedChunks: boolean;
+ };
+ };
+ declare function azureStore({
+ deliveryUrl,
+ blockSize,
+ minBlockSize,
+ // 1KB minimum
+ maxBlocks,
+ kvStore,
+ maxConcurrentBlockUploads,
+ expirationPeriodInMilliseconds,
+ // 1 week
+ connectionString,
+ sasUrl,
+ credential,
+ accountName,
+ accountKey,
+ containerName
+ }: AzureStoreOptions): AzureStore;
+ //#endregion
+ export { AzureStore, AzureStoreOptions, ChunkInfo, azureStore };
  //# sourceMappingURL=index.d.ts.map
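The rebuilt declarations above describe the public surface: `azureStore(options: AzureStoreOptions): AzureStore`, with exactly one of the documented authentication options expected. The sketch below is only an illustrative consumer of those types; the `myKvStore` value, container name, and SAS URL are placeholders, not values from this package.

// Illustrative usage, built from the declarations above (not package docs).
import { azureStore } from "@uploadista/data-store-azure";
import type { KvStore, UploadFile } from "@uploadista/core/types";

// Assumed to be supplied by the host application (e.g. a Redis-backed KvStore).
declare const myKvStore: KvStore<UploadFile>;

const store = azureStore({
  deliveryUrl: "https://cdn.example.com/files",
  containerName: "uploads",
  kvStore: myKvStore,
  // Pick exactly one auth option: connectionString, sasUrl, credential,
  // or accountName + accountKey (Node.js only, deprecated).
  sasUrl: "https://myaccount.blob.core.windows.net?sv=...",
  blockSize: 8 * 1024 * 1024, // optional; 8 MiB is the default in the bundle
});

// store.getCapabilities(), store.getUpload(id), store.readStream(id), etc.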
package/dist/index.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,cAAc,eAAe,CAAC"}
+ {"version":3,"file":"index.d.ts","names":[],"sources":["../src/azure-store.ts"],"sourcesContent":[],"mappings":";;;;;;KA4CY,SAAA;;EAAA,IAAA,EAEJ,UAFa;EAOT,IAAA,EAAA,MAAA;EAmBO,WAAA,CAAA,EAAA,OAAA;CAAR;AAaI,KAhCH,iBAAA,GAgCG;EAAe,WAAA,EAAA,MAAA;EAmBlB;;;;;EACiB,SAAO,CAAA,EAAA,MAAA;EAGf;;;;;EASL,YAAA,CAAU,EAAA,MAAA;EACxB;;;EAGA,SAAA,CAAA,EAAA,MAAA;EACA,yBAAA,CAAA,EAAA,MAAA;EACA,OAAA,EAnDS,OAmDT,CAnDiB,UAmDjB,CAAA;EACA,8BAAA,CAAA,EAAA,MAAA;EACA,gBAAA,CAAA,EAAA,MAAA;EACA;;;;EAIA,MAAA,CAAA,EAAA,MAAA;EACC;;;;eA9CY;;;;;;;;;;;;KAmBH,UAAA,GAAa,UAAU;6BACN,MAAA,CAAO,OAAO,YAAY;8BAGhD,MAAA,CAAO,OAAO,iBAAiB,MAAM;;;;;;;;iBAS5B,UAAA;;;;;;;;;;;;;;;;GAcb,oBAAoB"}
package/dist/index.js CHANGED
@@ -1 +1,2 @@
- export * from "./azure-store";
+ import{BlobServiceClient as e,StorageSharedKeyCredential as t}from"@azure/storage-blob";import{UploadistaError as n}from"@uploadista/core/errors";import{azureActiveUploadsGauge as r,azureFileSizeHistogram as i,azurePartSizeHistogram as a,azurePartUploadDurationHistogram as o,azureUploadDurationHistogram as s,azureUploadErrorsTotal as c,azureUploadPartsTotal as l,azureUploadRequestsTotal as u,azureUploadSuccessTotal as d,logAzureUploadCompletion as f,trackAzureError as p,withAzureTimingMetrics as m,withAzureUploadMetrics as h}from"@uploadista/observability";import{Effect as g,Ref as _,Stream as v}from"effect";const y=e=>typeof globalThis<`u`&&`Buffer`in globalThis?globalThis.Buffer.from(e):new Uint8Array(Array.from(e,e=>e.charCodeAt(0)));function b(e){return e&&e.length>0?e.reduce((e,t)=>e+(t?.size??0),0):0}function x({deliveryUrl:x,blockSize:S,minBlockSize:C=1024,maxBlocks:w=5e4,kvStore:T,maxConcurrentBlockUploads:E=60,expirationPeriodInMilliseconds:D=1e3*60*60*24*7,connectionString:O,sasUrl:k,credential:A,accountName:j,accountKey:M,containerName:N}){let P=S||8*1024*1024,F;if(O)F=e.fromConnectionString(O);else if(k)F=new e(k);else if(A){let t=j?`https://${j}.blob.core.windows.net`:k?.split(`?`)[0]||``;if(!t)throw Error(`When using credential authentication, either accountName or a valid sasUrl must be provided to determine the account URL`);F=new e(t,A)}else if(j&&M)try{let n=new t(j,M);F=new e(`https://${j}.blob.core.windows.net`,n)}catch(e){throw Error(`StorageSharedKeyCredential is only available in Node.js environments. Use sasUrl or credential options for cross-platform compatibility. Original error: ${e}`)}else throw Error(`Azure authentication required. Provide one of: connectionString, sasUrl, credential, or accountName + accountKey (Node.js only)`);let I=F.getContainerClient(N),L=e=>`${e}.incomplete`,R=(e,t,r)=>m(o,g.gen(function*(){yield*g.logInfo(`Uploading block`).pipe(g.annotateLogs({upload_id:e.id,block_id:r,block_size:t.length})),yield*l(g.succeed(1)),yield*a(g.succeed(t.length));try{let i=I.getBlockBlobClient(e.id);yield*g.tryPromise({try:async()=>{await i.stageBlock(r,t,t.length)},catch:i=>(g.runSync(p(`uploadBlock`,i,{upload_id:e.id,block_id:r,block_size:t.length})),n.fromCode(`FILE_WRITE_ERROR`,{cause:i}))}),yield*g.logInfo(`Finished uploading block`).pipe(g.annotateLogs({upload_id:e.id,block_id:r,block_size:t.length}))}catch(n){throw g.runSync(p(`uploadBlock`,n,{upload_id:e.id,block_id:r,block_size:t.length})),n}})),z=(e,t)=>g.tryPromise({try:async()=>{await I.getBlockBlobClient(L(e)).upload(t,t.length)},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}).pipe(g.tap(()=>g.logInfo(`Finished uploading incomplete block`).pipe(g.annotateLogs({upload_id:e})))),B=e=>g.tryPromise({try:async()=>{try{return(await I.getBlockBlobClient(L(e)).download()).readableStreamBody}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return;throw e}},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),V=e=>g.tryPromise({try:async()=>{try{return(await I.getBlockBlobClient(L(e)).getProperties()).contentLength}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return;throw e}},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),H=e=>g.tryPromise({try:async()=>{await I.getBlockBlobClient(L(e)).deleteIfExists()},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),U=e=>g.gen(function*(){let t=yield*B(e);if(!t)return;let n=t.getReader(),r=[],i=0;try{for(;;){let 
e=yield*g.promise(()=>n.read());if(e.done)break;r.push(e.value),i+=e.value.length}}finally{n.releaseLock()}let a=v.fromIterable(r);return{size:i,stream:a}}),W=e=>{let t=e??5497558138880,n;n=t<=P?t:t<=P*w?P:Math.ceil(t/w);let r=Math.max(n,C);return Math.ceil(r/1024)*1024},G=e=>t=>v.async(n=>{let r=new Uint8Array,i=1,a=0,o=(t,r=!1)=>{g.runSync(g.logInfo(`Creating chunk`).pipe(g.annotateLogs({block_number:i,chunk_size:t.length,expected_size:e,is_final_chunk:r,total_bytes_processed:a+t.length}))),n.single({blockNumber:i++,data:t,size:t.length})},s=t=>{let n=new Uint8Array(r.length+t.length);for(n.set(r),n.set(t,r.length),r=n,a+=t.length;r.length>=e;){let t=r.slice(0,e);r=r.slice(e),o(t,!1)}};g.runFork(t.pipe(v.runForEach(e=>g.sync(()=>s(e))),g.andThen(()=>g.sync(()=>{r.length>0&&o(r,!0),n.end()})),g.catchAll(e=>g.sync(()=>n.fail(e)))))}),K=(e,t=0)=>n=>e?g.gen(function*(){let r=yield*_.make(t);return n.pipe(v.tap(t=>g.gen(function*(){e(yield*_.updateAndGet(r,e=>e+t.length))})))}).pipe(v.unwrap):n,q=(e,t,r,i,o)=>g.gen(function*(){yield*g.logInfo(`Uploading blocks`).pipe(g.annotateLogs({upload_id:e.id,init_offset:i,file_size:e.size}));let s=e.size,c=W(s);yield*g.logInfo(`Block size`).pipe(g.annotateLogs({upload_id:e.id,block_size:c}));let l=t.pipe(K(o,i),G(c)),u=yield*_.make(i),d=yield*_.make(0),f=yield*_.make([]),p=t=>g.gen(function*(){let i=yield*_.updateAndGet(u,e=>e+t.size),o=i>=(e.size||0);yield*g.logDebug(`Processing chunk`).pipe(g.annotateLogs({upload_id:e.id,cumulative_offset:i,file_size:e.size,chunk_size:t.size,is_final_block:o}));let s=r+t.blockNumber-1;if(t.size>c&&(yield*g.fail(n.fromCode(`FILE_WRITE_ERROR`,{cause:Error(`Block size ${t.size} exceeds upload block size ${c}`)}))),t.size>=C||o){yield*g.logDebug(`Uploading multipart chunk`).pipe(g.annotateLogs({upload_id:e.id,block_number:s,chunk_size:t.size,min_block_size:C,is_final_block:o}));let n=y(`block-${s.toString().padStart(6,`0`)}`).toString(`base64`);yield*R(e,t.data,n),yield*_.update(f,e=>[...e,n]),yield*a(g.succeed(t.size))}else yield*z(e.id,t.data);yield*_.update(d,e=>e+t.size)});return yield*l.pipe(v.runForEach(e=>p(e)),g.withConcurrency(E)),{bytesUploaded:yield*_.get(d),blockIds:yield*_.get(f)}}),J=(e,t)=>g.tryPromise({try:async()=>{await I.getBlockBlobClient(e.id).commitBlockList(t,{blobHTTPHeaders:{blobContentType:e.metadata?.contentType?.toString(),blobCacheControl:e.metadata?.cacheControl?.toString()}})},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),Y=e=>g.tryPromise({try:async()=>{try{return(await I.getBlockBlobClient(e).getBlockList(`committed`)).committedBlocks?.map(e=>({size:e.size}))??[]}catch(e){if(e&&typeof e==`object`&&`statusCode`in e&&e.statusCode===404)return[];throw e}},catch:e=>n.fromCode(`UPLOAD_ID_NOT_FOUND`,{cause:e})}),X=e=>g.gen(function*(){yield*g.logInfo(`Removing cached data`).pipe(g.annotateLogs({upload_id:e})),yield*T.delete(e)}),ee=e=>g.gen(function*(){return yield*u(g.succeed(1)),yield*r(g.succeed(1)),yield*i(g.succeed(e.size||0)),yield*g.logInfo(`Initializing Azure blob upload`).pipe(g.annotateLogs({upload_id:e.id})),e.creationDate=new Date().toISOString(),e.storage={id:e.storage.id,type:e.storage.type,path:e.id,bucket:N},e.url=`${x}/${e.id}`,yield*T.set(e.id,e),yield*g.logInfo(`Azure blob upload initialized`).pipe(g.annotateLogs({upload_id:e.id})),e}),Z=e=>g.tryPromise({try:async()=>{let t=await I.getBlockBlobClient(e).download();if(t.blobBody)return t.blobBody;if(t.readableStreamBody)return t.readableStreamBody;throw Error(`No blob body or readable stream 
body`)},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),te=e=>g.gen(function*(){let t=yield*Z(e);if(t instanceof Blob){let e=yield*g.promise(()=>t.arrayBuffer());return new Uint8Array(e)}let n=t.getReader(),r=[];try{for(;;){let e=yield*g.promise(()=>n.read());if(e.done)break;r.push(e.value)}}finally{n.releaseLock()}let i=r.reduce((e,t)=>e+t.length,0),a=new Uint8Array(i),o=0;for(let e of r)a.set(e,o),o+=e.length;return a}),ne=(e,t,n)=>g.gen(function*(){let r=yield*T.get(e),i=(yield*Y(e)).length+1,a=yield*U(e);if(a){yield*H(e);let o=t-a.size,s=a.stream.pipe(v.concat(n));return{uploadFile:r,nextBlockNumber:i-1,offset:o,incompleteBlockSize:a.size,data:s}}else return{uploadFile:r,nextBlockNumber:i,offset:t,incompleteBlockSize:0,data:n}}),re=(e,t)=>h(e.file_id,m(s,g.gen(function*(){let n=Date.now(),{stream:i,file_id:a,offset:o}=e,{onProgress:s}=t,{uploadFile:l,nextBlockNumber:u,offset:m,data:h}=yield*ne(a,o,i),{bytesUploaded:_,blockIds:v}=yield*q(l,h,u,m,s),y=m+_;if(l.size===y)try{yield*J(l,v),yield*X(a),yield*f(a,{fileSize:l.size||0,totalDurationMs:Date.now()-n,partsCount:v.length,averagePartSize:l.size,throughputBps:l.size/(Date.now()-n),retryCount:0}),yield*d(g.succeed(1)),yield*r(g.succeed(-1))}catch(e){throw yield*g.logError(`Failed to finish upload`).pipe(g.annotateLogs({upload_id:a,error:JSON.stringify(e)})),yield*c(g.succeed(1)),g.runSync(p(`write`,e,{upload_id:a,operation:`commit`,blocks:v.length})),e}return y}))),ie=e=>g.gen(function*(){let t=yield*T.get(e),n=0;try{n=b(yield*Y(e))}catch(n){if(typeof n==`object`&&n&&`statusCode`in n&&n.statusCode===404)return{...t,offset:t.size,size:t.size,metadata:t.metadata,storage:t.storage};throw yield*g.logError(`Error on get upload`).pipe(g.annotateLogs({upload_id:e,error:JSON.stringify(n)})),n}let r=yield*V(e);return{...t,offset:n+(r??0),size:t.size,storage:t.storage}}),ae=e=>g.gen(function*(){try{let t=I.getBlockBlobClient(e);yield*g.promise(()=>t.deleteIfExists()),yield*H(e)}catch(t){if(typeof t==`object`&&t&&`statusCode`in t&&t.statusCode===404)return yield*g.logError(`No file found`).pipe(g.annotateLogs({upload_id:e})),yield*g.fail(n.fromCode(`FILE_NOT_FOUND`));throw g.runSync(p(`remove`,t,{upload_id:e})),t}yield*X(e),yield*r(g.succeed(-1))}),Q=()=>D,oe=e=>{let t=new Date(e);return new Date(t.getTime()+Q())},se=()=>g.tryPromise({try:async()=>{if(Q()===0)return 0;let e=0,t=I.listBlobsFlat({includeMetadata:!0}),n=[];for await(let e of t)if(e.metadata?.creationDate){let t=new Date(e.metadata.creationDate);Date.now()>oe(t.toISOString()).getTime()&&n.push(e.name)}for(let t of n)await I.deleteBlob(t),e++;return e},catch:e=>n.fromCode(`FILE_WRITE_ERROR`,{cause:e})}),$=()=>({supportsParallelUploads:!0,supportsConcatenation:!1,supportsDeferredLength:!0,supportsResumableUploads:!0,supportsTransactionalUploads:!0,maxConcurrentUploads:E,minChunkSize:C,maxChunkSize:4e3*1024*1024,maxParts:w,optimalChunkSize:P,requiresOrderedChunks:!1,requiresMimeTypeValidation:!0,maxValidationSize:void 0});return{bucket:N,create:ee,remove:ae,write:re,getUpload:ie,read:te,readStream:Z,deleteExpired:se(),getCapabilities:$,getChunkerConstraints:()=>({minChunkSize:C,maxChunkSize:4e3*1024*1024,optimalChunkSize:P,requiresOrderedChunks:!1}),validateUploadStrategy:e=>{let t=$(),n=(()=>{switch(e){case`parallel`:return t.supportsParallelUploads;case`single`:return!0;default:return!1}})();return g.succeed(n)}}}export{x as azureStore};
+ //# sourceMappingURL=index.js.map
package/dist/index.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.js","names":["blobServiceClient: BlobServiceClient","BlobService","containerClient: ContainerClient","withTimingMetrics","partUploadDurationHistogram","readStream","uploadPartsTotal","partSizeHistogram","chunks: Uint8Array[]","optimalBlockSize: number","uploadRequestsTotal","activeUploadsGauge","fileSizeHistogram","result","withUploadMetrics","uploadDurationHistogram","uploadSuccessTotal","uploadErrorsTotal","expiredBlobs: string[]"],"sources":["../src/azure-store.ts"],"sourcesContent":["import type { TokenCredential } from \"@azure/core-auth\";\nimport {\n BlobServiceClient as BlobService,\n type BlobServiceClient,\n type ContainerClient,\n StorageSharedKeyCredential,\n} from \"@azure/storage-blob\";\nimport { UploadistaError } from \"@uploadista/core/errors\";\n\nimport type {\n DataStore,\n DataStoreCapabilities,\n DataStoreWriteOptions,\n KvStore,\n UploadFile,\n UploadStrategy,\n} from \"@uploadista/core/types\";\nimport {\n azureActiveUploadsGauge as activeUploadsGauge,\n azureFileSizeHistogram as fileSizeHistogram,\n logAzureUploadCompletion,\n azurePartSizeHistogram as partSizeHistogram,\n azurePartUploadDurationHistogram as partUploadDurationHistogram,\n trackAzureError,\n azureUploadDurationHistogram as uploadDurationHistogram,\n azureUploadErrorsTotal as uploadErrorsTotal,\n azureUploadPartsTotal as uploadPartsTotal,\n azureUploadRequestsTotal as uploadRequestsTotal,\n azureUploadSuccessTotal as uploadSuccessTotal,\n withAzureTimingMetrics as withTimingMetrics,\n withAzureUploadMetrics as withUploadMetrics,\n} from \"@uploadista/observability\";\nimport { Effect, Ref, Stream } from \"effect\";\n\n// Using base64 encoding that works in both Node.js and browser\nconst bufferFrom = (str: string) => {\n // Use global Buffer if available, otherwise fallback to btoa\n if (typeof globalThis !== \"undefined\" && \"Buffer\" in globalThis) {\n return (globalThis as any).Buffer.from(str);\n }\n // Fallback for browser environments\n return new Uint8Array(Array.from(str, (c) => c.charCodeAt(0)));\n};\n\nexport type ChunkInfo = {\n blockNumber: number;\n data: Uint8Array;\n size: number;\n isFinalPart?: boolean;\n};\n\nexport type AzureStoreOptions = {\n deliveryUrl: string;\n /**\n * The preferred block size for blocks sent to Azure. Can not be lower than 1 byte or more than 4000MiB.\n * The server calculates the optimal block size, which takes this size into account,\n * but may increase it to not exceed the Azure 50K blocks limit.\n */\n blockSize?: number;\n /**\n * The minimal block size for blocks.\n * Can be used to ensure that all non-trailing blocks are exactly the same size.\n * Can not be lower than 1 byte or more than 4000MiB.\n */\n minBlockSize?: number;\n /**\n * The maximum number of blocks allowed in a block blob upload. 
Defaults to 50,000.\n */\n maxBlocks?: number;\n maxConcurrentBlockUploads?: number;\n kvStore: KvStore<UploadFile>;\n expirationPeriodInMilliseconds?: number;\n // Azure authentication options (choose one)\n connectionString?: string;\n /**\n * SAS URL for the storage account (works in all environments including browsers)\n * Format: https://<account>.blob.core.windows.net?<sas-token>\n */\n sasUrl?: string;\n /**\n * TokenCredential for OAuth authentication (e.g., DefaultAzureCredential)\n * Works in all environments and is the recommended approach for production\n */\n credential?: TokenCredential;\n /**\n * Account name and key for shared key authentication (Node.js only)\n * @deprecated Use sasUrl or credential instead for cross-platform compatibility\n */\n accountName?: string;\n /**\n * @deprecated Use sasUrl or credential instead for cross-platform compatibility\n */\n accountKey?: string;\n containerName: string;\n};\n\nfunction calcOffsetFromBlocks(blocks?: Array<{ size: number }>) {\n return blocks && blocks.length > 0\n ? blocks.reduce((a, b) => a + (b?.size ?? 0), 0)\n : 0;\n}\n\nexport type AzureStore = DataStore<UploadFile> & {\n getUpload: (id: string) => Effect.Effect<UploadFile, UploadistaError>;\n readStream: (\n id: string,\n ) => Effect.Effect<ReadableStream | Blob, UploadistaError>;\n getChunkerConstraints: () => {\n minChunkSize: number;\n maxChunkSize: number;\n optimalChunkSize: number;\n requiresOrderedChunks: boolean;\n };\n};\n\nexport function azureStore({\n deliveryUrl,\n blockSize,\n minBlockSize = 1024, // 1KB minimum\n maxBlocks = 50_000,\n kvStore,\n maxConcurrentBlockUploads = 60,\n expirationPeriodInMilliseconds = 1000 * 60 * 60 * 24 * 7, // 1 week\n connectionString,\n sasUrl,\n credential,\n accountName,\n accountKey,\n containerName,\n}: AzureStoreOptions): AzureStore {\n const preferredBlockSize = blockSize || 8 * 1024 * 1024; // 8MB default\n const maxUploadSize = 5_497_558_138_880 as const; // 5TiB (Azure Block Blob limit)\n\n // Initialize Azure Blob Service Client with cross-platform authentication\n let blobServiceClient: BlobServiceClient;\n\n if (connectionString) {\n // Connection string (works in all environments)\n blobServiceClient = BlobService.fromConnectionString(connectionString);\n } else if (sasUrl) {\n // SAS URL (works in all environments including browsers)\n blobServiceClient = new BlobService(sasUrl);\n } else if (credential) {\n // OAuth token credential (works in all environments, recommended for production)\n const accountUrl = accountName\n ? `https://${accountName}.blob.core.windows.net`\n : sasUrl?.split(\"?\")[0] || \"\";\n if (!accountUrl) {\n throw new Error(\n \"When using credential authentication, either accountName or a valid sasUrl must be provided to determine the account URL\",\n );\n }\n blobServiceClient = new BlobService(accountUrl, credential);\n } else if (accountName && accountKey) {\n // Legacy shared key authentication (Node.js only)\n // This will fail in browser/edge environments\n try {\n const sharedKeyCredential = new StorageSharedKeyCredential(\n accountName,\n accountKey,\n );\n blobServiceClient = new BlobService(\n `https://${accountName}.blob.core.windows.net`,\n sharedKeyCredential,\n );\n } catch (error) {\n throw new Error(\n \"StorageSharedKeyCredential is only available in Node.js environments. \" +\n \"Use sasUrl or credential options for cross-platform compatibility. \" +\n `Original error: ${error}`,\n );\n }\n } else {\n throw new Error(\n \"Azure authentication required. 
Provide one of: \" +\n \"connectionString, sasUrl, credential, or accountName + accountKey (Node.js only)\",\n );\n }\n\n const containerClient: ContainerClient =\n blobServiceClient.getContainerClient(containerName);\n\n const incompletePartKey = (id: string) => {\n return `${id}.incomplete`;\n };\n\n const uploadBlock = (\n uploadFile: UploadFile,\n readStream: Uint8Array,\n blockId: string,\n ) => {\n return withTimingMetrics(\n partUploadDurationHistogram,\n Effect.gen(function* () {\n yield* Effect.logInfo(\"Uploading block\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n block_id: blockId,\n block_size: readStream.length,\n }),\n );\n\n yield* uploadPartsTotal(Effect.succeed(1));\n yield* partSizeHistogram(Effect.succeed(readStream.length));\n\n try {\n const blobClient = containerClient.getBlockBlobClient(uploadFile.id);\n yield* Effect.tryPromise({\n try: async () => {\n await blobClient.stageBlock(\n blockId,\n readStream,\n readStream.length,\n );\n },\n catch: (error) => {\n Effect.runSync(\n trackAzureError(\"uploadBlock\", error, {\n upload_id: uploadFile.id,\n block_id: blockId,\n block_size: readStream.length,\n }),\n );\n return UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n });\n },\n });\n\n yield* Effect.logInfo(\"Finished uploading block\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n block_id: blockId,\n block_size: readStream.length,\n }),\n );\n } catch (error) {\n Effect.runSync(\n trackAzureError(\"uploadBlock\", error, {\n upload_id: uploadFile.id,\n block_id: blockId,\n block_size: readStream.length,\n }),\n );\n throw error;\n }\n }),\n );\n };\n\n const uploadIncompleteBlock = (id: string, readStream: Uint8Array) => {\n return Effect.tryPromise({\n try: async () => {\n const blobClient = containerClient.getBlockBlobClient(\n incompletePartKey(id),\n );\n await blobClient.upload(readStream, readStream.length);\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", { cause: error as Error }),\n }).pipe(\n Effect.tap(() =>\n Effect.logInfo(\"Finished uploading incomplete block\").pipe(\n Effect.annotateLogs({\n upload_id: id,\n }),\n ),\n ),\n );\n };\n\n const getIncompleteBlock = (id: string) => {\n return Effect.tryPromise({\n try: async () => {\n try {\n const blobClient = containerClient.getBlockBlobClient(\n incompletePartKey(id),\n );\n const response = await blobClient.download();\n return response.readableStreamBody as unknown as ReadableStream;\n } catch (error) {\n if (\n error &&\n typeof error === \"object\" &&\n \"statusCode\" in error &&\n error.statusCode === 404\n ) {\n return undefined;\n }\n throw error;\n }\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", { cause: error as Error }),\n });\n };\n\n const getIncompleteBlockSize = (id: string) => {\n return Effect.tryPromise({\n try: async () => {\n try {\n const blobClient = containerClient.getBlockBlobClient(\n incompletePartKey(id),\n );\n const properties = await blobClient.getProperties();\n return properties.contentLength;\n } catch (error) {\n if (\n error &&\n typeof error === \"object\" &&\n \"statusCode\" in error &&\n error.statusCode === 404\n ) {\n return undefined;\n }\n throw error;\n }\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", { cause: error as Error }),\n });\n };\n\n const deleteIncompleteBlock = (id: string) => {\n return Effect.tryPromise({\n try: async () => {\n const blobClient = containerClient.getBlockBlobClient(\n incompletePartKey(id),\n 
);\n await blobClient.deleteIfExists();\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", { cause: error as Error }),\n });\n };\n\n const downloadIncompleteBlock = (id: string) => {\n return Effect.gen(function* () {\n const incompleteBlock = yield* getIncompleteBlock(id);\n\n if (!incompleteBlock) {\n return;\n }\n\n // Read the stream and collect all chunks to calculate size\n const reader = incompleteBlock.getReader();\n const chunks: Uint8Array[] = [];\n let incompleteBlockSize = 0;\n\n try {\n while (true) {\n const result = yield* Effect.promise(() => reader.read());\n if (result.done) break;\n chunks.push(result.value);\n incompleteBlockSize += result.value.length;\n }\n } finally {\n reader.releaseLock();\n }\n\n // Create a new readable stream from the chunks\n const stream = Stream.fromIterable(chunks);\n\n return {\n size: incompleteBlockSize,\n stream,\n };\n });\n };\n\n const calcOptimalBlockSize = (initSize?: number): number => {\n const size = initSize ?? maxUploadSize;\n let optimalBlockSize: number;\n\n if (size <= preferredBlockSize) {\n optimalBlockSize = size;\n } else if (size <= preferredBlockSize * maxBlocks) {\n optimalBlockSize = preferredBlockSize;\n } else {\n // Calculate the minimum block size needed to fit within the max blocks limit\n optimalBlockSize = Math.ceil(size / maxBlocks);\n }\n\n // Ensure the block size respects the minimum and is aligned properly\n const finalBlockSize = Math.max(optimalBlockSize, minBlockSize);\n\n // Round up to ensure consistent block sizes\n return Math.ceil(finalBlockSize / 1024) * 1024; // Align to 1KB boundaries\n };\n\n // Proper single-pass chunking using Effect's async stream constructor\n // Ensures all parts except the final part are exactly the same size (S3 requirement)\n const createChunkedStream =\n (chunkSize: number) =>\n <E>(stream: Stream.Stream<Uint8Array, E>): Stream.Stream<ChunkInfo, E> => {\n return Stream.async<ChunkInfo, E>((emit) => {\n let buffer = new Uint8Array(0);\n let blockNumber = 1;\n let totalBytesProcessed = 0;\n\n const emitChunk = (data: Uint8Array, isFinalChunk = false) => {\n // Log chunk information for debugging - use INFO level to see in logs\n Effect.runSync(\n Effect.logInfo(\"Creating chunk\").pipe(\n Effect.annotateLogs({\n block_number: blockNumber,\n chunk_size: data.length,\n expected_size: chunkSize,\n is_final_chunk: isFinalChunk,\n total_bytes_processed: totalBytesProcessed + data.length,\n }),\n ),\n );\n emit.single({\n blockNumber: blockNumber++,\n data,\n size: data.length,\n });\n };\n\n const processChunk = (newData: Uint8Array) => {\n // Combine buffer with new data\n const combined = new Uint8Array(buffer.length + newData.length);\n combined.set(buffer);\n combined.set(newData, buffer.length);\n buffer = combined;\n totalBytesProcessed += newData.length;\n\n // Emit full chunks of exactly chunkSize bytes\n // This ensures S3 multipart upload rule: all parts except last must be same size\n while (buffer.length >= chunkSize) {\n const chunk = buffer.slice(0, chunkSize);\n buffer = buffer.slice(chunkSize);\n emitChunk(chunk, false);\n }\n };\n\n // Process the stream\n Effect.runFork(\n stream.pipe(\n Stream.runForEach((chunk) =>\n Effect.sync(() => processChunk(chunk)),\n ),\n Effect.andThen(() =>\n Effect.sync(() => {\n // Emit final chunk if there's remaining data\n // The final chunk can be any size < chunkSize (S3 allows this)\n if (buffer.length > 0) {\n emitChunk(buffer, true);\n }\n emit.end();\n }),\n ),\n Effect.catchAll((error) => 
Effect.sync(() => emit.fail(error))),\n ),\n );\n });\n };\n\n // Byte-level progress tracking during streaming\n // This provides smooth, immediate progress feedback by tracking bytes as they\n // flow through the stream, before they reach S3. This solves the issue where\n // small files (< 5MB) would jump from 0% to 100% instantly.\n const withByteProgressTracking =\n (onProgress?: (totalBytes: number) => void, initialOffset = 0) =>\n <E, R>(stream: Stream.Stream<Uint8Array, E, R>) => {\n if (!onProgress) return stream;\n\n return Effect.gen(function* () {\n const totalBytesProcessedRef = yield* Ref.make(initialOffset);\n\n return stream.pipe(\n Stream.tap((chunk) =>\n Effect.gen(function* () {\n const newTotal = yield* Ref.updateAndGet(\n totalBytesProcessedRef,\n (total) => total + chunk.length,\n );\n onProgress(newTotal);\n }),\n ),\n );\n }).pipe(Stream.unwrap);\n };\n\n /**\n * Uploads a stream to Azure using multiple blocks\n */\n const uploadBlocks = (\n uploadFile: UploadFile,\n readStream: Stream.Stream<Uint8Array, UploadistaError>,\n initCurrentBlockNumber: number,\n initOffset: number,\n onProgress?: (newOffset: number) => void,\n ) => {\n return Effect.gen(function* () {\n yield* Effect.logInfo(\"Uploading blocks\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n init_offset: initOffset,\n file_size: uploadFile.size,\n }),\n );\n\n const size = uploadFile.size;\n\n const uploadBlockSize = calcOptimalBlockSize(size);\n yield* Effect.logInfo(\"Block size\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n block_size: uploadBlockSize,\n }),\n );\n // Enhanced Progress Tracking Strategy:\n // 1. Byte-level progress during streaming - provides immediate, smooth feedback\n // as data flows through the pipeline (even for small files)\n // 2. This tracks progress BEFORE S3 upload, giving users immediate feedback\n // 3. For large files with multiple parts, this provides granular updates\n // 4. 
For small files (single part), this prevents 0%->100% jumps\n const chunkStream = readStream.pipe(\n // Add byte-level progress tracking during streaming (immediate feedback)\n withByteProgressTracking(onProgress, initOffset),\n // Create chunks for S3 multipart upload with uniform part sizes\n createChunkedStream(uploadBlockSize),\n );\n\n // Track cumulative offset and total bytes with Effect Refs\n const cumulativeOffsetRef = yield* Ref.make(initOffset);\n const totalBytesUploadedRef = yield* Ref.make(0);\n const blockIdsRef = yield* Ref.make<string[]>([]);\n // Create a chunk upload function for the sink\n const uploadChunk = (chunkInfo: ChunkInfo) =>\n Effect.gen(function* () {\n // Calculate cumulative bytes to determine if this is the final block\n const cumulativeOffset = yield* Ref.updateAndGet(\n cumulativeOffsetRef,\n (offset) => offset + chunkInfo.size,\n );\n const isFinalBlock = cumulativeOffset >= (uploadFile.size || 0);\n\n yield* Effect.logDebug(\"Processing chunk\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n cumulative_offset: cumulativeOffset,\n file_size: uploadFile.size,\n chunk_size: chunkInfo.size,\n is_final_block: isFinalBlock,\n }),\n );\n\n const actualBlockNumber =\n initCurrentBlockNumber + chunkInfo.blockNumber - 1;\n\n if (chunkInfo.size > uploadBlockSize) {\n yield* Effect.fail(\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: new Error(\n `Block size ${chunkInfo.size} exceeds upload block size ${uploadBlockSize}`,\n ),\n }),\n );\n }\n\n // For parts that meet the minimum part size (5MB) or are the final part,\n // upload them as regular multipart parts\n if (chunkInfo.size >= minBlockSize || isFinalBlock) {\n yield* Effect.logDebug(\"Uploading multipart chunk\").pipe(\n Effect.annotateLogs({\n upload_id: uploadFile.id,\n block_number: actualBlockNumber,\n chunk_size: chunkInfo.size,\n min_block_size: minBlockSize,\n is_final_block: isFinalBlock,\n }),\n );\n // Generate block ID (base64 encoded, must be consistent)\n const blockId = bufferFrom(\n `block-${actualBlockNumber.toString().padStart(6, \"0\")}`,\n ).toString(\"base64\");\n yield* uploadBlock(uploadFile, chunkInfo.data, blockId);\n yield* Ref.update(blockIdsRef, (ids) => [...ids, blockId]);\n yield* partSizeHistogram(Effect.succeed(chunkInfo.size));\n } else {\n // Only upload as incomplete part if it's smaller than minimum and not final\n yield* uploadIncompleteBlock(uploadFile.id, chunkInfo.data);\n }\n\n yield* Ref.update(\n totalBytesUploadedRef,\n (total) => total + chunkInfo.size,\n );\n\n // Note: Byte-level progress is now tracked during streaming phase\n // This ensures smooth progress updates regardless of part size\n // Azure upload completion is tracked via totalBytesUploadedRef for accuracy\n });\n\n // Process chunks concurrently with controlled concurrency\n yield* chunkStream.pipe(\n Stream.runForEach((chunkInfo) => uploadChunk(chunkInfo)),\n Effect.withConcurrency(maxConcurrentBlockUploads),\n );\n\n return {\n bytesUploaded: yield* Ref.get(totalBytesUploadedRef),\n blockIds: yield* Ref.get(blockIdsRef),\n };\n });\n };\n\n /**\n * Commits all staged blocks to create the final blob\n */\n const commitBlocks = (uploadFile: UploadFile, blockIds: string[]) => {\n return Effect.tryPromise({\n try: async () => {\n const blobClient = containerClient.getBlockBlobClient(uploadFile.id);\n await blobClient.commitBlockList(blockIds, {\n blobHTTPHeaders: {\n blobContentType: uploadFile.metadata?.contentType?.toString(),\n blobCacheControl: 
uploadFile.metadata?.cacheControl?.toString(),\n },\n });\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", { cause: error as Error }),\n });\n };\n\n /**\n * Gets the committed blocks for a blob\n */\n const retrieveBlocks = (id: string) => {\n return Effect.tryPromise({\n try: async () => {\n try {\n const blobClient = containerClient.getBlockBlobClient(id);\n const blockList = await blobClient.getBlockList(\"committed\");\n\n const blocks =\n blockList.committedBlocks?.map((block) => ({\n size: block.size,\n })) ?? [];\n\n return blocks;\n } catch (error) {\n if (\n error &&\n typeof error === \"object\" &&\n \"statusCode\" in error &&\n error.statusCode === 404\n ) {\n return [];\n }\n throw error;\n }\n },\n catch: (error) =>\n UploadistaError.fromCode(\"UPLOAD_ID_NOT_FOUND\", {\n cause: error as Error,\n }),\n });\n };\n\n /**\n * Removes cached data for a given file\n */\n const clearCache = (id: string) => {\n return Effect.gen(function* () {\n yield* Effect.logInfo(\"Removing cached data\").pipe(\n Effect.annotateLogs({\n upload_id: id,\n }),\n );\n yield* kvStore.delete(id);\n });\n };\n\n /**\n * Creates a blob placeholder in Azure and stores metadata\n */\n const create = (upload: UploadFile) => {\n return Effect.gen(function* () {\n yield* uploadRequestsTotal(Effect.succeed(1));\n yield* activeUploadsGauge(Effect.succeed(1));\n yield* fileSizeHistogram(Effect.succeed(upload.size || 0));\n\n yield* Effect.logInfo(\"Initializing Azure blob upload\").pipe(\n Effect.annotateLogs({\n upload_id: upload.id,\n }),\n );\n\n upload.creationDate = new Date().toISOString();\n upload.storage = {\n id: upload.storage.id,\n type: upload.storage.type,\n path: upload.id,\n bucket: containerName,\n };\n upload.url = `${deliveryUrl}/${upload.id}`;\n\n yield* kvStore.set(upload.id, upload);\n yield* Effect.logInfo(\"Azure blob upload initialized\").pipe(\n Effect.annotateLogs({\n upload_id: upload.id,\n }),\n );\n\n return upload;\n });\n };\n\n const readStream = (\n id: string,\n ): Effect.Effect<ReadableStream | Blob, UploadistaError> => {\n return Effect.tryPromise({\n try: async () => {\n const blobClient = containerClient.getBlockBlobClient(id);\n const response = await blobClient.download();\n if (response.blobBody) {\n return response.blobBody;\n }\n if (response.readableStreamBody) {\n return response.readableStreamBody as unknown as ReadableStream;\n }\n throw new Error(\"No blob body or readable stream body\");\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", {\n cause: error as Error,\n }),\n });\n };\n\n const read = (id: string): Effect.Effect<Uint8Array, UploadistaError> => {\n return Effect.gen(function* () {\n const stream = yield* readStream(id);\n\n // Convert stream/blob to Uint8Array\n if (stream instanceof Blob) {\n const arrayBuffer = yield* Effect.promise(() => stream.arrayBuffer());\n return new Uint8Array(arrayBuffer as ArrayBuffer);\n }\n\n // Read from ReadableStream\n const reader = stream.getReader();\n const chunks: Uint8Array[] = [];\n\n try {\n while (true) {\n const result = yield* Effect.promise(() => reader.read());\n if (result.done) break;\n chunks.push(result.value);\n }\n } finally {\n reader.releaseLock();\n }\n\n // Concatenate all chunks\n const totalLength = chunks.reduce((acc, chunk) => acc + chunk.length, 0);\n const result = new Uint8Array(totalLength);\n let offset = 0;\n for (const chunk of chunks) {\n result.set(chunk, offset);\n offset += chunk.length;\n }\n\n return result;\n });\n };\n\n const 
prepareUpload = (\n file_id: string,\n initialOffset: number,\n initialData: Stream.Stream<Uint8Array, UploadistaError>,\n ) => {\n return Effect.gen(function* () {\n const uploadFile = yield* kvStore.get(file_id);\n\n const blocks = yield* retrieveBlocks(file_id);\n\n const blockNumber = blocks.length;\n const nextBlockNumber = blockNumber + 1;\n\n const incompleteBlock = yield* downloadIncompleteBlock(file_id);\n\n if (incompleteBlock) {\n yield* deleteIncompleteBlock(file_id);\n const offset = initialOffset - incompleteBlock.size;\n const data = incompleteBlock.stream.pipe(Stream.concat(initialData));\n return {\n uploadFile,\n nextBlockNumber: nextBlockNumber - 1,\n offset,\n incompleteBlockSize: incompleteBlock.size,\n data,\n };\n } else {\n return {\n uploadFile,\n nextBlockNumber,\n offset: initialOffset,\n incompleteBlockSize: 0,\n data: initialData,\n };\n }\n });\n };\n\n /**\n * Write to the file, starting at the provided offset\n */\n const write = (\n options: DataStoreWriteOptions,\n dependencies: {\n onProgress?: (chunkSize: number) => void;\n },\n ) => {\n return withUploadMetrics(\n options.file_id,\n withTimingMetrics(\n uploadDurationHistogram,\n Effect.gen(function* () {\n const startTime = Date.now();\n const {\n stream: initialData,\n file_id,\n offset: initialOffset,\n } = options;\n const { onProgress } = dependencies;\n\n const prepareResult = yield* prepareUpload(\n file_id,\n initialOffset,\n initialData,\n );\n\n const { uploadFile, nextBlockNumber, offset, data } = prepareResult;\n\n const { bytesUploaded, blockIds } = yield* uploadBlocks(\n uploadFile,\n data,\n nextBlockNumber,\n offset,\n onProgress,\n );\n\n const newOffset = offset + bytesUploaded;\n\n if (uploadFile.size === newOffset) {\n try {\n // Commit all blocks to finalize the blob\n yield* commitBlocks(uploadFile, blockIds);\n yield* clearCache(file_id);\n\n // Log completion with observability\n yield* logAzureUploadCompletion(file_id, {\n fileSize: uploadFile.size || 0,\n totalDurationMs: Date.now() - startTime,\n partsCount: blockIds.length,\n averagePartSize: uploadFile.size,\n throughputBps: uploadFile.size / (Date.now() - startTime),\n retryCount: 0,\n });\n\n yield* uploadSuccessTotal(Effect.succeed(1));\n yield* activeUploadsGauge(Effect.succeed(-1));\n } catch (error) {\n yield* Effect.logError(\"Failed to finish upload\").pipe(\n Effect.annotateLogs({\n upload_id: file_id,\n error: JSON.stringify(error),\n }),\n );\n yield* uploadErrorsTotal(Effect.succeed(1));\n Effect.runSync(\n trackAzureError(\"write\", error, {\n upload_id: file_id,\n operation: \"commit\",\n blocks: blockIds.length,\n }),\n );\n throw error;\n }\n }\n\n return newOffset;\n }),\n ),\n );\n };\n\n const getUpload = (id: string) => {\n return Effect.gen(function* () {\n const uploadFile = yield* kvStore.get(id);\n\n let offset = 0;\n\n try {\n const blocks = yield* retrieveBlocks(id);\n offset = calcOffsetFromBlocks(blocks);\n } catch (error) {\n // Check if the error is caused by the blob not being found\n if (\n typeof error === \"object\" &&\n error !== null &&\n \"statusCode\" in error &&\n error.statusCode === 404\n ) {\n return {\n ...uploadFile,\n offset: uploadFile.size as number,\n size: uploadFile.size,\n metadata: uploadFile.metadata,\n storage: uploadFile.storage,\n };\n }\n\n yield* Effect.logError(\"Error on get upload\").pipe(\n Effect.annotateLogs({\n upload_id: id,\n error: JSON.stringify(error),\n }),\n );\n throw error;\n }\n\n const incompleteBlockSize = yield* getIncompleteBlockSize(id);\n\n 
return {\n ...uploadFile,\n offset: offset + (incompleteBlockSize ?? 0),\n size: uploadFile.size,\n storage: uploadFile.storage,\n };\n });\n };\n\n const remove = (id: string) => {\n return Effect.gen(function* () {\n try {\n const blobClient = containerClient.getBlockBlobClient(id);\n yield* Effect.promise(() => blobClient.deleteIfExists());\n\n // Also delete incomplete block if it exists\n yield* deleteIncompleteBlock(id);\n } catch (error) {\n if (\n typeof error === \"object\" &&\n error !== null &&\n \"statusCode\" in error &&\n error.statusCode === 404\n ) {\n yield* Effect.logError(\"No file found\").pipe(\n Effect.annotateLogs({\n upload_id: id,\n }),\n );\n return yield* Effect.fail(UploadistaError.fromCode(\"FILE_NOT_FOUND\"));\n }\n Effect.runSync(\n trackAzureError(\"remove\", error, {\n upload_id: id,\n }),\n );\n throw error;\n }\n\n yield* clearCache(id);\n yield* activeUploadsGauge(Effect.succeed(-1));\n });\n };\n\n const getExpiration = () => {\n return expirationPeriodInMilliseconds;\n };\n\n const getExpirationDate = (created_at: string) => {\n const date = new Date(created_at);\n return new Date(date.getTime() + getExpiration());\n };\n\n const deleteExpired = (): Effect.Effect<number, UploadistaError> => {\n return Effect.tryPromise({\n try: async (): Promise<number> => {\n if (getExpiration() === 0) {\n return 0;\n }\n\n let deleted = 0;\n\n const response = containerClient.listBlobsFlat({\n includeMetadata: true,\n });\n\n const expiredBlobs: string[] = [];\n\n for await (const blob of response) {\n if (blob.metadata?.creationDate) {\n const creationDate = new Date(blob.metadata.creationDate);\n if (\n Date.now() >\n getExpirationDate(creationDate.toISOString()).getTime()\n ) {\n expiredBlobs.push(blob.name);\n }\n }\n }\n\n // Delete expired blobs\n for (const blobName of expiredBlobs) {\n await containerClient.deleteBlob(blobName);\n deleted++;\n }\n\n return deleted;\n },\n catch: (error) =>\n UploadistaError.fromCode(\"FILE_WRITE_ERROR\", { cause: error as Error }),\n });\n };\n\n const getCapabilities = (): DataStoreCapabilities => {\n return {\n supportsParallelUploads: true,\n supportsConcatenation: false, // Azure doesn't have native concatenation like GCS\n supportsDeferredLength: true,\n supportsResumableUploads: true,\n supportsTransactionalUploads: true,\n maxConcurrentUploads: maxConcurrentBlockUploads,\n minChunkSize: minBlockSize,\n maxChunkSize: 4000 * 1024 * 1024, // 4000MB Azure limit\n maxParts: maxBlocks,\n optimalChunkSize: preferredBlockSize,\n requiresOrderedChunks: false,\n requiresMimeTypeValidation: true,\n maxValidationSize: undefined, // no size limit\n };\n };\n\n const getChunkerConstraints = () => {\n return {\n minChunkSize: minBlockSize,\n maxChunkSize: 4000 * 1024 * 1024, // 4000MB Azure limit\n optimalChunkSize: preferredBlockSize,\n requiresOrderedChunks: false,\n };\n };\n\n const validateUploadStrategy = (\n strategy: UploadStrategy,\n ): Effect.Effect<boolean, never> => {\n const capabilities = getCapabilities();\n\n const result = (() => {\n switch (strategy) {\n case \"parallel\":\n return capabilities.supportsParallelUploads;\n case \"single\":\n return true;\n default:\n return false;\n }\n })();\n\n return Effect.succeed(result);\n };\n\n return {\n bucket: containerName,\n create,\n remove,\n write,\n getUpload,\n read,\n readStream,\n deleteExpired: deleteExpired(),\n getCapabilities,\n getChunkerConstraints,\n validateUploadStrategy,\n 
};\n}\n"],"mappings":"wmBAmCA,MAAM,EAAc,GAEd,OAAO,WAAe,KAAe,WAAY,WAC3C,WAAmB,OAAO,KAAK,EAAI,CAGtC,IAAI,WAAW,MAAM,KAAK,EAAM,GAAM,EAAE,WAAW,EAAE,CAAC,CAAC,CAuDhE,SAAS,EAAqB,EAAkC,CAC9D,OAAO,GAAU,EAAO,OAAS,EAC7B,EAAO,QAAQ,EAAG,IAAM,GAAK,GAAG,MAAQ,GAAI,EAAE,CAC9C,EAgBN,SAAgB,EAAW,CACzB,cACA,YACA,eAAe,KACf,YAAY,IACZ,UACA,4BAA4B,GAC5B,iCAAiC,IAAO,GAAK,GAAK,GAAK,EACvD,mBACA,SACA,aACA,cACA,aACA,iBACgC,CAChC,IAAM,EAAqB,GAAa,EAAI,KAAO,KAI/CA,EAEJ,GAAI,EAEF,EAAoBC,EAAY,qBAAqB,EAAiB,SAC7D,EAET,EAAoB,IAAIA,EAAY,EAAO,SAClC,EAAY,CAErB,IAAM,EAAa,EACf,WAAW,EAAY,wBACvB,GAAQ,MAAM,IAAI,CAAC,IAAM,GAC7B,GAAI,CAAC,EACH,MAAU,MACR,2HACD,CAEH,EAAoB,IAAIA,EAAY,EAAY,EAAW,SAClD,GAAe,EAGxB,GAAI,CACF,IAAM,EAAsB,IAAI,EAC9B,EACA,EACD,CACD,EAAoB,IAAIA,EACtB,WAAW,EAAY,wBACvB,EACD,OACM,EAAO,CACd,MAAU,MACR,4JAEqB,IACtB,MAGH,MAAU,MACR,kIAED,CAGH,IAAMC,EACJ,EAAkB,mBAAmB,EAAc,CAE/C,EAAqB,GAClB,GAAG,EAAG,aAGT,GACJ,EACA,EACA,IAEOC,EACLC,EACA,EAAO,IAAI,WAAa,CACtB,MAAO,EAAO,QAAQ,kBAAkB,CAAC,KACvC,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,SAAU,EACV,WAAYC,EAAW,OACxB,CAAC,CACH,CAED,MAAOC,EAAiB,EAAO,QAAQ,EAAE,CAAC,CAC1C,MAAOC,EAAkB,EAAO,QAAQF,EAAW,OAAO,CAAC,CAE3D,GAAI,CACF,IAAM,EAAa,EAAgB,mBAAmB,EAAW,GAAG,CACpE,MAAO,EAAO,WAAW,CACvB,IAAK,SAAY,CACf,MAAM,EAAW,WACf,EACAA,EACAA,EAAW,OACZ,EAEH,MAAQ,IACN,EAAO,QACL,EAAgB,cAAe,EAAO,CACpC,UAAW,EAAW,GACtB,SAAU,EACV,WAAYA,EAAW,OACxB,CAAC,CACH,CACM,EAAgB,SAAS,mBAAoB,CAClD,MAAO,EACR,CAAC,EAEL,CAAC,CAEF,MAAO,EAAO,QAAQ,2BAA2B,CAAC,KAChD,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,SAAU,EACV,WAAYA,EAAW,OACxB,CAAC,CACH,OACM,EAAO,CAQd,MAPA,EAAO,QACL,EAAgB,cAAe,EAAO,CACpC,UAAW,EAAW,GACtB,SAAU,EACV,WAAYA,EAAW,OACxB,CAAC,CACH,CACK,IAER,CACH,CAGG,GAAyB,EAAY,IAClC,EAAO,WAAW,CACvB,IAAK,SAAY,CAIf,MAHmB,EAAgB,mBACjC,EAAkB,EAAG,CACtB,CACgB,OAAOA,EAAYA,EAAW,OAAO,EAExD,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAAE,MAAO,EAAgB,CAAC,CAC1E,CAAC,CAAC,KACD,EAAO,QACL,EAAO,QAAQ,sCAAsC,CAAC,KACpD,EAAO,aAAa,CAClB,UAAW,EACZ,CAAC,CACH,CACF,CACF,CAGG,EAAsB,GACnB,EAAO,WAAW,CACvB,IAAK,SAAY,CACf,GAAI,CAKF,OADiB,MAHE,EAAgB,mBACjC,EAAkB,EAAG,CACtB,CACiC,UAAU,EAC5B,yBACT,EAAO,CACd,GACE,GACA,OAAO,GAAU,UACjB,eAAgB,GAChB,EAAM,aAAe,IAErB,OAEF,MAAM,IAGV,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAAE,MAAO,EAAgB,CAAC,CAC1E,CAAC,CAGE,EAA0B,GACvB,EAAO,WAAW,CACvB,IAAK,SAAY,CACf,GAAI,CAKF,OADmB,MAHA,EAAgB,mBACjC,EAAkB,EAAG,CACtB,CACmC,eAAe,EACjC,oBACX,EAAO,CACd,GACE,GACA,OAAO,GAAU,UACjB,eAAgB,GAChB,EAAM,aAAe,IAErB,OAEF,MAAM,IAGV,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAAE,MAAO,EAAgB,CAAC,CAC1E,CAAC,CAGE,EAAyB,GACtB,EAAO,WAAW,CACvB,IAAK,SAAY,CAIf,MAHmB,EAAgB,mBACjC,EAAkB,EAAG,CACtB,CACgB,gBAAgB,EAEnC,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAAE,MAAO,EAAgB,CAAC,CAC1E,CAAC,CAGE,EAA2B,GACxB,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAkB,MAAO,EAAmB,EAAG,CAErD,GAAI,CAAC,EACH,OAIF,IAAM,EAAS,EAAgB,WAAW,CACpCG,EAAuB,EAAE,CAC3B,EAAsB,EAE1B,GAAI,CACF,OAAa,CACX,IAAM,EAAS,MAAO,EAAO,YAAc,EAAO,MAAM,CAAC,CACzD,GAAI,EAAO,KAAM,MACjB,EAAO,KAAK,EAAO,MAAM,CACzB,GAAuB,EAAO,MAAM,eAE9B,CACR,EAAO,aAAa,CAItB,IAAM,EAAS,EAAO,aAAa,EAAO,CAE1C,MAAO,CACL,KAAM,EACN,SACD,EACD,CAGE,EAAwB,GAA8B,CAC1D,IAAM,EAAO,GAAY,cACrBC,EAEJ,AAME,EANE,GAAQ,EACS,EACV,GAAQ,EAAqB,EACnB,EAGA,KAAK,KAAK,EAAO,EAAU,CAIhD,IAAM,EAAiB,KAAK,IAAI,EAAkB,EAAa,CAG/D,OAAO,KAAK,KAAK,EAAiB,KAAK,CAAG,MAKtC,EACH,GACG,GACK,EAAO,MAAqB,GAAS,CAC1C,IAAI,EAAS,IAAI,WACb,EAAc,EACd,EAAsB,EAEpB,GAAa,EAAkB,EAAe,KAAU,CAE5D,EAAO,QACL,EAAO,QAAQ,iBAAiB,CAAC,KAC/B,EAAO,aAAa,CAClB,aAAc,EACd,WAAY,EAAK,OACjB,cAAe,EACf,eAAgB,EAChB,sBAAuB,EAAsB,EAAK,OACnD,CAAC,CACH,CACF,CACD,EAAK,OAAO,CACV,YAAa,IACb,OACA,KAAM,EAAK,OACZ,CAAC,EAGE,EAAgB,GAAwB,CAE5C,IAAM,EAAW,IAAI,WAAW,EAAO,OAAS,EAAQ,OAAO,CAQ/D,IAPA,EAAS,IAAI,EAAO,CACpB,EAAS,I
AAI,EAAS,EAAO,OAAO,CACpC,EAAS,EACT,GAAuB,EAAQ,OAIxB,EAAO,QAAU,GAAW,CACjC,IAAM,EAAQ,EAAO,MAAM,EAAG,EAAU,CACxC,EAAS,EAAO,MAAM,EAAU,CAChC,EAAU,EAAO,GAAM,GAK3B,EAAO,QACL,EAAO,KACL,EAAO,WAAY,GACjB,EAAO,SAAW,EAAa,EAAM,CAAC,CACvC,CACD,EAAO,YACL,EAAO,SAAW,CAGZ,EAAO,OAAS,GAClB,EAAU,EAAQ,GAAK,CAEzB,EAAK,KAAK,EACV,CACH,CACD,EAAO,SAAU,GAAU,EAAO,SAAW,EAAK,KAAK,EAAM,CAAC,CAAC,CAChE,CACF,EACD,CAOA,GACH,EAA2C,EAAgB,IACrD,GACA,EAEE,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAyB,MAAO,EAAI,KAAK,EAAc,CAE7D,OAAO,EAAO,KACZ,EAAO,IAAK,GACV,EAAO,IAAI,WAAa,CAKtB,EAJiB,MAAO,EAAI,aAC1B,EACC,GAAU,EAAQ,EAAM,OAC1B,CACmB,EACpB,CACH,CACF,EACD,CAAC,KAAK,EAAO,OAAO,CAhBE,EAsBtB,GACJ,EACA,EACA,EACA,EACA,IAEO,EAAO,IAAI,WAAa,CAC7B,MAAO,EAAO,QAAQ,mBAAmB,CAAC,KACxC,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,YAAa,EACb,UAAW,EAAW,KACvB,CAAC,CACH,CAED,IAAM,EAAO,EAAW,KAElB,EAAkB,EAAqB,EAAK,CAClD,MAAO,EAAO,QAAQ,aAAa,CAAC,KAClC,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,WAAY,EACb,CAAC,CACH,CAOD,IAAM,EAAcJ,EAAW,KAE7B,EAAyB,EAAY,EAAW,CAEhD,EAAoB,EAAgB,CACrC,CAGK,EAAsB,MAAO,EAAI,KAAK,EAAW,CACjD,EAAwB,MAAO,EAAI,KAAK,EAAE,CAC1C,EAAc,MAAO,EAAI,KAAe,EAAE,CAAC,CAE3C,EAAe,GACnB,EAAO,IAAI,WAAa,CAEtB,IAAM,EAAmB,MAAO,EAAI,aAClC,EACC,GAAW,EAAS,EAAU,KAChC,CACK,EAAe,IAAqB,EAAW,MAAQ,GAE7D,MAAO,EAAO,SAAS,mBAAmB,CAAC,KACzC,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,kBAAmB,EACnB,UAAW,EAAW,KACtB,WAAY,EAAU,KACtB,eAAgB,EACjB,CAAC,CACH,CAED,IAAM,EACJ,EAAyB,EAAU,YAAc,EAcnD,GAZI,EAAU,KAAO,IACnB,MAAO,EAAO,KACZ,EAAgB,SAAS,mBAAoB,CAC3C,MAAW,MACT,cAAc,EAAU,KAAK,6BAA6B,IAC3D,CACF,CAAC,CACH,EAKC,EAAU,MAAQ,GAAgB,EAAc,CAClD,MAAO,EAAO,SAAS,4BAA4B,CAAC,KAClD,EAAO,aAAa,CAClB,UAAW,EAAW,GACtB,aAAc,EACd,WAAY,EAAU,KACtB,eAAgB,EAChB,eAAgB,EACjB,CAAC,CACH,CAED,IAAM,EAAU,EACd,SAAS,EAAkB,UAAU,CAAC,SAAS,EAAG,IAAI,GACvD,CAAC,SAAS,SAAS,CACpB,MAAO,EAAY,EAAY,EAAU,KAAM,EAAQ,CACvD,MAAO,EAAI,OAAO,EAAc,GAAQ,CAAC,GAAG,EAAK,EAAQ,CAAC,CAC1D,MAAOE,EAAkB,EAAO,QAAQ,EAAU,KAAK,CAAC,MAGxD,MAAO,EAAsB,EAAW,GAAI,EAAU,KAAK,CAG7D,MAAO,EAAI,OACT,EACC,GAAU,EAAQ,EAAU,KAC9B,EAKD,CAQJ,OALA,MAAO,EAAY,KACjB,EAAO,WAAY,GAAc,EAAY,EAAU,CAAC,CACxD,EAAO,gBAAgB,EAA0B,CAClD,CAEM,CACL,cAAe,MAAO,EAAI,IAAI,EAAsB,CACpD,SAAU,MAAO,EAAI,IAAI,EAAY,CACtC,EACD,CAME,GAAgB,EAAwB,IACrC,EAAO,WAAW,CACvB,IAAK,SAAY,CAEf,MADmB,EAAgB,mBAAmB,EAAW,GAAG,CACnD,gBAAgB,EAAU,CACzC,gBAAiB,CACf,gBAAiB,EAAW,UAAU,aAAa,UAAU,CAC7D,iBAAkB,EAAW,UAAU,cAAc,UAAU,CAChE,CACF,CAAC,EAEJ,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAAE,MAAO,EAAgB,CAAC,CAC1E,CAAC,CAME,EAAkB,GACf,EAAO,WAAW,CACvB,IAAK,SAAY,CACf,GAAI,CASF,OAPkB,MADC,EAAgB,mBAAmB,EAAG,CACtB,aAAa,YAAY,EAGhD,iBAAiB,IAAK,IAAW,CACzC,KAAM,EAAM,KACb,EAAE,EAAI,EAAE,OAGJ,EAAO,CACd,GACE,GACA,OAAO,GAAU,UACjB,eAAgB,GAChB,EAAM,aAAe,IAErB,MAAO,EAAE,CAEX,MAAM,IAGV,MAAQ,GACN,EAAgB,SAAS,sBAAuB,CAC9C,MAAO,EACR,CAAC,CACL,CAAC,CAME,EAAc,GACX,EAAO,IAAI,WAAa,CAC7B,MAAO,EAAO,QAAQ,uBAAuB,CAAC,KAC5C,EAAO,aAAa,CAClB,UAAW,EACZ,CAAC,CACH,CACD,MAAO,EAAQ,OAAO,EAAG,EACzB,CAME,GAAU,GACP,EAAO,IAAI,WAAa,CA2B7B,OA1BA,MAAOG,EAAoB,EAAO,QAAQ,EAAE,CAAC,CAC7C,MAAOC,EAAmB,EAAO,QAAQ,EAAE,CAAC,CAC5C,MAAOC,EAAkB,EAAO,QAAQ,EAAO,MAAQ,EAAE,CAAC,CAE1D,MAAO,EAAO,QAAQ,iCAAiC,CAAC,KACtD,EAAO,aAAa,CAClB,UAAW,EAAO,GACnB,CAAC,CACH,CAED,EAAO,aAAe,IAAI,MAAM,CAAC,aAAa,CAC9C,EAAO,QAAU,CACf,GAAI,EAAO,QAAQ,GACnB,KAAM,EAAO,QAAQ,KACrB,KAAM,EAAO,GACb,OAAQ,EACT,CACD,EAAO,IAAM,GAAG,EAAY,GAAG,EAAO,KAEtC,MAAO,EAAQ,IAAI,EAAO,GAAI,EAAO,CACrC,MAAO,EAAO,QAAQ,gCAAgC,CAAC,KACrD,EAAO,aAAa,CAClB,UAAW,EAAO,GACnB,CAAC,CACH,CAEM,GACP,CAGE,EACJ,GAEO,EAAO,WAAW,CACvB,IAAK,SAAY,CAEf,IAAM,EAAW,MADE,EAAgB,mBAAmB,EAAG,CACvB,UAAU,CAC5C,GAAI,EAAS,SACX,OAAO,EAAS,SAElB,GAAI,EAAS,mBACX,OAAO,EAAS,mBAElB,MAAU,MAAM,uCAAuC,EAEzD,MAAQ,GACN,EAAgB,SAAS,m
BAAoB,CAC3C,MAAO,EACR,CAAC,CACL,CAAC,CAGE,GAAQ,GACL,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAS,MAAO,EAAW,EAAG,CAGpC,GAAI,aAAkB,KAAM,CAC1B,IAAM,EAAc,MAAO,EAAO,YAAc,EAAO,aAAa,CAAC,CACrE,OAAO,IAAI,WAAW,EAA2B,CAInD,IAAM,EAAS,EAAO,WAAW,CAC3BJ,EAAuB,EAAE,CAE/B,GAAI,CACF,OAAa,CACX,IAAMK,EAAS,MAAO,EAAO,YAAc,EAAO,MAAM,CAAC,CACzD,GAAIA,EAAO,KAAM,MACjB,EAAO,KAAKA,EAAO,MAAM,SAEnB,CACR,EAAO,aAAa,CAItB,IAAM,EAAc,EAAO,QAAQ,EAAK,IAAU,EAAM,EAAM,OAAQ,EAAE,CAClE,EAAS,IAAI,WAAW,EAAY,CACtC,EAAS,EACb,IAAK,IAAM,KAAS,EAClB,EAAO,IAAI,EAAO,EAAO,CACzB,GAAU,EAAM,OAGlB,OAAO,GACP,CAGE,IACJ,EACA,EACA,IAEO,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAa,MAAO,EAAQ,IAAI,EAAQ,CAKxC,GAHS,MAAO,EAAe,EAAQ,EAElB,OACW,EAEhC,EAAkB,MAAO,EAAwB,EAAQ,CAE/D,GAAI,EAAiB,CACnB,MAAO,EAAsB,EAAQ,CACrC,IAAM,EAAS,EAAgB,EAAgB,KACzC,EAAO,EAAgB,OAAO,KAAK,EAAO,OAAO,EAAY,CAAC,CACpE,MAAO,CACL,aACA,gBAAiB,EAAkB,EACnC,SACA,oBAAqB,EAAgB,KACrC,OACD,MAED,MAAO,CACL,aACA,kBACA,OAAQ,EACR,oBAAqB,EACrB,KAAM,EACP,EAEH,CAME,IACJ,EACA,IAIOC,EACL,EAAQ,QACRX,EACEY,EACA,EAAO,IAAI,WAAa,CACtB,IAAM,EAAY,KAAK,KAAK,CACtB,CACJ,OAAQ,EACR,UACA,OAAQ,GACN,EACE,CAAE,cAAe,EAQjB,CAAE,aAAY,kBAAiB,SAAQ,QANvB,MAAO,GAC3B,EACA,EACA,EACD,CAIK,CAAE,gBAAe,YAAa,MAAO,EACzC,EACA,EACA,EACA,EACA,EACD,CAEK,EAAY,EAAS,EAE3B,GAAI,EAAW,OAAS,EACtB,GAAI,CAEF,MAAO,EAAa,EAAY,EAAS,CACzC,MAAO,EAAW,EAAQ,CAG1B,MAAO,EAAyB,EAAS,CACvC,SAAU,EAAW,MAAQ,EAC7B,gBAAiB,KAAK,KAAK,CAAG,EAC9B,WAAY,EAAS,OACrB,gBAAiB,EAAW,KAC5B,cAAe,EAAW,MAAQ,KAAK,KAAK,CAAG,GAC/C,WAAY,EACb,CAAC,CAEF,MAAOC,EAAmB,EAAO,QAAQ,EAAE,CAAC,CAC5C,MAAOL,EAAmB,EAAO,QAAQ,GAAG,CAAC,OACtC,EAAO,CAed,MAdA,MAAO,EAAO,SAAS,0BAA0B,CAAC,KAChD,EAAO,aAAa,CAClB,UAAW,EACX,MAAO,KAAK,UAAU,EAAM,CAC7B,CAAC,CACH,CACD,MAAOM,EAAkB,EAAO,QAAQ,EAAE,CAAC,CAC3C,EAAO,QACL,EAAgB,QAAS,EAAO,CAC9B,UAAW,EACX,UAAW,SACX,OAAQ,EAAS,OAClB,CAAC,CACH,CACK,EAIV,OAAO,GACP,CACH,CACF,CAGG,GAAa,GACV,EAAO,IAAI,WAAa,CAC7B,IAAM,EAAa,MAAO,EAAQ,IAAI,EAAG,CAErC,EAAS,EAEb,GAAI,CAEF,EAAS,EADM,MAAO,EAAe,EAAG,CACH,OAC9B,EAAO,CAEd,GACE,OAAO,GAAU,UACjB,GACA,eAAgB,GAChB,EAAM,aAAe,IAErB,MAAO,CACL,GAAG,EACH,OAAQ,EAAW,KACnB,KAAM,EAAW,KACjB,SAAU,EAAW,SACrB,QAAS,EAAW,QACrB,CASH,MANA,MAAO,EAAO,SAAS,sBAAsB,CAAC,KAC5C,EAAO,aAAa,CAClB,UAAW,EACX,MAAO,KAAK,UAAU,EAAM,CAC7B,CAAC,CACH,CACK,EAGR,IAAM,EAAsB,MAAO,EAAuB,EAAG,CAE7D,MAAO,CACL,GAAG,EACH,OAAQ,GAAU,GAAuB,GACzC,KAAM,EAAW,KACjB,QAAS,EAAW,QACrB,EACD,CAGE,GAAU,GACP,EAAO,IAAI,WAAa,CAC7B,GAAI,CACF,IAAM,EAAa,EAAgB,mBAAmB,EAAG,CACzD,MAAO,EAAO,YAAc,EAAW,gBAAgB,CAAC,CAGxD,MAAO,EAAsB,EAAG,OACzB,EAAO,CACd,GACE,OAAO,GAAU,UACjB,GACA,eAAgB,GAChB,EAAM,aAAe,IAOrB,OALA,MAAO,EAAO,SAAS,gBAAgB,CAAC,KACtC,EAAO,aAAa,CAClB,UAAW,EACZ,CAAC,CACH,CACM,MAAO,EAAO,KAAK,EAAgB,SAAS,iBAAiB,CAAC,CAOvE,MALA,EAAO,QACL,EAAgB,SAAU,EAAO,CAC/B,UAAW,EACZ,CAAC,CACH,CACK,EAGR,MAAO,EAAW,EAAG,CACrB,MAAON,EAAmB,EAAO,QAAQ,GAAG,CAAC,EAC7C,CAGE,MACG,EAGH,GAAqB,GAAuB,CAChD,IAAM,EAAO,IAAI,KAAK,EAAW,CACjC,OAAO,IAAI,KAAK,EAAK,SAAS,CAAG,GAAe,CAAC,EAG7C,OACG,EAAO,WAAW,CACvB,IAAK,SAA6B,CAChC,GAAI,GAAe,GAAK,EACtB,MAAO,GAGT,IAAI,EAAU,EAER,EAAW,EAAgB,cAAc,CAC7C,gBAAiB,GAClB,CAAC,CAEIO,EAAyB,EAAE,CAEjC,UAAW,IAAM,KAAQ,EACvB,GAAI,EAAK,UAAU,aAAc,CAC/B,IAAM,EAAe,IAAI,KAAK,EAAK,SAAS,aAAa,CAEvD,KAAK,KAAK,CACV,GAAkB,EAAa,aAAa,CAAC,CAAC,SAAS,EAEvD,EAAa,KAAK,EAAK,KAAK,CAMlC,IAAK,IAAM,KAAY,EACrB,MAAM,EAAgB,WAAW,EAAS,CAC1C,IAGF,OAAO,GAET,MAAQ,GACN,EAAgB,SAAS,mBAAoB,CAAE,MAAO,EAAgB,CAAC,CAC1E,CAAC,CAGE,OACG,CACL,wBAAyB,GACzB,sBAAuB,GACvB,uBAAwB,GACxB,yBAA0B,GAC1B,6BAA8B,GAC9B,qBAAsB,EACtB,aAAc,EACd,aAAc,IAAO,KAAO,KAC5B,SAAU,EACV,iBAAkB,EAClB,sBAAuB,GACvB,2BAA4B,GAC5B,kBAAmB,IAAA,GACpB,EA+BH,MAAO,CACL,OAAQ,EACR,UACA,UACA,SACA,aACA,QACA,aACA,cAAe,IAAe,CAC9B,kBA
CA,2BArCO,CACL,aAAc,EACd,aAAc,IAAO,KAAO,KAC5B,iBAAkB,EAClB,sBAAuB,GACxB,EAiCD,uBA7BA,GACkC,CAClC,IAAM,EAAe,GAAiB,CAEhC,OAAgB,CACpB,OAAQ,EAAR,CACE,IAAK,WACH,OAAO,EAAa,wBACtB,IAAK,SACH,MAAO,GACT,QACE,MAAO,OAET,CAEJ,OAAO,EAAO,QAAQ,EAAO,EAe9B"}
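The bundled source embedded above (in dist/index.js.map) shows the resumable-upload surface of the store: getUpload reports the byte offset already persisted in Azure (committed blocks plus any incomplete block), and write expects a data stream that starts exactly at that offset. The sketch below is illustrative only and is not part of the published package; it assumes the package re-exports the AzureStore type, that DataStoreWriteOptions carries only the three fields shown here, and that a store instance already exists (see the construction sketch after the removed azure-store.d.ts further down).

// Illustrative only — not part of the published package contents.
import { Effect, Stream } from "effect";
import type { AzureStore } from "@uploadista/data-store-azure"; // assumed re-export

declare const store: AzureStore; // built elsewhere

const resumeUpload = (fileId: string, remainingBytes: Uint8Array) =>
  Effect.gen(function* () {
    // Ask the store how many bytes are already safe in Azure.
    const upload = yield* store.getUpload(fileId);

    // Feed only the missing bytes, starting at the reported offset.
    return yield* store.write(
      {
        file_id: fileId,
        offset: upload.offset,
        stream: Stream.fromIterable([remainingBytes]),
      },
      { onProgress: (bytes) => console.log(`${bytes} bytes processed`) },
    );
  });

// Effect.runPromise(resumeUpload("my-upload-id", new Uint8Array(0)));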
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
    "name": "@uploadista/data-store-azure",
    "type": "module",
-   "version": "0.0.3",
+   "version": "0.0.4",
    "description": "Azure Blob Storage data store for Uploadista",
    "license": "MIT",
    "author": "Uploadista",
@@ -16,15 +16,16 @@
      "@azure/core-auth": "^1.8.0",
      "@azure/storage-blob": "12.29.1",
      "effect": "3.18.4",
-     "@uploadista/core": "0.0.3",
-     "@uploadista/kv-store-memory": "0.0.3",
-     "@uploadista/observability": "0.0.3"
+     "@uploadista/core": "0.0.4",
+     "@uploadista/kv-store-memory": "0.0.4",
+     "@uploadista/observability": "0.0.4"
    },
    "devDependencies": {
-     "@uploadista/typescript-config": "0.0.3"
+     "tsdown": "0.15.9",
+     "@uploadista/typescript-config": "0.0.4"
    },
    "scripts": {
-     "build": "tsc -b",
+     "build": "tsdown",
      "format": "biome format --write ./src",
      "lint": "biome lint --write ./src",
      "check": "biome check --write ./src",
@@ -0,0 +1,11 @@
+ import { defineConfig } from "tsdown";
+
+ export default defineConfig({
+   entry: {
+     index: "src/index.ts",
+   },
+   minify: true,
+   format: ["esm", "cjs"],
+   dts: true,
+   outDir: "dist",
+ });
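This added config is what produces the file set in the build log near the top of this diff: a minified ESM bundle (dist/index.js with dist/index.d.ts) and a minified CJS bundle (dist/index.cjs with dist/index.d.cts). The consumer-side sketch below is illustrative only; it assumes the entry point re-exports azureStore and that the package's exports map, which is not part of this diff, routes the import and require conditions to those files.

// Illustrative only — the "exports" wiring is not shown in this diff.
// ESM consumers resolve the ESM bundle (dist/index.js + dist/index.d.ts):
import { azureStore } from "@uploadista/data-store-azure";

// CJS consumers resolve dist/index.cjs + dist/index.d.cts instead:
//   const { azureStore } = require("@uploadista/data-store-azure");

console.log(typeof azureStore); // "function"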
@@ -1,67 +0,0 @@
- import type { TokenCredential } from "@azure/core-auth";
- import { UploadistaError } from "@uploadista/core/errors";
- import type { DataStore, KvStore, UploadFile } from "@uploadista/core/types";
- import { Effect } from "effect";
- export type ChunkInfo = {
-   blockNumber: number;
-   data: Uint8Array;
-   size: number;
-   isFinalPart?: boolean;
- };
- export type AzureStoreOptions = {
-   deliveryUrl: string;
-   /**
-    * The preferred block size for blocks sent to Azure. Can not be lower than 1 byte or more than 4000MiB.
-    * The server calculates the optimal block size, which takes this size into account,
-    * but may increase it to not exceed the Azure 50K blocks limit.
-    */
-   blockSize?: number;
-   /**
-    * The minimal block size for blocks.
-    * Can be used to ensure that all non-trailing blocks are exactly the same size.
-    * Can not be lower than 1 byte or more than 4000MiB.
-    */
-   minBlockSize?: number;
-   /**
-    * The maximum number of blocks allowed in a block blob upload. Defaults to 50,000.
-    */
-   maxBlocks?: number;
-   maxConcurrentBlockUploads?: number;
-   kvStore: KvStore<UploadFile>;
-   expirationPeriodInMilliseconds?: number;
-   connectionString?: string;
-   /**
-    * SAS URL for the storage account (works in all environments including browsers)
-    * Format: https://<account>.blob.core.windows.net?<sas-token>
-    */
-   sasUrl?: string;
-   /**
-    * TokenCredential for OAuth authentication (e.g., DefaultAzureCredential)
-    * Works in all environments and is the recommended approach for production
-    */
-   credential?: TokenCredential;
-   /**
-    * Account name and key for shared key authentication (Node.js only)
-    * @deprecated Use sasUrl or credential instead for cross-platform compatibility
-    */
-   accountName?: string;
-   /**
-    * @deprecated Use sasUrl or credential instead for cross-platform compatibility
-    */
-   accountKey?: string;
-   containerName: string;
- };
- export type AzureStore = DataStore<UploadFile> & {
-   getUpload: (id: string) => Effect.Effect<UploadFile, UploadistaError>;
-   readStream: (id: string) => Effect.Effect<ReadableStream | Blob, UploadistaError>;
-   getChunkerConstraints: () => {
-     minChunkSize: number;
-     maxChunkSize: number;
-     optimalChunkSize: number;
-     requiresOrderedChunks: boolean;
-   };
- };
- export declare function azureStore({ deliveryUrl, blockSize, minBlockSize, // 1KB minimum
- maxBlocks, kvStore, maxConcurrentBlockUploads, expirationPeriodInMilliseconds, // 1 week
- connectionString, sasUrl, credential, accountName, accountKey, containerName, }: AzureStoreOptions): AzureStore;
- //# sourceMappingURL=azure-store.d.ts.map
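The declaration file removed above (its content now ships in the bundled dist/index.d.ts and dist/index.d.cts) is still the clearest description of the store's options, including the four authentication paths. The construction sketch below is a minimal, illustrative example based on that AzureStoreOptions type: the memoryKvStore export name is a guess for @uploadista/kv-store-memory, @azure/identity is not a dependency of this package, and the deliveryUrl and account values are placeholders.

// Illustrative only — option names come from AzureStoreOptions above;
// `memoryKvStore` and the @azure/identity dependency are assumptions.
import { DefaultAzureCredential } from "@azure/identity";
import { azureStore } from "@uploadista/data-store-azure";
import { memoryKvStore } from "@uploadista/kv-store-memory"; // hypothetical export name

const store = azureStore({
  deliveryUrl: "https://cdn.example.com/files",
  containerName: "uploads",
  kvStore: memoryKvStore(),
  // Cross-platform auth, as recommended by the doc comments above;
  // connectionString, sasUrl, or accountName + accountKey (Node.js only) also work.
  credential: new DefaultAzureCredential(),
  accountName: "myaccount", // with `credential`, used to derive the account URL
  blockSize: 8 * 1024 * 1024, // preferred block size; may be raised to stay under the 50K block limit
});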
@@ -1 +0,0 @@
- {"version":3,"file":"azure-store.d.ts","sourceRoot":"","sources":["../src/azure-store.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,kBAAkB,CAAC;AAOxD,OAAO,EAAE,eAAe,EAAE,MAAM,yBAAyB,CAAC;AAE1D,OAAO,KAAK,EACV,SAAS,EAGT,OAAO,EACP,UAAU,EAEX,MAAM,wBAAwB,CAAC;AAgBhC,OAAO,EAAE,MAAM,EAAe,MAAM,QAAQ,CAAC;AAY7C,MAAM,MAAM,SAAS,GAAG;IACtB,WAAW,EAAE,MAAM,CAAC;IACpB,IAAI,EAAE,UAAU,CAAC;IACjB,IAAI,EAAE,MAAM,CAAC;IACb,WAAW,CAAC,EAAE,OAAO,CAAC;CACvB,CAAC;AAEF,MAAM,MAAM,iBAAiB,GAAG;IAC9B,WAAW,EAAE,MAAM,CAAC;IACpB;;;;OAIG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB;;;;OAIG;IACH,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,yBAAyB,CAAC,EAAE,MAAM,CAAC;IACnC,OAAO,EAAE,OAAO,CAAC,UAAU,CAAC,CAAC;IAC7B,8BAA8B,CAAC,EAAE,MAAM,CAAC;IAExC,gBAAgB,CAAC,EAAE,MAAM,CAAC;IAC1B;;;OAGG;IACH,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB;;;OAGG;IACH,UAAU,CAAC,EAAE,eAAe,CAAC;IAC7B;;;OAGG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,aAAa,EAAE,MAAM,CAAC;CACvB,CAAC;AAQF,MAAM,MAAM,UAAU,GAAG,SAAS,CAAC,UAAU,CAAC,GAAG;IAC/C,SAAS,EAAE,CAAC,EAAE,EAAE,MAAM,KAAK,MAAM,CAAC,MAAM,CAAC,UAAU,EAAE,eAAe,CAAC,CAAC;IACtE,UAAU,EAAE,CACV,EAAE,EAAE,MAAM,KACP,MAAM,CAAC,MAAM,CAAC,cAAc,GAAG,IAAI,EAAE,eAAe,CAAC,CAAC;IAC3D,qBAAqB,EAAE,MAAM;QAC3B,YAAY,EAAE,MAAM,CAAC;QACrB,YAAY,EAAE,MAAM,CAAC;QACrB,gBAAgB,EAAE,MAAM,CAAC;QACzB,qBAAqB,EAAE,OAAO,CAAC;KAChC,CAAC;CACH,CAAC;AAEF,wBAAgB,UAAU,CAAC,EACzB,WAAW,EACX,SAAS,EACT,YAAmB,EAAE,cAAc;AACnC,SAAkB,EAClB,OAAO,EACP,yBAA8B,EAC9B,8BAAwD,EAAE,SAAS;AACnE,gBAAgB,EAChB,MAAM,EACN,UAAU,EACV,WAAW,EACX,UAAU,EACV,aAAa,GACd,EAAE,iBAAiB,GAAG,UAAU,CAy7BhC"}
@@ -1,725 +0,0 @@
1
- import { BlobServiceClient as BlobService, StorageSharedKeyCredential, } from "@azure/storage-blob";
2
- import { UploadistaError } from "@uploadista/core/errors";
3
- import { azureActiveUploadsGauge as activeUploadsGauge, azureFileSizeHistogram as fileSizeHistogram, logAzureUploadCompletion, azurePartSizeHistogram as partSizeHistogram, azurePartUploadDurationHistogram as partUploadDurationHistogram, trackAzureError, azureUploadDurationHistogram as uploadDurationHistogram, azureUploadErrorsTotal as uploadErrorsTotal, azureUploadPartsTotal as uploadPartsTotal, azureUploadRequestsTotal as uploadRequestsTotal, azureUploadSuccessTotal as uploadSuccessTotal, withAzureTimingMetrics as withTimingMetrics, withAzureUploadMetrics as withUploadMetrics, } from "@uploadista/observability";
4
- import { Effect, Ref, Stream } from "effect";
5
- // Using base64 encoding that works in both Node.js and browser
6
- const bufferFrom = (str) => {
7
- // Use global Buffer if available, otherwise fallback to btoa
8
- if (typeof globalThis !== "undefined" && "Buffer" in globalThis) {
9
- return globalThis.Buffer.from(str);
10
- }
11
- // Fallback for browser environments
12
- return new Uint8Array(Array.from(str, (c) => c.charCodeAt(0)));
13
- };
14
- function calcOffsetFromBlocks(blocks) {
15
- return blocks && blocks.length > 0
16
- ? blocks.reduce((a, b) => a + (b?.size ?? 0), 0)
17
- : 0;
18
- }
19
- export function azureStore({ deliveryUrl, blockSize, minBlockSize = 1024, // 1KB minimum
20
- maxBlocks = 50_000, kvStore, maxConcurrentBlockUploads = 60, expirationPeriodInMilliseconds = 1000 * 60 * 60 * 24 * 7, // 1 week
21
- connectionString, sasUrl, credential, accountName, accountKey, containerName, }) {
22
- const preferredBlockSize = blockSize || 8 * 1024 * 1024; // 8MB default
23
- const maxUploadSize = 5_497_558_138_880; // 5TiB (Azure Block Blob limit)
24
- // Initialize Azure Blob Service Client with cross-platform authentication
25
- let blobServiceClient;
26
- if (connectionString) {
27
- // Connection string (works in all environments)
28
- blobServiceClient = BlobService.fromConnectionString(connectionString);
29
- }
30
- else if (sasUrl) {
31
- // SAS URL (works in all environments including browsers)
32
- blobServiceClient = new BlobService(sasUrl);
33
- }
34
- else if (credential) {
35
- // OAuth token credential (works in all environments, recommended for production)
36
- const accountUrl = accountName
37
- ? `https://${accountName}.blob.core.windows.net`
38
- : sasUrl?.split("?")[0] || "";
39
- if (!accountUrl) {
40
- throw new Error("When using credential authentication, either accountName or a valid sasUrl must be provided to determine the account URL");
41
- }
42
- blobServiceClient = new BlobService(accountUrl, credential);
43
- }
44
- else if (accountName && accountKey) {
45
- // Legacy shared key authentication (Node.js only)
46
- // This will fail in browser/edge environments
47
- try {
48
- const sharedKeyCredential = new StorageSharedKeyCredential(accountName, accountKey);
49
- blobServiceClient = new BlobService(`https://${accountName}.blob.core.windows.net`, sharedKeyCredential);
50
- }
51
- catch (error) {
52
- throw new Error("StorageSharedKeyCredential is only available in Node.js environments. " +
53
- "Use sasUrl or credential options for cross-platform compatibility. " +
54
- `Original error: ${error}`);
55
- }
56
- }
57
- else {
58
- throw new Error("Azure authentication required. Provide one of: " +
59
- "connectionString, sasUrl, credential, or accountName + accountKey (Node.js only)");
60
- }
61
- const containerClient = blobServiceClient.getContainerClient(containerName);
62
- const incompletePartKey = (id) => {
63
- return `${id}.incomplete`;
64
- };
65
- const uploadBlock = (uploadFile, readStream, blockId) => {
66
- return withTimingMetrics(partUploadDurationHistogram, Effect.gen(function* () {
67
- yield* Effect.logInfo("Uploading block").pipe(Effect.annotateLogs({
68
- upload_id: uploadFile.id,
69
- block_id: blockId,
70
- block_size: readStream.length,
71
- }));
72
- yield* uploadPartsTotal(Effect.succeed(1));
73
- yield* partSizeHistogram(Effect.succeed(readStream.length));
74
- try {
75
- const blobClient = containerClient.getBlockBlobClient(uploadFile.id);
76
- yield* Effect.tryPromise({
77
- try: async () => {
78
- await blobClient.stageBlock(blockId, readStream, readStream.length);
79
- },
80
- catch: (error) => {
81
- Effect.runSync(trackAzureError("uploadBlock", error, {
82
- upload_id: uploadFile.id,
83
- block_id: blockId,
84
- block_size: readStream.length,
85
- }));
86
- return UploadistaError.fromCode("FILE_WRITE_ERROR", {
87
- cause: error,
88
- });
89
- },
90
- });
91
- yield* Effect.logInfo("Finished uploading block").pipe(Effect.annotateLogs({
92
- upload_id: uploadFile.id,
93
- block_id: blockId,
94
- block_size: readStream.length,
95
- }));
96
- }
97
- catch (error) {
98
- Effect.runSync(trackAzureError("uploadBlock", error, {
99
- upload_id: uploadFile.id,
100
- block_id: blockId,
101
- block_size: readStream.length,
102
- }));
103
- throw error;
104
- }
105
- }));
106
- };
107
- const uploadIncompleteBlock = (id, readStream) => {
108
- return Effect.tryPromise({
109
- try: async () => {
110
- const blobClient = containerClient.getBlockBlobClient(incompletePartKey(id));
111
- await blobClient.upload(readStream, readStream.length);
112
- },
113
- catch: (error) => UploadistaError.fromCode("FILE_WRITE_ERROR", { cause: error }),
114
- }).pipe(Effect.tap(() => Effect.logInfo("Finished uploading incomplete block").pipe(Effect.annotateLogs({
115
- upload_id: id,
116
- }))));
117
- };
118
- const getIncompleteBlock = (id) => {
119
- return Effect.tryPromise({
120
- try: async () => {
121
- try {
122
- const blobClient = containerClient.getBlockBlobClient(incompletePartKey(id));
123
- const response = await blobClient.download();
124
- return response.readableStreamBody;
125
- }
126
- catch (error) {
127
- if (error &&
128
- typeof error === "object" &&
129
- "statusCode" in error &&
130
- error.statusCode === 404) {
131
- return undefined;
132
- }
133
- throw error;
134
- }
135
- },
136
- catch: (error) => UploadistaError.fromCode("FILE_WRITE_ERROR", { cause: error }),
137
- });
138
- };
139
- const getIncompleteBlockSize = (id) => {
140
- return Effect.tryPromise({
141
- try: async () => {
142
- try {
143
- const blobClient = containerClient.getBlockBlobClient(incompletePartKey(id));
144
- const properties = await blobClient.getProperties();
145
- return properties.contentLength;
146
- }
147
- catch (error) {
148
- if (error &&
149
- typeof error === "object" &&
150
- "statusCode" in error &&
151
- error.statusCode === 404) {
152
- return undefined;
153
- }
154
- throw error;
155
- }
156
- },
157
- catch: (error) => UploadistaError.fromCode("FILE_WRITE_ERROR", { cause: error }),
158
- });
159
- };
160
- const deleteIncompleteBlock = (id) => {
161
- return Effect.tryPromise({
162
- try: async () => {
163
- const blobClient = containerClient.getBlockBlobClient(incompletePartKey(id));
164
- await blobClient.deleteIfExists();
165
- },
166
- catch: (error) => UploadistaError.fromCode("FILE_WRITE_ERROR", { cause: error }),
167
- });
168
- };
169
- const downloadIncompleteBlock = (id) => {
170
- return Effect.gen(function* () {
171
- const incompleteBlock = yield* getIncompleteBlock(id);
172
- if (!incompleteBlock) {
173
- return;
174
- }
175
- // Read the stream and collect all chunks to calculate size
176
- const reader = incompleteBlock.getReader();
177
- const chunks = [];
178
- let incompleteBlockSize = 0;
179
- try {
180
- while (true) {
181
- const result = yield* Effect.promise(() => reader.read());
182
- if (result.done)
183
- break;
184
- chunks.push(result.value);
185
- incompleteBlockSize += result.value.length;
186
- }
187
- }
188
- finally {
189
- reader.releaseLock();
190
- }
191
- // Create a new readable stream from the chunks
192
- const stream = Stream.fromIterable(chunks);
193
- return {
194
- size: incompleteBlockSize,
195
- stream,
196
- };
197
- });
198
- };
199
- const calcOptimalBlockSize = (initSize) => {
200
- const size = initSize ?? maxUploadSize;
201
- let optimalBlockSize;
202
- if (size <= preferredBlockSize) {
203
- optimalBlockSize = size;
204
- }
205
- else if (size <= preferredBlockSize * maxBlocks) {
206
- optimalBlockSize = preferredBlockSize;
207
- }
208
- else {
209
- // Calculate the minimum block size needed to fit within the max blocks limit
210
- optimalBlockSize = Math.ceil(size / maxBlocks);
211
- }
212
- // Ensure the block size respects the minimum and is aligned properly
213
- const finalBlockSize = Math.max(optimalBlockSize, minBlockSize);
214
- // Round up to ensure consistent block sizes
215
- return Math.ceil(finalBlockSize / 1024) * 1024; // Align to 1KB boundaries
216
- };
217
- // Proper single-pass chunking using Effect's async stream constructor
218
- // Ensures all parts except the final part are exactly the same size (S3 requirement)
219
- const createChunkedStream = (chunkSize) => (stream) => {
220
- return Stream.async((emit) => {
221
- let buffer = new Uint8Array(0);
222
- let blockNumber = 1;
223
- let totalBytesProcessed = 0;
224
- const emitChunk = (data, isFinalChunk = false) => {
225
- // Log chunk information for debugging - use INFO level to see in logs
226
- Effect.runSync(Effect.logInfo("Creating chunk").pipe(Effect.annotateLogs({
227
- block_number: blockNumber,
228
- chunk_size: data.length,
229
- expected_size: chunkSize,
230
- is_final_chunk: isFinalChunk,
231
- total_bytes_processed: totalBytesProcessed + data.length,
232
- })));
233
- emit.single({
234
- blockNumber: blockNumber++,
235
- data,
236
- size: data.length,
237
- });
238
- };
239
- const processChunk = (newData) => {
240
- // Combine buffer with new data
241
- const combined = new Uint8Array(buffer.length + newData.length);
242
- combined.set(buffer);
243
- combined.set(newData, buffer.length);
244
- buffer = combined;
245
- totalBytesProcessed += newData.length;
246
- // Emit full chunks of exactly chunkSize bytes
247
- // This ensures S3 multipart upload rule: all parts except last must be same size
248
- while (buffer.length >= chunkSize) {
249
- const chunk = buffer.slice(0, chunkSize);
250
- buffer = buffer.slice(chunkSize);
251
- emitChunk(chunk, false);
252
- }
253
- };
254
- // Process the stream
255
- Effect.runFork(stream.pipe(Stream.runForEach((chunk) => Effect.sync(() => processChunk(chunk))), Effect.andThen(() => Effect.sync(() => {
256
- // Emit final chunk if there's remaining data
257
- // The final chunk can be any size < chunkSize (S3 allows this)
258
- if (buffer.length > 0) {
259
- emitChunk(buffer, true);
260
- }
261
- emit.end();
262
- })), Effect.catchAll((error) => Effect.sync(() => emit.fail(error)))));
263
- });
264
- };
265
- // Byte-level progress tracking during streaming
266
- // This provides smooth, immediate progress feedback by tracking bytes as they
267
- // flow through the stream, before they reach S3. This solves the issue where
268
- // small files (< 5MB) would jump from 0% to 100% instantly.
269
- const withByteProgressTracking = (onProgress, initialOffset = 0) => (stream) => {
270
- if (!onProgress)
271
- return stream;
272
- return Effect.gen(function* () {
273
- const totalBytesProcessedRef = yield* Ref.make(initialOffset);
274
- return stream.pipe(Stream.tap((chunk) => Effect.gen(function* () {
275
- const newTotal = yield* Ref.updateAndGet(totalBytesProcessedRef, (total) => total + chunk.length);
276
- onProgress(newTotal);
277
- })));
278
- }).pipe(Stream.unwrap);
279
- };
280
- /**
281
- * Uploads a stream to Azure using multiple blocks
282
- */
283
- const uploadBlocks = (uploadFile, readStream, initCurrentBlockNumber, initOffset, onProgress) => {
284
- return Effect.gen(function* () {
285
- yield* Effect.logInfo("Uploading blocks").pipe(Effect.annotateLogs({
286
- upload_id: uploadFile.id,
287
- init_offset: initOffset,
288
- file_size: uploadFile.size,
289
- }));
290
- const size = uploadFile.size;
291
- const uploadBlockSize = calcOptimalBlockSize(size);
292
- yield* Effect.logInfo("Block size").pipe(Effect.annotateLogs({
293
- upload_id: uploadFile.id,
294
- block_size: uploadBlockSize,
295
- }));
296
- // Enhanced Progress Tracking Strategy:
297
- // 1. Byte-level progress during streaming - provides immediate, smooth feedback
298
- // as data flows through the pipeline (even for small files)
299
- // 2. This tracks progress BEFORE S3 upload, giving users immediate feedback
300
- // 3. For large files with multiple parts, this provides granular updates
301
- // 4. For small files (single part), this prevents 0%->100% jumps
302
- const chunkStream = readStream.pipe(
303
- // Add byte-level progress tracking during streaming (immediate feedback)
304
- withByteProgressTracking(onProgress, initOffset),
305
- // Create chunks for S3 multipart upload with uniform part sizes
306
- createChunkedStream(uploadBlockSize));
307
- // Track cumulative offset and total bytes with Effect Refs
308
- const cumulativeOffsetRef = yield* Ref.make(initOffset);
309
- const totalBytesUploadedRef = yield* Ref.make(0);
310
- const blockIdsRef = yield* Ref.make([]);
311
- // Create a chunk upload function for the sink
312
- const uploadChunk = (chunkInfo) => Effect.gen(function* () {
313
- // Calculate cumulative bytes to determine if this is the final block
314
- const cumulativeOffset = yield* Ref.updateAndGet(cumulativeOffsetRef, (offset) => offset + chunkInfo.size);
315
- const isFinalBlock = cumulativeOffset >= (uploadFile.size || 0);
316
- yield* Effect.logDebug("Processing chunk").pipe(Effect.annotateLogs({
317
- upload_id: uploadFile.id,
318
- cumulative_offset: cumulativeOffset,
319
- file_size: uploadFile.size,
320
- chunk_size: chunkInfo.size,
321
- is_final_block: isFinalBlock,
322
- }));
323
- const actualBlockNumber = initCurrentBlockNumber + chunkInfo.blockNumber - 1;
324
- if (chunkInfo.size > uploadBlockSize) {
325
- yield* Effect.fail(UploadistaError.fromCode("FILE_WRITE_ERROR", {
326
- cause: new Error(`Block size ${chunkInfo.size} exceeds upload block size ${uploadBlockSize}`),
327
- }));
328
- }
329
- // For parts that meet the minimum part size (5MB) or are the final part,
330
- // upload them as regular multipart parts
331
- if (chunkInfo.size >= minBlockSize || isFinalBlock) {
332
- yield* Effect.logDebug("Uploading multipart chunk").pipe(Effect.annotateLogs({
333
- upload_id: uploadFile.id,
334
- block_number: actualBlockNumber,
335
- chunk_size: chunkInfo.size,
336
- min_block_size: minBlockSize,
337
- is_final_block: isFinalBlock,
338
- }));
339
- // Generate block ID (base64 encoded, must be consistent)
340
- const blockId = bufferFrom(`block-${actualBlockNumber.toString().padStart(6, "0")}`).toString("base64");
341
- yield* uploadBlock(uploadFile, chunkInfo.data, blockId);
342
- yield* Ref.update(blockIdsRef, (ids) => [...ids, blockId]);
343
- yield* partSizeHistogram(Effect.succeed(chunkInfo.size));
344
- }
345
- else {
346
- // Only upload as incomplete part if it's smaller than minimum and not final
347
- yield* uploadIncompleteBlock(uploadFile.id, chunkInfo.data);
348
- }
349
- yield* Ref.update(totalBytesUploadedRef, (total) => total + chunkInfo.size);
350
- // Note: Byte-level progress is now tracked during streaming phase
351
- // This ensures smooth progress updates regardless of part size
352
- // Azure upload completion is tracked via totalBytesUploadedRef for accuracy
353
- });
354
- // Process chunks concurrently with controlled concurrency
355
- yield* chunkStream.pipe(Stream.runForEach((chunkInfo) => uploadChunk(chunkInfo)), Effect.withConcurrency(maxConcurrentBlockUploads));
356
- return {
357
- bytesUploaded: yield* Ref.get(totalBytesUploadedRef),
358
- blockIds: yield* Ref.get(blockIdsRef),
359
- };
360
- });
361
- };
362
- /**
363
- * Commits all staged blocks to create the final blob
364
- */
365
- const commitBlocks = (uploadFile, blockIds) => {
366
- return Effect.tryPromise({
367
- try: async () => {
368
- const blobClient = containerClient.getBlockBlobClient(uploadFile.id);
369
- await blobClient.commitBlockList(blockIds, {
370
- blobHTTPHeaders: {
371
- blobContentType: uploadFile.metadata?.contentType?.toString(),
372
- blobCacheControl: uploadFile.metadata?.cacheControl?.toString(),
373
- },
374
- });
375
- },
376
- catch: (error) => UploadistaError.fromCode("FILE_WRITE_ERROR", { cause: error }),
377
- });
378
- };
379
- /**
380
- * Gets the committed blocks for a blob
381
- */
382
- const retrieveBlocks = (id) => {
383
- return Effect.tryPromise({
384
- try: async () => {
385
- try {
386
- const blobClient = containerClient.getBlockBlobClient(id);
387
- const blockList = await blobClient.getBlockList("committed");
388
- const blocks = blockList.committedBlocks?.map((block) => ({
389
- size: block.size,
390
- })) ?? [];
391
- return blocks;
392
- }
393
- catch (error) {
394
- if (error &&
395
- typeof error === "object" &&
396
- "statusCode" in error &&
397
- error.statusCode === 404) {
398
- return [];
399
- }
400
- throw error;
401
- }
402
- },
403
- catch: (error) => UploadistaError.fromCode("UPLOAD_ID_NOT_FOUND", {
404
- cause: error,
405
- }),
406
- });
407
- };
408
- /**
409
- * Removes cached data for a given file
410
- */
411
- const clearCache = (id) => {
412
- return Effect.gen(function* () {
413
- yield* Effect.logInfo("Removing cached data").pipe(Effect.annotateLogs({
414
- upload_id: id,
415
- }));
416
- yield* kvStore.delete(id);
417
- });
418
- };
419
- /**
420
- * Creates a blob placeholder in Azure and stores metadata
421
- */
422
- const create = (upload) => {
423
- return Effect.gen(function* () {
424
- yield* uploadRequestsTotal(Effect.succeed(1));
425
- yield* activeUploadsGauge(Effect.succeed(1));
426
- yield* fileSizeHistogram(Effect.succeed(upload.size || 0));
427
- yield* Effect.logInfo("Initializing Azure blob upload").pipe(Effect.annotateLogs({
428
- upload_id: upload.id,
429
- }));
430
- upload.creationDate = new Date().toISOString();
431
- upload.storage = {
432
- id: upload.storage.id,
433
- type: upload.storage.type,
434
- path: upload.id,
435
- bucket: containerName,
436
- };
437
- upload.url = `${deliveryUrl}/${upload.id}`;
438
- yield* kvStore.set(upload.id, upload);
439
- yield* Effect.logInfo("Azure blob upload initialized").pipe(Effect.annotateLogs({
440
- upload_id: upload.id,
441
- }));
442
- return upload;
443
- });
444
- };
445
- const readStream = (id) => {
446
- return Effect.tryPromise({
447
- try: async () => {
448
- const blobClient = containerClient.getBlockBlobClient(id);
449
- const response = await blobClient.download();
450
- if (response.blobBody) {
451
- return response.blobBody;
452
- }
453
- if (response.readableStreamBody) {
454
- return response.readableStreamBody;
455
- }
456
- throw new Error("No blob body or readable stream body");
457
- },
458
- catch: (error) => UploadistaError.fromCode("FILE_WRITE_ERROR", {
459
- cause: error,
460
- }),
461
- });
462
- };
463
- const read = (id) => {
464
- return Effect.gen(function* () {
465
- const stream = yield* readStream(id);
466
- // Convert stream/blob to Uint8Array
467
- if (stream instanceof Blob) {
468
- const arrayBuffer = yield* Effect.promise(() => stream.arrayBuffer());
469
- return new Uint8Array(arrayBuffer);
470
- }
471
- // Read from ReadableStream
472
- const reader = stream.getReader();
473
- const chunks = [];
474
- try {
475
- while (true) {
476
- const result = yield* Effect.promise(() => reader.read());
477
- if (result.done)
478
- break;
479
- chunks.push(result.value);
480
- }
481
- }
482
- finally {
483
- reader.releaseLock();
484
- }
485
- // Concatenate all chunks
486
- const totalLength = chunks.reduce((acc, chunk) => acc + chunk.length, 0);
487
- const result = new Uint8Array(totalLength);
488
- let offset = 0;
489
- for (const chunk of chunks) {
490
- result.set(chunk, offset);
491
- offset += chunk.length;
492
- }
493
- return result;
494
- });
495
- };
496
- const prepareUpload = (file_id, initialOffset, initialData) => {
497
- return Effect.gen(function* () {
498
- const uploadFile = yield* kvStore.get(file_id);
499
- const blocks = yield* retrieveBlocks(file_id);
500
- const blockNumber = blocks.length;
501
- const nextBlockNumber = blockNumber + 1;
502
- const incompleteBlock = yield* downloadIncompleteBlock(file_id);
503
- if (incompleteBlock) {
504
- yield* deleteIncompleteBlock(file_id);
505
- const offset = initialOffset - incompleteBlock.size;
506
- const data = incompleteBlock.stream.pipe(Stream.concat(initialData));
507
- return {
508
- uploadFile,
509
- nextBlockNumber: nextBlockNumber - 1,
510
- offset,
511
- incompleteBlockSize: incompleteBlock.size,
512
- data,
513
- };
514
- }
515
- else {
516
- return {
517
- uploadFile,
518
- nextBlockNumber,
519
- offset: initialOffset,
520
- incompleteBlockSize: 0,
521
- data: initialData,
522
- };
523
- }
524
- });
525
- };
526
- /**
527
- * Write to the file, starting at the provided offset
528
- */
529
- const write = (options, dependencies) => {
530
- return withUploadMetrics(options.file_id, withTimingMetrics(uploadDurationHistogram, Effect.gen(function* () {
531
- const startTime = Date.now();
532
- const { stream: initialData, file_id, offset: initialOffset, } = options;
533
- const { onProgress } = dependencies;
534
- const prepareResult = yield* prepareUpload(file_id, initialOffset, initialData);
535
- const { uploadFile, nextBlockNumber, offset, data } = prepareResult;
536
- const { bytesUploaded, blockIds } = yield* uploadBlocks(uploadFile, data, nextBlockNumber, offset, onProgress);
537
- const newOffset = offset + bytesUploaded;
538
- if (uploadFile.size === newOffset) {
539
- try {
540
- // Commit all blocks to finalize the blob
541
- yield* commitBlocks(uploadFile, blockIds);
542
- yield* clearCache(file_id);
543
- // Log completion with observability
544
- yield* logAzureUploadCompletion(file_id, {
545
- fileSize: uploadFile.size || 0,
546
- totalDurationMs: Date.now() - startTime,
547
- partsCount: blockIds.length,
548
- averagePartSize: uploadFile.size,
549
- throughputBps: uploadFile.size / (Date.now() - startTime),
550
- retryCount: 0,
551
- });
552
- yield* uploadSuccessTotal(Effect.succeed(1));
553
- yield* activeUploadsGauge(Effect.succeed(-1));
554
- }
555
- catch (error) {
556
- yield* Effect.logError("Failed to finish upload").pipe(Effect.annotateLogs({
557
- upload_id: file_id,
558
- error: JSON.stringify(error),
559
- }));
560
- yield* uploadErrorsTotal(Effect.succeed(1));
561
- Effect.runSync(trackAzureError("write", error, {
562
- upload_id: file_id,
563
- operation: "commit",
564
- blocks: blockIds.length,
565
- }));
566
- throw error;
567
- }
568
- }
569
- return newOffset;
570
- })));
571
- };
572
- const getUpload = (id) => {
573
- return Effect.gen(function* () {
574
- const uploadFile = yield* kvStore.get(id);
575
- let offset = 0;
576
- try {
577
- const blocks = yield* retrieveBlocks(id);
578
- offset = calcOffsetFromBlocks(blocks);
579
- }
580
- catch (error) {
581
- // Check if the error is caused by the blob not being found
582
- if (typeof error === "object" &&
583
- error !== null &&
584
- "statusCode" in error &&
585
- error.statusCode === 404) {
586
- return {
587
- ...uploadFile,
588
- offset: uploadFile.size,
589
- size: uploadFile.size,
590
- metadata: uploadFile.metadata,
591
- storage: uploadFile.storage,
592
- };
593
- }
594
- yield* Effect.logError("Error on get upload").pipe(Effect.annotateLogs({
595
- upload_id: id,
596
- error: JSON.stringify(error),
597
- }));
598
- throw error;
599
- }
600
- const incompleteBlockSize = yield* getIncompleteBlockSize(id);
601
- return {
602
- ...uploadFile,
603
- offset: offset + (incompleteBlockSize ?? 0),
604
- size: uploadFile.size,
605
- storage: uploadFile.storage,
606
- };
607
- });
608
- };
609
- const remove = (id) => {
610
- return Effect.gen(function* () {
611
- try {
612
- const blobClient = containerClient.getBlockBlobClient(id);
613
- yield* Effect.promise(() => blobClient.deleteIfExists());
614
- // Also delete incomplete block if it exists
615
- yield* deleteIncompleteBlock(id);
616
- }
617
- catch (error) {
618
- if (typeof error === "object" &&
619
- error !== null &&
620
- "statusCode" in error &&
621
- error.statusCode === 404) {
622
- yield* Effect.logError("No file found").pipe(Effect.annotateLogs({
623
- upload_id: id,
624
- }));
625
- return yield* Effect.fail(UploadistaError.fromCode("FILE_NOT_FOUND"));
626
- }
627
- Effect.runSync(trackAzureError("remove", error, {
628
- upload_id: id,
629
- }));
630
- throw error;
631
- }
632
- yield* clearCache(id);
633
- yield* activeUploadsGauge(Effect.succeed(-1));
634
- });
635
- };
636
- const getExpiration = () => {
637
- return expirationPeriodInMilliseconds;
638
- };
639
- const getExpirationDate = (created_at) => {
640
- const date = new Date(created_at);
641
- return new Date(date.getTime() + getExpiration());
642
- };
643
- const deleteExpired = () => {
644
- return Effect.tryPromise({
645
- try: async () => {
646
- if (getExpiration() === 0) {
647
- return 0;
648
- }
649
- let deleted = 0;
650
- const response = containerClient.listBlobsFlat({
651
- includeMetadata: true,
652
- });
653
- const expiredBlobs = [];
654
- for await (const blob of response) {
655
- if (blob.metadata?.creationDate) {
656
- const creationDate = new Date(blob.metadata.creationDate);
657
- if (Date.now() >
658
- getExpirationDate(creationDate.toISOString()).getTime()) {
659
- expiredBlobs.push(blob.name);
660
- }
661
- }
662
- }
663
- // Delete expired blobs
664
- for (const blobName of expiredBlobs) {
665
- await containerClient.deleteBlob(blobName);
666
- deleted++;
667
- }
668
- return deleted;
669
- },
670
- catch: (error) => UploadistaError.fromCode("FILE_WRITE_ERROR", { cause: error }),
671
- });
672
- };
673
- const getCapabilities = () => {
674
- return {
675
- supportsParallelUploads: true,
676
- supportsConcatenation: false, // Azure doesn't have native concatenation like GCS
677
- supportsDeferredLength: true,
678
- supportsResumableUploads: true,
679
- supportsTransactionalUploads: true,
680
- maxConcurrentUploads: maxConcurrentBlockUploads,
681
- minChunkSize: minBlockSize,
682
- maxChunkSize: 4000 * 1024 * 1024, // 4000MB Azure limit
683
- maxParts: maxBlocks,
684
- optimalChunkSize: preferredBlockSize,
685
- requiresOrderedChunks: false,
686
- requiresMimeTypeValidation: true,
687
- maxValidationSize: undefined, // no size limit
688
- };
689
- };
690
- const getChunkerConstraints = () => {
691
- return {
692
- minChunkSize: minBlockSize,
693
- maxChunkSize: 4000 * 1024 * 1024, // 4000MB Azure limit
694
- optimalChunkSize: preferredBlockSize,
695
- requiresOrderedChunks: false,
696
- };
697
- };
698
- const validateUploadStrategy = (strategy) => {
699
- const capabilities = getCapabilities();
700
- const result = (() => {
701
- switch (strategy) {
702
- case "parallel":
703
- return capabilities.supportsParallelUploads;
704
- case "single":
705
- return true;
706
- default:
707
- return false;
708
- }
709
- })();
710
- return Effect.succeed(result);
711
- };
712
- return {
713
- bucket: containerName,
714
- create,
715
- remove,
716
- write,
717
- getUpload,
718
- read,
719
- readStream,
720
- deleteExpired: deleteExpired(),
721
- getCapabilities,
722
- getChunkerConstraints,
723
- validateUploadStrategy,
724
- };
725
- }
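The removed dist/azure-store.js above is the old tsc output of the module that now ships minified; its readable source is embedded in the new dist/index.js.map. One piece worth pulling out when reading the diff is the block-size selection, restated below with worked examples. The defaults (8 MiB preferred block, 50,000-block cap, 1 KiB minimum and 1 KiB alignment, 5 TiB fallback size) are the values visible in that code; the standalone function signature here is a convenience for illustration, not the original closure-based form.

// Restatement of calcOptimalBlockSize from the removed dist/azure-store.js.
const MAX_UPLOAD_SIZE = 5_497_558_138_880; // 5 TiB fallback when the upload size is unknown

function calcOptimalBlockSize(
  initSize: number | undefined,
  preferredBlockSize = 8 * 1024 * 1024,
  maxBlocks = 50_000,
  minBlockSize = 1024,
): number {
  const size = initSize ?? MAX_UPLOAD_SIZE;
  let optimal: number;
  if (size <= preferredBlockSize) {
    optimal = size; // small upload: one block covering the whole file
  } else if (size <= preferredBlockSize * maxBlocks) {
    optimal = preferredBlockSize; // preferred size already fits under the block cap
  } else {
    optimal = Math.ceil(size / maxBlocks); // grow blocks so the count stays <= maxBlocks
  }
  // Enforce the minimum and align up to a 1 KiB boundary, as the original does.
  return Math.ceil(Math.max(optimal, minBlockSize) / 1024) * 1024;
}

// Worked examples:
// 100 KiB upload -> one 100 KiB block (already 1 KiB-aligned).
// 10 GiB upload  -> 8 MiB blocks (10 GiB fits under 8 MiB x 50,000), about 1,280 blocks.
// 1 TiB upload   -> ceil(1 TiB / 50,000) aligned up to 21,990,400 bytes (~21 MiB), ~50,000 blocks.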