@maas/payload-plugin-media-cloud 0.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87) hide show
  1. package/LICENSE +8 -0
  2. package/dist/adapter/handleDelete.d.ts +20 -0
  3. package/dist/adapter/handleDelete.js +70 -0
  4. package/dist/adapter/handleDelete.js.map +1 -0
  5. package/dist/adapter/handleUpload.d.ts +12 -0
  6. package/dist/adapter/handleUpload.js +29 -0
  7. package/dist/adapter/handleUpload.js.map +1 -0
  8. package/dist/adapter/staticHandler.d.ts +17 -0
  9. package/dist/adapter/staticHandler.js +64 -0
  10. package/dist/adapter/staticHandler.js.map +1 -0
  11. package/dist/adapter/storageAdapter.d.ts +23 -0
  12. package/dist/adapter/storageAdapter.js +30 -0
  13. package/dist/adapter/storageAdapter.js.map +1 -0
  14. package/dist/collections/mediaCollection.d.ts +16 -0
  15. package/dist/collections/mediaCollection.js +139 -0
  16. package/dist/collections/mediaCollection.js.map +1 -0
  17. package/dist/components/index.d.ts +4 -0
  18. package/dist/components/index.js +5 -0
  19. package/dist/components/mux-preview/index.d.ts +2 -0
  20. package/dist/components/mux-preview/index.js +3 -0
  21. package/dist/components/mux-preview/mux-preview.d.ts +14 -0
  22. package/dist/components/mux-preview/mux-preview.js +38 -0
  23. package/dist/components/mux-preview/mux-preview.js.map +1 -0
  24. package/dist/components/upload-handler/index.d.ts +2 -0
  25. package/dist/components/upload-handler/index.js +3 -0
  26. package/dist/components/upload-handler/upload-handler.d.ts +22 -0
  27. package/dist/components/upload-handler/upload-handler.js +178 -0
  28. package/dist/components/upload-handler/upload-handler.js.map +1 -0
  29. package/dist/components/upload-manager/index.d.ts +2 -0
  30. package/dist/components/upload-manager/index.js +3 -0
  31. package/dist/components/upload-manager/upload-manager-DN4RrmYB.css +204 -0
  32. package/dist/components/upload-manager/upload-manager-DN4RrmYB.css.map +1 -0
  33. package/dist/components/upload-manager/upload-manager.css +201 -0
  34. package/dist/components/upload-manager/upload-manager.d.ts +42 -0
  35. package/dist/components/upload-manager/upload-manager.js +315 -0
  36. package/dist/components/upload-manager/upload-manager.js.map +1 -0
  37. package/dist/components/upload-manager/upload-manager2.js +0 -0
  38. package/dist/endpoints/muxAssetHandler.d.ts +11 -0
  39. package/dist/endpoints/muxAssetHandler.js +59 -0
  40. package/dist/endpoints/muxAssetHandler.js.map +1 -0
  41. package/dist/endpoints/muxCreateUploadHandler.d.ts +13 -0
  42. package/dist/endpoints/muxCreateUploadHandler.js +40 -0
  43. package/dist/endpoints/muxCreateUploadHandler.js.map +1 -0
  44. package/dist/endpoints/muxWebhookHandler.d.ts +11 -0
  45. package/dist/endpoints/muxWebhookHandler.js +49 -0
  46. package/dist/endpoints/muxWebhookHandler.js.map +1 -0
  47. package/dist/hooks/useEmitter.d.ts +48 -0
  48. package/dist/hooks/useEmitter.js +19 -0
  49. package/dist/hooks/useEmitter.js.map +1 -0
  50. package/dist/hooks/useErrorHandler.d.ts +11 -0
  51. package/dist/hooks/useErrorHandler.js +19 -0
  52. package/dist/hooks/useErrorHandler.js.map +1 -0
  53. package/dist/index.d.ts +3 -0
  54. package/dist/index.js +3 -0
  55. package/dist/plugin.d.ts +15 -0
  56. package/dist/plugin.js +242 -0
  57. package/dist/plugin.js.map +1 -0
  58. package/dist/tus/stores/s3/expiration-manager.d.ts +36 -0
  59. package/dist/tus/stores/s3/expiration-manager.js +76 -0
  60. package/dist/tus/stores/s3/expiration-manager.js.map +1 -0
  61. package/dist/tus/stores/s3/file-operations.d.ts +66 -0
  62. package/dist/tus/stores/s3/file-operations.js +90 -0
  63. package/dist/tus/stores/s3/file-operations.js.map +1 -0
  64. package/dist/tus/stores/s3/log.d.ts +5 -0
  65. package/dist/tus/stores/s3/log.js +8 -0
  66. package/dist/tus/stores/s3/log.js.map +1 -0
  67. package/dist/tus/stores/s3/metadata-manager.d.ts +85 -0
  68. package/dist/tus/stores/s3/metadata-manager.js +135 -0
  69. package/dist/tus/stores/s3/metadata-manager.js.map +1 -0
  70. package/dist/tus/stores/s3/parts-manager.d.ts +130 -0
  71. package/dist/tus/stores/s3/parts-manager.js +328 -0
  72. package/dist/tus/stores/s3/parts-manager.js.map +1 -0
  73. package/dist/tus/stores/s3/s3-store.d.ts +110 -0
  74. package/dist/tus/stores/s3/s3-store.js +342 -0
  75. package/dist/tus/stores/s3/s3-store.js.map +1 -0
  76. package/dist/tus/stores/s3/semaphore.d.ts +16 -0
  77. package/dist/tus/stores/s3/semaphore.js +32 -0
  78. package/dist/tus/stores/s3/semaphore.js.map +1 -0
  79. package/dist/types/errors.d.ts +26 -0
  80. package/dist/types/errors.js +28 -0
  81. package/dist/types/errors.js.map +1 -0
  82. package/dist/types/index.d.ts +73 -0
  83. package/dist/types/index.js +0 -0
  84. package/dist/utils/file.d.ts +30 -0
  85. package/dist/utils/file.js +84 -0
  86. package/dist/utils/file.js.map +1 -0
  87. package/package.json +92 -0
@@ -0,0 +1 @@
1
+ {"version":3,"file":"parts-manager.js","names":["client: S3","bucket: string","minPartSize: number","partUploadSemaphore: Semaphore","metadataManager: S3MetadataManager","fileOperations: S3FileOperations","generateCompleteTag: (value: 'false' | 'true') => string | undefined","args: RetrievePartsArgs","params: AWS.ListPartsCommandInput","args: FinishMultipartUploadArgs","args: GetIncompletePartArgs","args: GetIncompletePartSizeArgs","args: DeleteIncompletePartArgs","args: DownloadIncompletePartArgs","options: { cleanUpOnEnd: boolean }","args: UploadIncompletePartArgs","args: UploadPartArgs","params: AWS.UploadPartCommandInput","args: UploadPartsArgs","promises: Promise<void>[]","pendingChunkFilepath: null | string","permit: SemaphorePermit | undefined"],"sources":["../../../../src/tus/stores/s3/parts-manager.ts"],"sourcesContent":["import fs from 'node:fs'\nimport os from 'node:os'\nimport stream from 'node:stream'\n\nimport { NoSuchKey, NotFound, type S3 } from '@aws-sdk/client-s3'\nimport { StreamSplitter } from '@tus/utils'\n\nimport { useErrorHandler } from '../../../hooks/useErrorHandler'\nimport { MediaCloudError } from '../../../types/errors'\nimport { log } from './log'\n\nimport type { Readable } from 'node:stream'\nimport type AWS from '@aws-sdk/client-s3'\nimport type { IncompletePartInfo, TusUploadMetadata } from '../../../types'\nimport type { S3FileOperations } from './file-operations'\nimport type { S3MetadataManager } from './metadata-manager'\nimport type { Semaphore, SemaphorePermit } from './semaphore'\n\ntype RetrievePartsArgs = {\n id: string\n partNumberMarker?: string\n}\n\ntype FinishMultipartUploadArgs = {\n metadata: TusUploadMetadata\n parts: Array<AWS.Part>\n}\n\ntype GetIncompletePartArgs = {\n id: string\n}\n\ntype GetIncompletePartSizeArgs = {\n id: string\n}\n\ntype DeleteIncompletePartArgs = {\n id: string\n}\n\ntype DownloadIncompletePartArgs = {\n id: string\n}\n\ntype UploadIncompletePartArgs = {\n id: string\n readStream: 
fs.ReadStream | Readable\n}\n\ntype UploadPartArgs = {\n metadata: TusUploadMetadata\n readStream: fs.ReadStream | Readable\n partNumber: number\n}\n\ntype UploadPartsArgs = {\n metadata: TusUploadMetadata\n readStream: stream.Readable\n currentPartNumber: number\n offset: number\n}\n\nconst { throwError } = useErrorHandler()\n\nexport class S3PartsManager {\n constructor(\n private client: S3,\n private bucket: string,\n private minPartSize: number,\n private partUploadSemaphore: Semaphore,\n private metadataManager: S3MetadataManager,\n private fileOperations: S3FileOperations,\n private generateCompleteTag: (value: 'false' | 'true') => string | undefined\n ) {}\n\n /**\n * Gets the number of complete parts/chunks already uploaded to S3.\n * Retrieves only consecutive parts.\n * @param args - The function arguments\n * @param args.id - The upload ID\n * @param args.partNumberMarker - Marker for pagination (optional)\n * @returns Promise that resolves to array of uploaded parts\n */\n async retrieveParts(args: RetrievePartsArgs): Promise<Array<AWS.Part>> {\n const { id, partNumberMarker } = args\n const metadata = await this.metadataManager.getMetadata({ id })\n\n if (!metadata['upload-id']) {\n throwError(MediaCloudError.MUX_UPLOAD_ID_MISSING)\n throw new Error() // This will never execute but satisfies TypeScript\n }\n\n const params: AWS.ListPartsCommandInput = {\n Bucket: this.bucket,\n Key: id,\n PartNumberMarker: partNumberMarker,\n UploadId: metadata['upload-id'],\n }\n\n const data = await this.client.listParts(params)\n\n let parts = data.Parts ?? 
[]\n\n if (data.IsTruncated) {\n const rest = await this.retrieveParts({\n id,\n partNumberMarker: data.NextPartNumberMarker,\n })\n parts = [...parts, ...rest]\n }\n\n if (!partNumberMarker) {\n parts.sort((a, b) => (a.PartNumber || 0) - (b.PartNumber || 0))\n }\n\n return parts\n }\n\n /**\n * Completes a multipart upload on S3.\n * This is where S3 concatenates all the uploaded parts.\n * @param args - The function arguments\n * @param args.metadata - The upload metadata\n * @param args.parts - Array of uploaded parts to complete\n * @returns Promise that resolves to the location URL (optional)\n */\n async finishMultipartUpload(\n args: FinishMultipartUploadArgs\n ): Promise<string | undefined> {\n const { metadata, parts } = args\n const params = {\n Bucket: this.bucket,\n Key: metadata.file.id,\n MultipartUpload: {\n Parts: parts.map((part) => {\n return {\n ETag: part.ETag,\n PartNumber: part.PartNumber,\n }\n }),\n },\n UploadId: metadata['upload-id'],\n }\n\n try {\n const result = await this.client.completeMultipartUpload(params)\n return result.Location\n } catch (_error) {\n throwError(MediaCloudError.TUS_UPLOAD_ERROR)\n throw new Error() // This will never execute but satisfies TypeScript\n }\n }\n\n /**\n * Gets incomplete part from S3\n * @param args - The function arguments\n * @param args.id - The upload ID\n * @returns Promise that resolves to readable stream or undefined if not found\n */\n async getIncompletePart(\n args: GetIncompletePartArgs\n ): Promise<Readable | undefined> {\n const { id } = args\n try {\n const data = await this.client.getObject({\n Bucket: this.bucket,\n Key: this.metadataManager.generatePartKey({ id, isIncomplete: true }),\n })\n return data.Body as Readable\n } catch (error) {\n if (error instanceof NoSuchKey) {\n return undefined\n }\n throw error\n }\n }\n\n /**\n * Gets the size of an incomplete part\n * @param args - The function arguments\n * @param args.id - The upload ID\n * @returns Promise that resolves to part 
size or undefined if not found\n */\n async getIncompletePartSize(\n args: GetIncompletePartSizeArgs\n ): Promise<number | undefined> {\n const { id } = args\n try {\n const data = await this.client.headObject({\n Bucket: this.bucket,\n Key: this.metadataManager.generatePartKey({ id, isIncomplete: true }),\n })\n return data.ContentLength\n } catch (error) {\n if (error instanceof NotFound) {\n return undefined\n }\n throw error\n }\n }\n\n /**\n * Deletes an incomplete part\n * @param args - The function arguments\n * @param args.id - The upload ID\n * @returns Promise that resolves when deletion is complete\n */\n async deleteIncompletePart(args: DeleteIncompletePartArgs): Promise<void> {\n const { id } = args\n await this.client.deleteObject({\n Bucket: this.bucket,\n Key: this.metadataManager.generatePartKey({ id, isIncomplete: true }),\n })\n }\n\n /**\n * Downloads incomplete part to temporary file\n * @param args - The function arguments\n * @param args.id - The upload ID\n * @returns Promise that resolves to incomplete part info or undefined if not found\n */\n async downloadIncompletePart(\n args: DownloadIncompletePartArgs\n ): Promise<IncompletePartInfo | undefined> {\n const { id } = args\n const incompletePart = await this.getIncompletePart({ id })\n\n if (!incompletePart) {\n return\n }\n const filePath = await this.fileOperations.generateUniqueTmpFileName({\n template: 'tus-s3-incomplete-part-',\n })\n\n try {\n let incompletePartSize = 0\n\n const byteCounterTransform = new stream.Transform({\n transform(chunk, _, callback) {\n incompletePartSize += chunk.length\n callback(null, chunk)\n },\n })\n\n // Write to temporary file\n await stream.promises.pipeline(\n incompletePart,\n byteCounterTransform,\n fs.createWriteStream(filePath)\n )\n\n const createReadStream = (options: { cleanUpOnEnd: boolean }) => {\n const fileReader = fs.createReadStream(filePath)\n\n if (options.cleanUpOnEnd) {\n fileReader.on('end', () => {\n fs.unlink(filePath, () => 
{})\n })\n\n fileReader.on('error', (err) => {\n fileReader.destroy(err)\n fs.unlink(filePath, () => {})\n })\n }\n\n return fileReader\n }\n\n return {\n createReader: createReadStream,\n path: filePath,\n size: incompletePartSize,\n }\n } catch (err) {\n fs.promises.rm(filePath).catch(() => {})\n throw err\n }\n }\n\n /**\n * Uploads an incomplete part\n * @param args - The function arguments\n * @param args.id - The upload ID\n * @param args.readStream - The stream to read data from\n * @returns Promise that resolves to the ETag of the uploaded part\n */\n async uploadIncompletePart(args: UploadIncompletePartArgs): Promise<string> {\n const { id, readStream } = args\n const data = await this.client.putObject({\n Body: readStream,\n Bucket: this.bucket,\n Key: this.metadataManager.generatePartKey({ id, isIncomplete: true }),\n Tagging: this.generateCompleteTag('false'),\n })\n log(`[${id}] finished uploading incomplete part`)\n return data.ETag as string\n }\n\n /**\n * Uploads a single part\n * @param args - The function arguments\n * @param args.metadata - The upload metadata\n * @param args.readStream - The stream to read data from\n * @param args.partNumber - The part number to upload\n * @returns Promise that resolves to the ETag of the uploaded part\n */\n async uploadPart(args: UploadPartArgs): Promise<AWS.Part> {\n const { metadata, readStream, partNumber } = args\n const permit = await this.partUploadSemaphore.acquire()\n\n if (!metadata['upload-id']) {\n throwError(MediaCloudError.MUX_UPLOAD_ID_MISSING)\n throw new Error() // This will never execute but satisfies TypeScript\n }\n\n const params: AWS.UploadPartCommandInput = {\n Body: readStream,\n Bucket: this.bucket,\n Key: metadata.file.id,\n PartNumber: partNumber,\n UploadId: metadata['upload-id'],\n }\n\n try {\n const data = await this.client.uploadPart(params)\n return { ETag: data.ETag, PartNumber: partNumber }\n } catch (_error) {\n throwError(MediaCloudError.TUS_UPLOAD_ERROR)\n throw new 
Error() // This will never execute but satisfies TypeScript\n } finally {\n permit()\n }\n }\n\n /**\n * Uploads a stream to s3 using multiple parts\n * @param args - The function arguments\n * @param args.metadata - The upload metadata\n * @param args.readStream - The stream to read data from\n * @param args.currentPartNumber - The current part number to start from\n * @param args.offset - The byte offset to start from\n * @returns Promise that resolves to the number of bytes uploaded\n */\n async uploadParts(args: UploadPartsArgs): Promise<number> {\n const { metadata, readStream, offset: initialOffset } = args\n let { currentPartNumber } = args\n let offset = initialOffset\n const size = metadata.file.size\n const promises: Promise<void>[] = []\n let pendingChunkFilepath: null | string = null\n let bytesUploaded = 0\n let permit: SemaphorePermit | undefined = undefined\n\n const splitterStream = new StreamSplitter({\n chunkSize: this.fileOperations.calculateOptimalPartSize({ size }),\n directory: os.tmpdir(),\n })\n .on('beforeChunkStarted', async () => {\n permit = await this.partUploadSemaphore.acquire()\n })\n .on('chunkStarted', (filepath) => {\n pendingChunkFilepath = filepath\n })\n .on('chunkFinished', ({ path, size: partSize }) => {\n pendingChunkFilepath = null\n\n const acquiredPermit = permit\n const partNumber = currentPartNumber++\n\n offset += partSize\n\n const isFinalPart = size === offset\n\n const uploadChunk = async () => {\n try {\n // Only the first chunk of each PATCH request can prepend\n // an incomplete part (last chunk) from the previous request.\n const readable = fs.createReadStream(path)\n readable.on('error', function (error) {\n throw error\n })\n\n switch (true) {\n case partSize >= this.minPartSize || isFinalPart:\n await this.uploadPart({\n metadata,\n readStream: readable,\n partNumber,\n })\n break\n default:\n await this.uploadIncompletePart({\n id: metadata.file.id,\n readStream: readable,\n })\n break\n }\n\n bytesUploaded 
+= partSize\n } catch (error) {\n // Destroy the splitter to stop processing more chunks\n const mappedError =\n error instanceof Error ? error : new Error(String(error))\n splitterStream.destroy(mappedError)\n throw mappedError\n } finally {\n fs.promises.rm(path).catch(function () {})\n acquiredPermit?.()\n }\n }\n\n const deferred = uploadChunk()\n\n promises.push(deferred)\n })\n .on('chunkError', () => {\n permit?.()\n })\n\n try {\n await stream.promises.pipeline(readStream, splitterStream)\n } catch (error) {\n if (pendingChunkFilepath !== null) {\n try {\n await fs.promises.rm(pendingChunkFilepath)\n } catch {\n log(\n `[${metadata.file.id}] failed to remove chunk ${String(pendingChunkFilepath)}`\n )\n }\n }\n const mappedError =\n error instanceof Error ? error : new Error(String(error))\n promises.push(Promise.reject(mappedError))\n } finally {\n // Wait for all promises\n await Promise.allSettled(promises)\n // Reject the promise if any of the promises reject\n await Promise.all(promises)\n }\n\n return bytesUploaded\n 
}\n}\n"],"mappings":";;;;;;;;;;AA8DA,MAAM,EAAE,YAAY,GAAG,iBAAiB;AAExC,IAAa,iBAAb,MAA4B;CAC1B,YACUA,QACAC,QACAC,aACAC,qBACAC,iBACAC,gBACAC,qBACR;EAPQ;EACA;EACA;EACA;EACA;EACA;EACA;CACN;;;;;;;;;CAUJ,MAAM,cAAcC,MAAmD;EACrE,MAAM,EAAE,IAAI,kBAAkB,GAAG;EACjC,MAAM,WAAW,MAAM,KAAK,gBAAgB,YAAY,EAAE,GAAI,EAAC;AAE/D,MAAI,CAAC,SAAS,cAAc;GAC1B,WAAW,gBAAgB,sBAAsB;AACjD,SAAM,IAAI;EACX;EAED,MAAMC,SAAoC;GACxC,QAAQ,KAAK;GACb,KAAK;GACL,kBAAkB;GAClB,UAAU,SAAS;EACpB;EAED,MAAM,OAAO,MAAM,KAAK,OAAO,UAAU,OAAO;EAEhD,IAAI,QAAQ,KAAK,SAAS,CAAE;AAE5B,MAAI,KAAK,aAAa;GACpB,MAAM,OAAO,MAAM,KAAK,cAAc;IACpC;IACA,kBAAkB,KAAK;GACxB,EAAC;GACF,QAAQ,CAAC,GAAG,OAAO,GAAG,IAAK;EAC5B;AAED,MAAI,CAAC,kBACH,MAAM,KAAK,CAAC,GAAG,OAAO,EAAE,cAAc,MAAM,EAAE,cAAc,GAAG;AAGjE,SAAO;CACR;;;;;;;;;CAUD,MAAM,sBACJC,MAC6B;EAC7B,MAAM,EAAE,UAAU,OAAO,GAAG;EAC5B,MAAM,SAAS;GACb,QAAQ,KAAK;GACb,KAAK,SAAS,KAAK;GACnB,iBAAiB,EACf,OAAO,MAAM,IAAI,CAAC,SAAS;AACzB,WAAO;KACL,MAAM,KAAK;KACX,YAAY,KAAK;IAClB;GACF,EAAC,CACH;GACD,UAAU,SAAS;EACpB;AAED,MAAI;GACF,MAAM,SAAS,MAAM,KAAK,OAAO,wBAAwB,OAAO;AAChE,UAAO,OAAO;EACf,SAAQ,QAAQ;GACf,WAAW,gBAAgB,iBAAiB;AAC5C,SAAM,IAAI;EACX;CACF;;;;;;;CAQD,MAAM,kBACJC,MAC+B;EAC/B,MAAM,EAAE,IAAI,GAAG;AACf,MAAI;GACF,MAAM,OAAO,MAAM,KAAK,OAAO,UAAU;IACvC,QAAQ,KAAK;IACb,KAAK,KAAK,gBAAgB,gBAAgB;KAAE;KAAI,cAAc;IAAM,EAAC;GACtE,EAAC;AACF,UAAO,KAAK;EACb,SAAQ,OAAO;AACd,OAAI,iBAAiB,UACnB,QAAO;AAET,SAAM;EACP;CACF;;;;;;;CAQD,MAAM,sBACJC,MAC6B;EAC7B,MAAM,EAAE,IAAI,GAAG;AACf,MAAI;GACF,MAAM,OAAO,MAAM,KAAK,OAAO,WAAW;IACxC,QAAQ,KAAK;IACb,KAAK,KAAK,gBAAgB,gBAAgB;KAAE;KAAI,cAAc;IAAM,EAAC;GACtE,EAAC;AACF,UAAO,KAAK;EACb,SAAQ,OAAO;AACd,OAAI,iBAAiB,SACnB,QAAO;AAET,SAAM;EACP;CACF;;;;;;;CAQD,MAAM,qBAAqBC,MAA+C;EACxE,MAAM,EAAE,IAAI,GAAG;EACf,MAAM,KAAK,OAAO,aAAa;GAC7B,QAAQ,KAAK;GACb,KAAK,KAAK,gBAAgB,gBAAgB;IAAE;IAAI,cAAc;GAAM,EAAC;EACtE,EAAC;CACH;;;;;;;CAQD,MAAM,uBACJC,MACyC;EACzC,MAAM,EAAE,IAAI,GAAG;EACf,MAAM,iBAAiB,MAAM,KAAK,kBAAkB,EAAE,GAAI,EAAC;AAE3D,MAAI,CAAC,eACH;EAEF,MAAM,WAAW,MAAM,KAAK,eAAe,0BAA0B,EACnE,UAAU,0BACX,EAAC;AAEF,MAAI;GACF,IAAI,
qBAAqB;GAEzB,MAAM,uBAAuB,IAAI,OAAO,UAAU,EAChD,UAAU,OAAO,GAAG,UAAU;IAC5B,sBAAsB,MAAM;IAC5B,SAAS,MAAM,MAAM;GACtB,EACF;GAGD,MAAM,OAAO,SAAS,SACpB,gBACA,sBACA,GAAG,kBAAkB,SAAS,CAC/B;GAED,MAAM,mBAAmB,CAACC,YAAuC;IAC/D,MAAM,aAAa,GAAG,iBAAiB,SAAS;AAEhD,QAAI,QAAQ,cAAc;KACxB,WAAW,GAAG,OAAO,MAAM;MACzB,GAAG,OAAO,UAAU,MAAM,CAAE,EAAC;KAC9B,EAAC;KAEF,WAAW,GAAG,SAAS,CAAC,QAAQ;MAC9B,WAAW,QAAQ,IAAI;MACvB,GAAG,OAAO,UAAU,MAAM,CAAE,EAAC;KAC9B,EAAC;IACH;AAED,WAAO;GACR;AAED,UAAO;IACL,cAAc;IACd,MAAM;IACN,MAAM;GACP;EACF,SAAQ,KAAK;GACZ,GAAG,SAAS,GAAG,SAAS,CAAC,MAAM,MAAM,CAAE,EAAC;AACxC,SAAM;EACP;CACF;;;;;;;;CASD,MAAM,qBAAqBC,MAAiD;EAC1E,MAAM,EAAE,IAAI,YAAY,GAAG;EAC3B,MAAM,OAAO,MAAM,KAAK,OAAO,UAAU;GACvC,MAAM;GACN,QAAQ,KAAK;GACb,KAAK,KAAK,gBAAgB,gBAAgB;IAAE;IAAI,cAAc;GAAM,EAAC;GACrE,SAAS,KAAK,oBAAoB,QAAQ;EAC3C,EAAC;EACF,IAAI,CAAC,CAAC,EAAE,GAAG,oCAAoC,CAAC,CAAC;AACjD,SAAO,KAAK;CACb;;;;;;;;;CAUD,MAAM,WAAWC,MAAyC;EACxD,MAAM,EAAE,UAAU,YAAY,YAAY,GAAG;EAC7C,MAAM,SAAS,MAAM,KAAK,oBAAoB,SAAS;AAEvD,MAAI,CAAC,SAAS,cAAc;GAC1B,WAAW,gBAAgB,sBAAsB;AACjD,SAAM,IAAI;EACX;EAED,MAAMC,SAAqC;GACzC,MAAM;GACN,QAAQ,KAAK;GACb,KAAK,SAAS,KAAK;GACnB,YAAY;GACZ,UAAU,SAAS;EACpB;AAED,MAAI;GACF,MAAM,OAAO,MAAM,KAAK,OAAO,WAAW,OAAO;AACjD,UAAO;IAAE,MAAM,KAAK;IAAM,YAAY;GAAY;EACnD,SAAQ,QAAQ;GACf,WAAW,gBAAgB,iBAAiB;AAC5C,SAAM,IAAI;EACX,UAAS;GACR,QAAQ;EACT;CACF;;;;;;;;;;CAWD,MAAM,YAAYC,MAAwC;EACxD,MAAM,EAAE,UAAU,YAAY,QAAQ,eAAe,GAAG;EACxD,IAAI,EAAE,mBAAmB,GAAG;EAC5B,IAAI,SAAS;EACb,MAAM,OAAO,SAAS,KAAK;EAC3B,MAAMC,WAA4B,CAAE;EACpC,IAAIC,uBAAsC;EAC1C,IAAI,gBAAgB;EACpB,IAAIC,SAAsC;EAE1C,MAAM,iBAAiB,IAAI,eAAe;GACxC,WAAW,KAAK,eAAe,yBAAyB,EAAE,KAAM,EAAC;GACjE,WAAW,GAAG,QAAQ;EACvB,GACE,GAAG,sBAAsB,YAAY;GACpC,SAAS,MAAM,KAAK,oBAAoB,SAAS;EAClD,EAAC,CACD,GAAG,gBAAgB,CAAC,aAAa;GAChC,uBAAuB;EACxB,EAAC,CACD,GAAG,iBAAiB,CAAC,EAAE,MAAM,MAAM,UAAU,KAAK;GACjD,uBAAuB;GAEvB,MAAM,iBAAiB;GACvB,MAAM,aAAa;GAEnB,UAAU;GAEV,MAAM,cAAc,SAAS;GAE7B,MAAM,cAAc,YAAY;AAC9B,QAAI;KAGF,MAAM,WAAW,GAAG,iBAAiB,KAAK;KAC1C,SAAS,GAAG,SAAS,SAAU,OAAO;AACpC,YAAM;KACP,EAAC;AAEF,aAAQ,
MAAR;MACE,KAAK,YAAY,KAAK,eAAe;OACnC,MAAM,KAAK,WAAW;QACpB;QACA,YAAY;QACZ;OACD,EAAC;AACF;MACF;OACE,MAAM,KAAK,qBAAqB;QAC9B,IAAI,SAAS,KAAK;QAClB,YAAY;OACb,EAAC;AACF;KACH;KAED,iBAAiB;IAClB,SAAQ,OAAO;KAEd,MAAM,cACJ,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,MAAM;KAC1D,eAAe,QAAQ,YAAY;AACnC,WAAM;IACP,UAAS;KACR,GAAG,SAAS,GAAG,KAAK,CAAC,MAAM,WAAY,CAAE,EAAC;KAC1C,kBAAkB;IACnB;GACF;GAED,MAAM,WAAW,aAAa;GAE9B,SAAS,KAAK,SAAS;EACxB,EAAC,CACD,GAAG,cAAc,MAAM;GACtB,UAAU;EACX,EAAC;AAEJ,MAAI;GACF,MAAM,OAAO,SAAS,SAAS,YAAY,eAAe;EAC3D,SAAQ,OAAO;AACd,OAAI,yBAAyB,KAC3B,KAAI;IACF,MAAM,GAAG,SAAS,GAAG,qBAAqB;GAC3C,QAAO;IACN,IACE,CAAC,CAAC,EAAE,SAAS,KAAK,GAAG,yBAAyB,EAAE,OAAO,qBAAqB,EAAE,CAC/E;GACF;GAEH,MAAM,cACJ,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,MAAM;GAC1D,SAAS,KAAK,QAAQ,OAAO,YAAY,CAAC;EAC3C,UAAS;GAER,MAAM,QAAQ,WAAW,SAAS;GAElC,MAAM,QAAQ,IAAI,SAAS;EAC5B;AAED,SAAO;CACR;AACF"}
@@ -0,0 +1,110 @@
1
+ import { Semaphore } from "./semaphore.js";
2
+ import { S3ExpirationManager } from "./expiration-manager.js";
3
+ import { S3FileOperations } from "./file-operations.js";
4
+ import { S3StoreConfig, TusUploadMetadata } from "../../../types/index.js";
5
+ import { S3MetadataManager } from "./metadata-manager.js";
6
+ import { S3PartsManager } from "./parts-manager.js";
7
+ import stream, { Readable } from "node:stream";
8
+ import { S3 } from "@aws-sdk/client-s3";
9
+ import { DataStore, KvStore, Upload } from "@tus/utils";
10
+
11
+ //#region src/tus/stores/s3/s3-store.d.ts
12
+ declare class S3Store extends DataStore {
13
+ client: S3;
14
+ bucket: string;
15
+ partSize: number;
16
+ minPartSize: number;
17
+ maxMultipartParts: number;
18
+ maxUploadSize: 5497558138880;
19
+ useTags: boolean;
20
+ expirationPeriodInMilliseconds: number;
21
+ protected acl?: string;
22
+ protected cache: KvStore<TusUploadMetadata>;
23
+ protected partUploadSemaphore: Semaphore;
24
+ protected metadataManager: S3MetadataManager;
25
+ protected fileOperations: S3FileOperations;
26
+ protected partsManager: S3PartsManager;
27
+ protected expirationManager: S3ExpirationManager;
28
+ protected customEndpoint: string;
29
+ constructor(options: S3StoreConfig);
30
+ /**
31
+ * Generate the key name for the info file
32
+ * @param id - The upload ID
33
+ * @returns The info file key
34
+ */
35
+ protected generateInfoKey(id: string): string;
36
+ /**
37
+ * Generate the key name for a part file
38
+ * @param id - The upload ID
39
+ * @param isIncompletePart - Whether this is an incomplete part (default: false)
40
+ * @returns The part file key
41
+ */
42
+ protected generatePartKey(id: string, isIncompletePart?: boolean): string;
43
+ /**
44
+ * Helper method to check if expiration tags should be used
45
+ * @returns True if expiration tags should be used
46
+ */
47
+ protected shouldUseExpirationTags(): boolean;
48
+ /**
49
+ * Generates a tag for marking complete/incomplete uploads
50
+ * @param value - Either 'false' or 'true' to mark completion status
51
+ * @returns The tag string or undefined if tags shouldn't be used
52
+ */
53
+ protected generateCompleteTag(value: 'false' | 'true'): string | undefined;
54
+ /**
55
+ * Creates a multipart upload on S3 attaching any metadata to it.
56
+ * Also, a `${file_id}.info` file is created which holds some information
57
+ * about the upload itself like: `upload-id`, `upload-length`, etc.
58
+ * @param upload - The upload object to create
59
+ * @returns Promise that resolves to the created upload
60
+ */
61
+ create(upload: Upload): Promise<Upload>;
62
+ /**
63
+ * Declares the length of the upload
64
+ * @param file_id - The file ID
65
+ * @param upload_length - The length of the upload
66
+ * @returns Promise that resolves when length is declared
67
+ */
68
+ declareUploadLength(file_id: string, upload_length: number): Promise<void>;
69
+ /**
70
+ * Writes `buffer` to the file specified by the upload's `id` at `offset`
71
+ * @param readable - The readable stream to write
72
+ * @param id - The upload ID
73
+ * @param offset - The byte offset to write at
74
+ * @returns Promise that resolves to the number of bytes written
75
+ */
76
+ write(readable: stream.Readable, id: string, offset: number): Promise<number>;
77
+ /**
78
+ * Returns the current state of the upload, i.e how much data has been
79
+ * uploaded and if the upload is complete.
80
+ */
81
+ getUpload(id: string): Promise<Upload>;
82
+ /**
83
+ * Reads the file specified by the upload's `id` and returns a readable stream
84
+ */
85
+ read(id: string): Promise<Readable>;
86
+ /**
87
+ * Removes the file specified by the upload's `id`
88
+ */
89
+ remove(id: string): Promise<void>;
90
+ /**
91
+ * Combine all multipart uploads into a single object
92
+ */
93
+ completeMultipartUpload(id: string): Promise<Upload>;
94
+ /**
95
+ * Get the full S3 URL for an uploaded file
96
+ */
97
+ getUrl(id: string): string;
98
+ /**
99
+ * Deletes expired incomplete uploads.
100
+ * Returns the number of deleted uploads.
101
+ */
102
+ deleteExpired(): Promise<number>;
103
+ /**
104
+ * Returns the expiration period in milliseconds
105
+ */
106
+ getExpiration(): number;
107
+ }
108
+ //#endregion
109
+ export { S3Store };
110
+ //# sourceMappingURL=s3-store.d.ts.map
@@ -0,0 +1,342 @@
1
+ import { log } from "./log.js";
2
+ import { Semaphore } from "./semaphore.js";
3
+ import { S3ExpirationManager } from "./expiration-manager.js";
4
+ import { S3FileOperations } from "./file-operations.js";
5
+ import { S3MetadataManager } from "./metadata-manager.js";
6
+ import { S3PartsManager } from "./parts-manager.js";
7
+ import stream from "node:stream";
8
+ import { NoSuchKey, NotFound, S3 } from "@aws-sdk/client-s3";
9
+ import { DataStore, ERRORS, MemoryKvStore, TUS_RESUMABLE, Upload } from "@tus/utils";
10
+
11
+ //#region src/tus/stores/s3/s3-store.ts
12
+ var S3Store = class extends DataStore {
13
+ client;
14
+ bucket;
15
+ partSize = 8 * 1024 * 1024;
16
+ minPartSize = 5 * 1024 * 1024;
17
+ maxMultipartParts = 1e4;
18
+ maxUploadSize = 5497558138880;
19
+ useTags = false;
20
+ expirationPeriodInMilliseconds = 0;
21
+ acl;
22
+ cache;
23
+ partUploadSemaphore;
24
+ metadataManager;
25
+ fileOperations;
26
+ partsManager;
27
+ expirationManager;
28
+ customEndpoint;
29
+ constructor(options) {
30
+ super();
31
+ const { maxMultipartParts, minPartSize, partSize, s3ClientConfig, maxConcurrentPartUploads, useTags, expirationPeriodInMilliseconds, cache } = options;
32
+ const { acl, bucket,...restS3ClientConfig } = s3ClientConfig;
33
+ this.extensions = [
34
+ "creation",
35
+ "creation-with-upload",
36
+ "creation-defer-length",
37
+ "termination",
38
+ "expiration"
39
+ ];
40
+ this.bucket = bucket;
41
+ this.acl = acl;
42
+ this.client = new S3(restS3ClientConfig);
43
+ this.customEndpoint = String(restS3ClientConfig.endpoint);
44
+ this.partSize = partSize ?? this.partSize;
45
+ this.minPartSize = minPartSize ?? this.minPartSize;
46
+ this.maxMultipartParts = maxMultipartParts ?? this.maxMultipartParts;
47
+ this.useTags = useTags ?? this.useTags;
48
+ this.expirationPeriodInMilliseconds = expirationPeriodInMilliseconds ?? this.expirationPeriodInMilliseconds;
49
+ this.cache = cache ?? new MemoryKvStore();
50
+ this.partUploadSemaphore = new Semaphore(maxConcurrentPartUploads ?? 60);
51
+ this.metadataManager = new S3MetadataManager(this.client, this.bucket, this.cache, this.shouldUseExpirationTags.bind(this), this.generateCompleteTag.bind(this));
52
+ this.fileOperations = new S3FileOperations(this.maxMultipartParts, this.maxUploadSize, this.minPartSize, this.partSize);
53
+ this.partsManager = new S3PartsManager(this.client, this.bucket, this.minPartSize, this.partUploadSemaphore, this.metadataManager, this.fileOperations, this.generateCompleteTag.bind(this));
54
+ this.expirationManager = new S3ExpirationManager(this.client, this.bucket, this.expirationPeriodInMilliseconds, this.generateInfoKey.bind(this), this.generatePartKey.bind(this));
55
+ this.deleteExpired();
56
+ }
57
+ /**
58
+ * Generate the key name for the info file
59
+ * @param id - The upload ID
60
+ * @returns The info file key
61
+ */
62
+ generateInfoKey(id) {
63
+ return `${id}.info`;
64
+ }
65
+ /**
66
+ * Generate the key name for a part file
67
+ * @param id - The upload ID
68
+ * @param isIncompletePart - Whether this is an incomplete part (default: false)
69
+ * @returns The part file key
70
+ */
71
+ generatePartKey(id, isIncompletePart = false) {
72
+ return isIncompletePart ? `${id}.part` : id;
73
+ }
74
+ /**
75
+ * Helper method to check if expiration tags should be used
76
+ * @returns True if expiration tags should be used
77
+ */
78
+ shouldUseExpirationTags() {
79
+ return this.expirationPeriodInMilliseconds !== 0 && this.useTags;
80
+ }
81
+ /**
82
+ * Generates a tag for marking complete/incomplete uploads
83
+ * @param value - Either 'false' or 'true' to mark completion status
84
+ * @returns The tag string or undefined if tags shouldn't be used
85
+ */
86
+ generateCompleteTag(value) {
87
+ if (!this.shouldUseExpirationTags()) return void 0;
88
+ return `Tus-Completed=${value}`;
89
+ }
90
+ /**
91
+ * Creates a multipart upload on S3 attaching any metadata to it.
92
+ * Also, a `${file_id}.info` file is created which holds some information
93
+ * about the upload itself like: `upload-id`, `upload-length`, etc.
94
+ * @param upload - The upload object to create
95
+ * @returns Promise that resolves to the created upload
96
+ */
97
+ async create(upload) {
98
+ log(`[${upload.id}] initializing multipart upload`);
99
+ const request = {
100
+ Bucket: this.bucket,
101
+ Key: upload.id,
102
+ Metadata: { "tus-version": TUS_RESUMABLE }
103
+ };
104
+ if (upload.metadata?.contentType) request.ContentType = upload.metadata.contentType;
105
+ if (upload.metadata?.cacheControl) request.CacheControl = upload.metadata.cacheControl;
106
+ if (this.acl) request.ACL = this.acl;
107
+ upload.creation_date = (/* @__PURE__ */ new Date()).toISOString();
108
+ const response = await this.client.createMultipartUpload(request);
109
+ upload.storage = {
110
+ type: "s3",
111
+ bucket: this.bucket,
112
+ path: response.Key
113
+ };
114
+ await this.metadataManager.saveMetadata({
115
+ upload,
116
+ uploadId: response.UploadId
117
+ });
118
+ log(`[${upload.id}] multipart upload created (${response.UploadId})`);
119
+ return upload;
120
+ }
121
+ /**
122
+ * Declares the length of the upload
123
+ * @param file_id - The file ID
124
+ * @param upload_length - The length of the upload
125
+ * @returns Promise that resolves when length is declared
126
+ */
127
+ async declareUploadLength(file_id, upload_length) {
128
+ const { file, "upload-id": uploadId } = await this.metadataManager.getMetadata({ id: file_id });
129
+ if (!file) throw ERRORS.FILE_NOT_FOUND;
130
+ file.size = upload_length;
131
+ await this.metadataManager.saveMetadata({
132
+ upload: file,
133
+ uploadId
134
+ });
135
+ }
136
+ /**
137
+ * Writes `buffer` to the file specified by the upload's `id` at `offset`
138
+ * @param readable - The readable stream to write
139
+ * @param id - The upload ID
140
+ * @param offset - The byte offset to write at
141
+ * @returns Promise that resolves to the number of bytes written
142
+ */
143
+ async write(readable, id, offset) {
144
+ const metadata = await this.metadataManager.getMetadata({ id });
145
+ const calculatedOffset = this.fileOperations.calculateOffsetFromParts({ parts: await this.partsManager.retrieveParts({ id }) });
146
+ const offsetDiff = offset - calculatedOffset;
147
+ const requestedOffset = offset;
148
+ let finalReadable = readable;
149
+ if (offsetDiff < 0) throw ERRORS.FILE_WRITE_ERROR;
150
+ if (offsetDiff > 0) {
151
+ const incompletePart = await this.partsManager.downloadIncompletePart({ id });
152
+ if (!incompletePart) throw ERRORS.FILE_WRITE_ERROR;
153
+ if (incompletePart.size !== offsetDiff) throw ERRORS.FILE_WRITE_ERROR;
154
+ await this.partsManager.deleteIncompletePart({ id });
155
+ offset = requestedOffset - incompletePart.size;
156
+ finalReadable = stream.Readable.from((async function* () {
157
+ yield* incompletePart.createReader({ cleanUpOnEnd: true });
158
+ yield* readable;
159
+ })());
160
+ }
161
+ const partNumber = this.fileOperations.calculatePartNumber({ parts: await this.partsManager.retrieveParts({ id }) });
162
+ const bytesUploaded = await this.partsManager.uploadParts({
163
+ metadata,
164
+ readStream: finalReadable,
165
+ currentPartNumber: partNumber,
166
+ offset
167
+ });
168
+ const newOffset = requestedOffset + bytesUploaded - (offsetDiff > 0 ? offsetDiff : 0);
169
+ if (metadata.file.size === newOffset) try {
170
+ const parts = await this.partsManager.retrieveParts({ id });
171
+ await this.partsManager.finishMultipartUpload({
172
+ metadata,
173
+ parts
174
+ });
175
+ const completedUpload = new Upload({
176
+ ...metadata.file,
177
+ offset: newOffset,
178
+ size: metadata.file.size,
179
+ storage: metadata.file.storage
180
+ });
181
+ await this.metadataManager.completeMetadata({ upload: completedUpload });
182
+ } catch (error) {
183
+ log(`[${id}] failed to finish upload`, error);
184
+ throw error;
185
+ }
186
+ return newOffset;
187
+ }
188
+ /**
189
+ * Returns the current state of the upload, i.e how much data has been
190
+ * uploaded and if the upload is complete.
191
+ */
192
+ async getUpload(id) {
193
+ let metadata;
194
+ try {
195
+ metadata = await this.metadataManager.getMetadata({ id });
196
+ } catch (error) {
197
+ if (error instanceof NoSuchKey || error instanceof NotFound || error?.Code === "NotFound" || error?.Code === "NoSuchKey") throw ERRORS.FILE_NOT_FOUND;
198
+ throw error;
199
+ }
200
+ let offset;
201
+ try {
202
+ const parts = await this.partsManager.retrieveParts({ id });
203
+ offset = this.fileOperations.calculateOffsetFromParts({ parts });
204
+ } catch (error) {
205
+ if (error?.Code === "NoSuchUpload" || error?.Code === "NoSuchKey") return new Upload({
206
+ ...metadata.file,
207
+ metadata: metadata.file.metadata,
208
+ offset: metadata.file.size,
209
+ size: metadata.file.size,
210
+ storage: metadata.file.storage
211
+ });
212
+ log("getUpload: Error retrieving parts.", error);
213
+ throw error;
214
+ }
215
+ const incompletePartSize = await this.partsManager.getIncompletePartSize({ id });
216
+ return new Upload({
217
+ ...metadata.file,
218
+ offset: offset + (incompletePartSize ?? 0),
219
+ size: metadata.file.size,
220
+ storage: metadata.file.storage
221
+ });
222
+ }
223
+ /**
224
+ * Reads the file specified by the upload's `id` and returns a readable stream
225
+ */
226
+ async read(id) {
227
+ log(`[${id}] attempting to read file from S3`);
228
+ let retries = 3;
229
+ let lastError = null;
230
+ while (retries > 0) try {
231
+ const data = await this.client.getObject({
232
+ Bucket: this.bucket,
233
+ Key: id
234
+ });
235
+ log(`[${id}] successfully read file from S3`);
236
+ return data.Body;
237
+ } catch (error) {
238
+ log(`[${id}] failed to read file, retries left: ${retries - 1}`, error);
239
+ lastError = error;
240
+ retries--;
241
+ if (retries > 0) await new Promise((resolve) => setTimeout(resolve, 100));
242
+ }
243
+ log(`[${id}] failed to read file after all retries`);
244
+ throw lastError || /* @__PURE__ */ new Error(`Failed to read file ${id} after retries`);
245
+ }
246
+ /**
247
+ * Removes the file specified by the upload's `id`
248
+ */
249
+ async remove(id) {
250
+ try {
251
+ const { "upload-id": uploadId } = await this.metadataManager.getMetadata({ id });
252
+ if (uploadId) await this.client.abortMultipartUpload({
253
+ Bucket: this.bucket,
254
+ Key: id,
255
+ UploadId: uploadId
256
+ });
257
+ } catch (error) {
258
+ if (error?.code && [
259
+ "NoSuchKey",
260
+ "NoSuchUpload",
261
+ "NotFound"
262
+ ].includes(error.Code || "")) {
263
+ log("remove: No file found.", error);
264
+ throw ERRORS.FILE_NOT_FOUND;
265
+ }
266
+ throw error;
267
+ }
268
+ await this.client.deleteObjects({
269
+ Bucket: this.bucket,
270
+ Delete: { Objects: [
271
+ { Key: id },
272
+ { Key: this.metadataManager.generateInfoKey({ id }) },
273
+ { Key: this.metadataManager.generatePartKey({
274
+ id,
275
+ isIncomplete: true
276
+ }) }
277
+ ] }
278
+ });
279
+ await this.metadataManager.clearCache({ id });
280
+ }
281
+ /**
282
+ * Combine all multipart uploads into a single object
283
+ */
284
+ async completeMultipartUpload(id) {
285
+ const metadata = await this.metadataManager.getMetadata({ id });
286
+ const parts = await this.partsManager.retrieveParts({ id });
287
+ const incompletePartInfo = await this.partsManager.downloadIncompletePart({ id });
288
+ if (incompletePartInfo) {
289
+ await this.partsManager.uploadPart({
290
+ metadata,
291
+ readStream: incompletePartInfo.createReader({ cleanUpOnEnd: true }),
292
+ partNumber: parts.length + 1
293
+ });
294
+ await this.partsManager.deleteIncompletePart({ id });
295
+ const updatedParts = await this.partsManager.retrieveParts({ id });
296
+ await this.partsManager.finishMultipartUpload({
297
+ metadata,
298
+ parts: updatedParts
299
+ });
300
+ } else await this.partsManager.finishMultipartUpload({
301
+ metadata,
302
+ parts
303
+ });
304
+ const completedUpload = new Upload({
305
+ ...metadata.file,
306
+ offset: metadata.file.size ?? 0,
307
+ size: metadata.file.size ?? 0,
308
+ storage: metadata.file.storage
309
+ });
310
+ await this.metadataManager.completeMetadata({ upload: completedUpload });
311
+ return completedUpload;
312
+ }
313
+ /**
314
+ * Get the full S3 URL for an uploaded file
315
+ */
316
+ getUrl(id) {
317
+ if (this.customEndpoint) return `${this.customEndpoint}/${this.bucket}/${id}`;
318
+ const regionConfig = this.client.config.region;
319
+ let region;
320
+ if (typeof regionConfig === "function") region = "us-east-1";
321
+ else region = regionConfig || "us-east-1";
322
+ if (region === "us-east-1") return `https://${this.bucket}.s3.amazonaws.com/${id}`;
323
+ else return `https://${this.bucket}.s3.${region}.amazonaws.com/${id}`;
324
+ }
325
+ /**
326
+ * Deletes expired incomplete uploads.
327
+ * Returns the number of deleted uploads.
328
+ */
329
+ async deleteExpired() {
330
+ return this.expirationManager.deleteExpired({});
331
+ }
332
+ /**
333
+ * Returns the expiration period in milliseconds
334
+ */
335
+ getExpiration() {
336
+ return this.expirationManager.getExpiration();
337
+ }
338
+ };
339
+
340
+ //#endregion
341
+ export { S3Store };
342
+ //# sourceMappingURL=s3-store.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"s3-store.js","names":["options: S3StoreConfig","id: string","isIncompletePart: boolean","value: 'false' | 'true'","upload: Upload","request: AWS.CreateMultipartUploadCommandInput","file_id: string","upload_length: number","readable: stream.Readable","offset: number","metadata: TusUploadMetadata","lastError: Error | null","region: string"],"sources":["../../../../src/tus/stores/s3/s3-store.ts"],"sourcesContent":["import { NoSuchKey, NotFound, S3 } from '@aws-sdk/client-s3'\nimport {\n DataStore,\n ERRORS,\n MemoryKvStore,\n TUS_RESUMABLE,\n Upload,\n type KvStore,\n} from '@tus/utils'\nimport stream, { type Readable } from 'node:stream'\n\nimport { log } from './log'\nimport { Semaphore } from './semaphore'\nimport { S3ExpirationManager } from './expiration-manager'\nimport { S3FileOperations } from './file-operations'\nimport { S3MetadataManager } from './metadata-manager'\nimport { S3PartsManager } from './parts-manager'\n\nimport type AWS from '@aws-sdk/client-s3'\nimport type { AWSError, S3StoreConfig, TusUploadMetadata } from '../../../types'\n\nexport class S3Store extends DataStore {\n public client: S3\n public bucket: string\n public partSize = 8 * 1024 * 1024 // 8MB preferred part size\n public minPartSize = 5 * 1024 * 1024 // 5MB minimum part size\n public maxMultipartParts = 10_000\n public maxUploadSize = 5_497_558_138_880 as const // 5TiB\n public useTags = false\n public expirationPeriodInMilliseconds = 0\n protected acl?: string\n\n protected cache: KvStore<TusUploadMetadata>\n protected partUploadSemaphore: Semaphore\n protected metadataManager: S3MetadataManager\n protected fileOperations: S3FileOperations\n protected partsManager: S3PartsManager\n protected expirationManager: S3ExpirationManager\n protected customEndpoint: string\n\n constructor(options: S3StoreConfig) {\n super()\n const {\n maxMultipartParts,\n minPartSize,\n partSize,\n s3ClientConfig,\n maxConcurrentPartUploads,\n useTags,\n 
expirationPeriodInMilliseconds,\n cache,\n } = options\n const { acl, bucket, ...restS3ClientConfig } = s3ClientConfig\n\n this.extensions = [\n 'creation',\n 'creation-with-upload',\n 'creation-defer-length',\n 'termination',\n 'expiration',\n ]\n\n this.bucket = bucket\n this.acl = acl\n this.client = new S3(restS3ClientConfig)\n this.customEndpoint = String(restS3ClientConfig.endpoint)\n\n this.partSize = partSize ?? this.partSize\n this.minPartSize = minPartSize ?? this.minPartSize\n this.maxMultipartParts = maxMultipartParts ?? this.maxMultipartParts\n\n this.useTags = useTags ?? this.useTags\n this.expirationPeriodInMilliseconds =\n expirationPeriodInMilliseconds ?? this.expirationPeriodInMilliseconds\n this.cache = cache ?? new MemoryKvStore<TusUploadMetadata>()\n this.partUploadSemaphore = new Semaphore(maxConcurrentPartUploads ?? 60)\n\n // Initialize component managers\n this.metadataManager = new S3MetadataManager(\n this.client,\n this.bucket,\n this.cache,\n this.shouldUseExpirationTags.bind(this),\n this.generateCompleteTag.bind(this)\n )\n\n this.fileOperations = new S3FileOperations(\n this.maxMultipartParts,\n this.maxUploadSize,\n this.minPartSize,\n this.partSize\n )\n\n this.partsManager = new S3PartsManager(\n this.client,\n this.bucket,\n this.minPartSize,\n this.partUploadSemaphore,\n this.metadataManager,\n this.fileOperations,\n this.generateCompleteTag.bind(this)\n )\n\n this.expirationManager = new S3ExpirationManager(\n this.client,\n this.bucket,\n this.expirationPeriodInMilliseconds,\n this.generateInfoKey.bind(this),\n this.generatePartKey.bind(this)\n )\n\n // Cleanup expired uploads when the store is initialized\n this.deleteExpired()\n }\n\n /**\n * Generate the key name for the info file\n * @param id - The upload ID\n * @returns The info file key\n */\n protected generateInfoKey(id: string): string {\n return `${id}.info`\n }\n\n /**\n * Generate the key name for a part file\n * @param id - The upload ID\n * @param 
isIncompletePart - Whether this is an incomplete part (default: false)\n * @returns The part file key\n */\n protected generatePartKey(\n id: string,\n isIncompletePart: boolean = false\n ): string {\n return isIncompletePart ? `${id}.part` : id\n }\n\n /**\n * Helper method to check if expiration tags should be used\n * @returns True if expiration tags should be used\n */\n protected shouldUseExpirationTags(): boolean {\n return this.expirationPeriodInMilliseconds !== 0 && this.useTags\n }\n\n /**\n * Generates a tag for marking complete/incomplete uploads\n * @param value - Either 'false' or 'true' to mark completion status\n * @returns The tag string or undefined if tags shouldn't be used\n */\n protected generateCompleteTag(value: 'false' | 'true'): string | undefined {\n if (!this.shouldUseExpirationTags()) {\n return undefined\n }\n return `Tus-Completed=${value}`\n }\n\n /**\n * Creates a multipart upload on S3 attaching any metadata to it.\n * Also, a `${file_id}.info` file is created which holds some information\n * about the upload itself like: `upload-id`, `upload-length`, etc.\n * @param upload - The upload object to create\n * @returns Promise that resolves to the created upload\n */\n public async create(upload: Upload): Promise<Upload> {\n log(`[${upload.id}] initializing multipart upload`)\n const request: AWS.CreateMultipartUploadCommandInput = {\n Bucket: this.bucket,\n Key: upload.id,\n Metadata: { 'tus-version': TUS_RESUMABLE },\n }\n\n if (upload.metadata?.contentType) {\n request.ContentType = upload.metadata.contentType as string\n }\n\n if (upload.metadata?.cacheControl) {\n request.CacheControl = upload.metadata.cacheControl as string\n }\n\n if (this.acl) {\n request.ACL = this.acl as AWS.ObjectCannedACL\n }\n\n upload.creation_date = new Date().toISOString()\n\n const response = await this.client.createMultipartUpload(request)\n upload.storage = {\n type: 's3',\n bucket: this.bucket,\n path: response.Key as string,\n }\n await 
this.metadataManager.saveMetadata({\n upload,\n uploadId: response.UploadId as string,\n })\n log(`[${upload.id}] multipart upload created (${response.UploadId})`)\n\n return upload\n }\n\n /**\n * Declares the length of the upload\n * @param file_id - The file ID\n * @param upload_length - The length of the upload\n * @returns Promise that resolves when length is declared\n */\n public async declareUploadLength(\n file_id: string,\n upload_length: number\n ): Promise<void> {\n const { file, 'upload-id': uploadId } =\n await this.metadataManager.getMetadata({ id: file_id })\n if (!file) {\n throw ERRORS.FILE_NOT_FOUND\n }\n\n file.size = upload_length\n\n await this.metadataManager.saveMetadata({ upload: file, uploadId })\n }\n\n /**\n * Writes `buffer` to the file specified by the upload's `id` at `offset`\n * @param readable - The readable stream to write\n * @param id - The upload ID\n * @param offset - The byte offset to write at\n * @returns Promise that resolves to the number of bytes written\n */\n public async write(\n readable: stream.Readable,\n id: string,\n offset: number\n ): Promise<number> {\n const metadata = await this.metadataManager.getMetadata({ id })\n\n // TUS sends PATCH requests with an `upload-offset` header.\n // Offset the write by the offset in the PATCH request.\n const calculatedOffset = this.fileOperations.calculateOffsetFromParts({\n parts: await this.partsManager.retrieveParts({ id }),\n })\n const offsetDiff = offset - calculatedOffset\n const requestedOffset = offset\n\n let finalReadable = readable\n\n if (offsetDiff < 0) {\n throw ERRORS.FILE_WRITE_ERROR\n }\n\n // If the offset given in the PATCH request is higher than\n // the expected offset, we need to prepend an incomplete\n // part to the readable stream, if one exists.\n if (offsetDiff > 0) {\n const incompletePart = await this.partsManager.downloadIncompletePart({\n id,\n })\n\n if (!incompletePart) {\n throw ERRORS.FILE_WRITE_ERROR\n }\n\n if (incompletePart.size !== 
offsetDiff) {\n throw ERRORS.FILE_WRITE_ERROR\n }\n\n // Clear the incomplete part from S3 since it's going to be combined with the current request\n await this.partsManager.deleteIncompletePart({ id })\n\n // Adjust offset to account for the incomplete part\n offset = requestedOffset - incompletePart.size\n\n finalReadable = stream.Readable.from(\n (async function* () {\n yield* incompletePart.createReader({ cleanUpOnEnd: true })\n yield* readable\n })()\n )\n }\n\n const partNumber = this.fileOperations.calculatePartNumber({\n parts: await this.partsManager.retrieveParts({ id }),\n })\n\n const bytesUploaded = await this.partsManager.uploadParts({\n metadata,\n readStream: finalReadable,\n currentPartNumber: partNumber,\n offset,\n })\n\n // The size of the incomplete part should not be counted, because the\n // process of the incomplete part should be fully transparent to the user.\n const newOffset =\n requestedOffset + bytesUploaded - (offsetDiff > 0 ? offsetDiff : 0)\n\n // Check if the upload is complete\n if (metadata.file.size === newOffset) {\n try {\n const parts = await this.partsManager.retrieveParts({ id })\n await this.partsManager.finishMultipartUpload({ metadata, parts })\n\n // Update the metadata with completed state\n const completedUpload = new Upload({\n ...metadata.file,\n offset: newOffset,\n size: metadata.file.size,\n storage: metadata.file.storage,\n })\n\n await this.metadataManager.completeMetadata({ upload: completedUpload })\n // Don't clear cache immediately - Payload might still need the metadata\n // await this.metadataManager.clearCache(id)\n } catch (error) {\n log(`[${id}] failed to finish upload`, error)\n throw error\n }\n }\n\n return newOffset\n }\n\n /**\n * Returns the current state of the upload, i.e how much data has been\n * uploaded and if the upload is complete.\n */\n public async getUpload(id: string): Promise<Upload> {\n let metadata: TusUploadMetadata\n\n try {\n metadata = await this.metadataManager.getMetadata({ 
id })\n } catch (error) {\n if (\n error instanceof NoSuchKey ||\n error instanceof NotFound ||\n (error as AWSError)?.Code === 'NotFound' ||\n (error as AWSError)?.Code === 'NoSuchKey'\n ) {\n throw ERRORS.FILE_NOT_FOUND\n }\n throw error\n }\n\n let offset: number\n\n try {\n const parts = await this.partsManager.retrieveParts({ id })\n offset = this.fileOperations.calculateOffsetFromParts({ parts })\n } catch (error) {\n // Check if the error is caused by the upload not being found. This happens\n // when the multipart upload has already been completed or aborted.\n if (\n (error as AWSError)?.Code === 'NoSuchUpload' ||\n (error as AWSError)?.Code === 'NoSuchKey'\n ) {\n return new Upload({\n ...metadata.file,\n metadata: metadata.file.metadata,\n offset: metadata.file.size as number,\n size: metadata.file.size,\n storage: metadata.file.storage,\n })\n }\n\n log('getUpload: Error retrieving parts.', error)\n throw error\n }\n\n const incompletePartSize = await this.partsManager.getIncompletePartSize({\n id,\n })\n\n return new Upload({\n ...metadata.file,\n offset: offset + (incompletePartSize ?? 
0),\n size: metadata.file.size,\n storage: metadata.file.storage,\n })\n }\n\n /**\n * Reads the file specified by the upload's `id` and returns a readable stream\n */\n async read(id: string): Promise<Readable> {\n log(`[${id}] attempting to read file from S3`)\n let retries = 3\n let lastError: Error | null = null\n\n while (retries > 0) {\n try {\n const data = await this.client.getObject({\n Bucket: this.bucket,\n Key: id,\n })\n log(`[${id}] successfully read file from S3`)\n return data.Body as Readable\n } catch (error) {\n log(`[${id}] failed to read file, retries left: ${retries - 1}`, error)\n lastError = error as Error\n retries--\n\n if (retries > 0) {\n // Wait a bit before retrying, in case S3 needs time for consistency\n await new Promise((resolve) => setTimeout(resolve, 100))\n }\n }\n }\n\n log(`[${id}] failed to read file after all retries`)\n throw lastError || new Error(`Failed to read file ${id} after retries`)\n }\n\n /**\n * Removes the file specified by the upload's `id`\n */\n public async remove(id: string): Promise<void> {\n try {\n const { 'upload-id': uploadId } = await this.metadataManager.getMetadata({\n id,\n })\n if (uploadId) {\n await this.client.abortMultipartUpload({\n Bucket: this.bucket,\n Key: id,\n UploadId: uploadId,\n })\n }\n } catch (error) {\n if (\n (error as AWSError)?.code &&\n ['NoSuchKey', 'NoSuchUpload', 'NotFound'].includes(\n (error as AWSError).Code || ''\n )\n ) {\n log('remove: No file found.', error)\n throw ERRORS.FILE_NOT_FOUND\n }\n throw error\n }\n\n await this.client.deleteObjects({\n Bucket: this.bucket,\n Delete: {\n Objects: [\n { Key: id },\n { Key: this.metadataManager.generateInfoKey({ id }) },\n {\n Key: this.metadataManager.generatePartKey({\n id,\n isIncomplete: true,\n }),\n },\n ],\n },\n })\n\n await this.metadataManager.clearCache({ id })\n }\n\n /**\n * Combine all multipart uploads into a single object\n */\n public async completeMultipartUpload(id: string): Promise<Upload> {\n const 
metadata = await this.metadataManager.getMetadata({ id })\n const parts = await this.partsManager.retrieveParts({ id })\n\n const incompletePartInfo = await this.partsManager.downloadIncompletePart({\n id,\n })\n\n if (incompletePartInfo) {\n // Upload the incomplete part as a regular part\n await this.partsManager.uploadPart({\n metadata,\n readStream: incompletePartInfo.createReader({ cleanUpOnEnd: true }),\n partNumber: parts.length + 1,\n })\n\n // Remove the incomplete part\n await this.partsManager.deleteIncompletePart({ id })\n\n // Re-fetch parts to include the newly uploaded part\n const updatedParts = await this.partsManager.retrieveParts({ id })\n await this.partsManager.finishMultipartUpload({\n metadata,\n parts: updatedParts,\n })\n } else {\n await this.partsManager.finishMultipartUpload({ metadata, parts })\n }\n\n const completedUpload = new Upload({\n ...metadata.file,\n offset: metadata.file.size ?? 0,\n size: metadata.file.size ?? 0,\n storage: metadata.file.storage,\n })\n\n await this.metadataManager.completeMetadata({ upload: completedUpload })\n\n return completedUpload\n }\n\n /**\n * Get the full S3 URL for an uploaded file\n */\n public getUrl(id: string): string {\n // Use the custom endpoint if available\n if (this.customEndpoint) {\n return `${this.customEndpoint}/${this.bucket}/${id}`\n }\n\n // Fallback to standard AWS S3 URL format\n const regionConfig = this.client.config.region\n let region: string\n\n // If region is a function, we can't resolve it synchronously, so use a fallback\n if (typeof regionConfig === 'function') {\n region = 'us-east-1' // fallback for sync calls\n } else {\n region = regionConfig || 'us-east-1'\n }\n\n // Standard AWS S3 URL format\n if (region === 'us-east-1') {\n return `https://${this.bucket}.s3.amazonaws.com/${id}`\n } else {\n return `https://${this.bucket}.s3.${region}.amazonaws.com/${id}`\n }\n }\n\n /**\n * Deletes expired incomplete uploads.\n * Returns the number of deleted uploads.\n */\n 
async deleteExpired(): Promise<number> {\n return this.expirationManager.deleteExpired({})\n }\n\n /**\n * Returns the expiration period in milliseconds\n */\n getExpiration(): number {\n return this.expirationManager.getExpiration()\n }\n}\n"],"mappings":";;;;;;;;;;;AAqBA,IAAa,UAAb,cAA6B,UAAU;CACrC,AAAO;CACP,AAAO;CACP,AAAO,WAAW,IAAI,OAAO;CAC7B,AAAO,cAAc,IAAI,OAAO;CAChC,AAAO,oBAAoB;CAC3B,AAAO,gBAAgB;CACvB,AAAO,UAAU;CACjB,AAAO,iCAAiC;CACxC,AAAU;CAEV,AAAU;CACV,AAAU;CACV,AAAU;CACV,AAAU;CACV,AAAU;CACV,AAAU;CACV,AAAU;CAEV,YAAYA,SAAwB;EAClC,OAAO;EACP,MAAM,EACJ,mBACA,aACA,UACA,gBACA,0BACA,SACA,gCACA,OACD,GAAG;EACJ,MAAM,EAAE,KAAK,OAAQ,GAAG,oBAAoB,GAAG;EAE/C,KAAK,aAAa;GAChB;GACA;GACA;GACA;GACA;EACD;EAED,KAAK,SAAS;EACd,KAAK,MAAM;EACX,KAAK,SAAS,IAAI,GAAG;EACrB,KAAK,iBAAiB,OAAO,mBAAmB,SAAS;EAEzD,KAAK,WAAW,YAAY,KAAK;EACjC,KAAK,cAAc,eAAe,KAAK;EACvC,KAAK,oBAAoB,qBAAqB,KAAK;EAEnD,KAAK,UAAU,WAAW,KAAK;EAC/B,KAAK,iCACH,kCAAkC,KAAK;EACzC,KAAK,QAAQ,SAAS,IAAI;EAC1B,KAAK,sBAAsB,IAAI,UAAU,4BAA4B;EAGrE,KAAK,kBAAkB,IAAI,kBACzB,KAAK,QACL,KAAK,QACL,KAAK,OACL,KAAK,wBAAwB,KAAK,KAAK,EACvC,KAAK,oBAAoB,KAAK,KAAK;EAGrC,KAAK,iBAAiB,IAAI,iBACxB,KAAK,mBACL,KAAK,eACL,KAAK,aACL,KAAK;EAGP,KAAK,eAAe,IAAI,eACtB,KAAK,QACL,KAAK,QACL,KAAK,aACL,KAAK,qBACL,KAAK,iBACL,KAAK,gBACL,KAAK,oBAAoB,KAAK,KAAK;EAGrC,KAAK,oBAAoB,IAAI,oBAC3B,KAAK,QACL,KAAK,QACL,KAAK,gCACL,KAAK,gBAAgB,KAAK,KAAK,EAC/B,KAAK,gBAAgB,KAAK,KAAK;EAIjC,KAAK,eAAe;CACrB;;;;;;CAOD,AAAU,gBAAgBC,IAAoB;AAC5C,SAAO,GAAG,GAAG,KAAK,CAAC;CACpB;;;;;;;CAQD,AAAU,gBACRA,IACAC,mBAA4B,OACpB;AACR,SAAO,mBAAmB,GAAG,GAAG,KAAK,CAAC,GAAG;CAC1C;;;;;CAMD,AAAU,0BAAmC;AAC3C,SAAO,KAAK,mCAAmC,KAAK,KAAK;CAC1D;;;;;;CAOD,AAAU,oBAAoBC,OAA6C;AACzE,MAAI,CAAC,KAAK,yBAAyB,CACjC,QAAO;AAET,SAAO,CAAC,cAAc,EAAE,OAAO;CAChC;;;;;;;;CASD,MAAa,OAAOC,QAAiC;EACnD,IAAI,CAAC,CAAC,EAAE,OAAO,GAAG,+BAA+B,CAAC,CAAC;EACnD,MAAMC,UAAiD;GACrD,QAAQ,KAAK;GACb,KAAK,OAAO;GACZ,UAAU,EAAE,eAAe,cAAe;EAC3C;AAED,MAAI,OAAO,UAAU,aACnB,QAAQ,cAAc,OAAO,SAAS;AAGxC,MAAI,OAAO,UAAU,cACnB,QAAQ,eAAe,OAAO,SAAS;AAGzC,MAAI,KAAK,KA
CP,QAAQ,MAAM,KAAK;EAGrB,OAAO,iCAAgB,IAAI,QAAO,aAAa;EAE/C,MAAM,WAAW,MAAM,KAAK,OAAO,sBAAsB,QAAQ;EACjE,OAAO,UAAU;GACf,MAAM;GACN,QAAQ,KAAK;GACb,MAAM,SAAS;EAChB;EACD,MAAM,KAAK,gBAAgB,aAAa;GACtC;GACA,UAAU,SAAS;EACpB,EAAC;EACF,IAAI,CAAC,CAAC,EAAE,OAAO,GAAG,4BAA4B,EAAE,SAAS,SAAS,CAAC,CAAC,CAAC;AAErE,SAAO;CACR;;;;;;;CAQD,MAAa,oBACXC,SACAC,eACe;EACf,MAAM,EAAE,MAAM,aAAa,UAAU,GACnC,MAAM,KAAK,gBAAgB,YAAY,EAAE,IAAI,QAAS,EAAC;AACzD,MAAI,CAAC,KACH,OAAM,OAAO;EAGf,KAAK,OAAO;EAEZ,MAAM,KAAK,gBAAgB,aAAa;GAAE,QAAQ;GAAM;EAAU,EAAC;CACpE;;;;;;;;CASD,MAAa,MACXC,UACAP,IACAQ,QACiB;EACjB,MAAM,WAAW,MAAM,KAAK,gBAAgB,YAAY,EAAE,GAAI,EAAC;EAI/D,MAAM,mBAAmB,KAAK,eAAe,yBAAyB,EACpE,OAAO,MAAM,KAAK,aAAa,cAAc,EAAE,GAAI,EAAC,CACrD,EAAC;EACF,MAAM,aAAa,SAAS;EAC5B,MAAM,kBAAkB;EAExB,IAAI,gBAAgB;AAEpB,MAAI,aAAa,EACf,OAAM,OAAO;AAMf,MAAI,aAAa,GAAG;GAClB,MAAM,iBAAiB,MAAM,KAAK,aAAa,uBAAuB,EACpE,GACD,EAAC;AAEF,OAAI,CAAC,eACH,OAAM,OAAO;AAGf,OAAI,eAAe,SAAS,WAC1B,OAAM,OAAO;GAIf,MAAM,KAAK,aAAa,qBAAqB,EAAE,GAAI,EAAC;GAGpD,SAAS,kBAAkB,eAAe;GAE1C,gBAAgB,OAAO,SAAS,MAC7B,mBAAmB;IAClB,OAAO,eAAe,aAAa,EAAE,cAAc,KAAM,EAAC;IAC1D,OAAO;GACR,IAAG,CACL;EACF;EAED,MAAM,aAAa,KAAK,eAAe,oBAAoB,EACzD,OAAO,MAAM,KAAK,aAAa,cAAc,EAAE,GAAI,EAAC,CACrD,EAAC;EAEF,MAAM,gBAAgB,MAAM,KAAK,aAAa,YAAY;GACxD;GACA,YAAY;GACZ,mBAAmB;GACnB;EACD,EAAC;EAIF,MAAM,YACJ,kBAAkB,iBAAiB,aAAa,IAAI,aAAa;AAGnE,MAAI,SAAS,KAAK,SAAS,UACzB,KAAI;GACF,MAAM,QAAQ,MAAM,KAAK,aAAa,cAAc,EAAE,GAAI,EAAC;GAC3D,MAAM,KAAK,aAAa,sBAAsB;IAAE;IAAU;GAAO,EAAC;GAGlE,MAAM,kBAAkB,IAAI,OAAO;IACjC,GAAG,SAAS;IACZ,QAAQ;IACR,MAAM,SAAS,KAAK;IACpB,SAAS,SAAS,KAAK;GACxB;GAED,MAAM,KAAK,gBAAgB,iBAAiB,EAAE,QAAQ,gBAAiB,EAAC;EAGzE,SAAQ,OAAO;GACd,IAAI,CAAC,CAAC,EAAE,GAAG,yBAAyB,CAAC,EAAE,MAAM;AAC7C,SAAM;EACP;AAGH,SAAO;CACR;;;;;CAMD,MAAa,UAAUR,IAA6B;EAClD,IAAIS;AAEJ,MAAI;GACF,WAAW,MAAM,KAAK,gBAAgB,YAAY,EAAE,GAAI,EAAC;EAC1D,SAAQ,OAAO;AACd,OACE,iBAAiB,aACjB,iBAAiB,YAChB,OAAoB,SAAS,cAC7B,OAAoB,SAAS,YAE9B,OAAM,OAAO;AAEf,SAAM;EACP;EAED,IAAID;AAEJ,MAAI;GACF,MAAM,QAAQ,MAAM,KAAK,aAAa,cAAc,EAAE,GAAI,EAAC;GAC3D,SAAS,KAAK,
eAAe,yBAAyB,EAAE,MAAO,EAAC;EACjE,SAAQ,OAAO;AAGd,OACG,OAAoB,SAAS,kBAC7B,OAAoB,SAAS,YAE9B,QAAO,IAAI,OAAO;IAChB,GAAG,SAAS;IACZ,UAAU,SAAS,KAAK;IACxB,QAAQ,SAAS,KAAK;IACtB,MAAM,SAAS,KAAK;IACpB,SAAS,SAAS,KAAK;GACxB;GAGH,IAAI,sCAAsC,MAAM;AAChD,SAAM;EACP;EAED,MAAM,qBAAqB,MAAM,KAAK,aAAa,sBAAsB,EACvE,GACD,EAAC;AAEF,SAAO,IAAI,OAAO;GAChB,GAAG,SAAS;GACZ,QAAQ,UAAU,sBAAsB;GACxC,MAAM,SAAS,KAAK;GACpB,SAAS,SAAS,KAAK;EACxB;CACF;;;;CAKD,MAAM,KAAKR,IAA+B;EACxC,IAAI,CAAC,CAAC,EAAE,GAAG,iCAAiC,CAAC,CAAC;EAC9C,IAAI,UAAU;EACd,IAAIU,YAA0B;AAE9B,SAAO,UAAU,EACf,KAAI;GACF,MAAM,OAAO,MAAM,KAAK,OAAO,UAAU;IACvC,QAAQ,KAAK;IACb,KAAK;GACN,EAAC;GACF,IAAI,CAAC,CAAC,EAAE,GAAG,gCAAgC,CAAC,CAAC;AAC7C,UAAO,KAAK;EACb,SAAQ,OAAO;GACd,IAAI,CAAC,CAAC,EAAE,GAAG,qCAAqC,EAAE,UAAU,GAAG,EAAE,MAAM;GACvE,YAAY;GACZ;AAEA,OAAI,UAAU,GAEZ,MAAM,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,IAAI;EAE1D;EAGH,IAAI,CAAC,CAAC,EAAE,GAAG,uCAAuC,CAAC,CAAC;AACpD,QAAM,6BAAa,IAAI,MAAM,CAAC,oBAAoB,EAAE,GAAG,cAAc,CAAC;CACvE;;;;CAKD,MAAa,OAAOV,IAA2B;AAC7C,MAAI;GACF,MAAM,EAAE,aAAa,UAAU,GAAG,MAAM,KAAK,gBAAgB,YAAY,EACvE,GACD,EAAC;AACF,OAAI,UACF,MAAM,KAAK,OAAO,qBAAqB;IACrC,QAAQ,KAAK;IACb,KAAK;IACL,UAAU;GACX,EAAC;EAEL,SAAQ,OAAO;AACd,OACG,OAAoB,QACrB;IAAC;IAAa;IAAgB;GAAW,EAAC,SACvC,MAAmB,QAAQ,GAC7B,EACD;IACA,IAAI,0BAA0B,MAAM;AACpC,UAAM,OAAO;GACd;AACD,SAAM;EACP;EAED,MAAM,KAAK,OAAO,cAAc;GAC9B,QAAQ,KAAK;GACb,QAAQ,EACN,SAAS;IACP,EAAE,KAAK,GAAI;IACX,EAAE,KAAK,KAAK,gBAAgB,gBAAgB,EAAE,GAAI,EAAC,CAAE;IACrD,EACE,KAAK,KAAK,gBAAgB,gBAAgB;KACxC;KACA,cAAc;IACf,EAAC,CACH;GACF,EACF;EACF,EAAC;EAEF,MAAM,KAAK,gBAAgB,WAAW,EAAE,GAAI,EAAC;CAC9C;;;;CAKD,MAAa,wBAAwBA,IAA6B;EAChE,MAAM,WAAW,MAAM,KAAK,gBAAgB,YAAY,EAAE,GAAI,EAAC;EAC/D,MAAM,QAAQ,MAAM,KAAK,aAAa,cAAc,EAAE,GAAI,EAAC;EAE3D,MAAM,qBAAqB,MAAM,KAAK,aAAa,uBAAuB,EACxE,GACD,EAAC;AAEF,MAAI,oBAAoB;GAEtB,MAAM,KAAK,aAAa,WAAW;IACjC;IACA,YAAY,mBAAmB,aAAa,EAAE,cAAc,KAAM,EAAC;IACnE,YAAY,MAAM,SAAS;GAC5B,EAAC;GAGF,MAAM,KAAK,aAAa,qBAAqB,EAAE,GAAI,EAAC;GAGpD,MAAM,eAAe,MAAM,KAAK,aAAa,cAAc,EAAE,GAAI,EAAC;GAClE,MAAM,KAAK,aAAa,sBAAsB;IAC5C;IACA,OAAO
;GACR,EAAC;EACH,OACC,MAAM,KAAK,aAAa,sBAAsB;GAAE;GAAU;EAAO,EAAC;EAGpE,MAAM,kBAAkB,IAAI,OAAO;GACjC,GAAG,SAAS;GACZ,QAAQ,SAAS,KAAK,QAAQ;GAC9B,MAAM,SAAS,KAAK,QAAQ;GAC5B,SAAS,SAAS,KAAK;EACxB;EAED,MAAM,KAAK,gBAAgB,iBAAiB,EAAE,QAAQ,gBAAiB,EAAC;AAExE,SAAO;CACR;;;;CAKD,AAAO,OAAOA,IAAoB;AAEhC,MAAI,KAAK,eACP,QAAO,GAAG,KAAK,eAAe,CAAC,EAAE,KAAK,OAAO,CAAC,EAAE,IAAI;EAItD,MAAM,eAAe,KAAK,OAAO,OAAO;EACxC,IAAIW;AAGJ,MAAI,OAAO,iBAAiB,YAC1B,SAAS;OAET,SAAS,gBAAgB;AAI3B,MAAI,WAAW,YACb,QAAO,CAAC,QAAQ,EAAE,KAAK,OAAO,kBAAkB,EAAE,IAAI;MAEtD,QAAO,CAAC,QAAQ,EAAE,KAAK,OAAO,IAAI,EAAE,OAAO,eAAe,EAAE,IAAI;CAEnE;;;;;CAMD,MAAM,gBAAiC;AACrC,SAAO,KAAK,kBAAkB,cAAc,CAAE,EAAC;CAChD;;;;CAKD,gBAAwB;AACtB,SAAO,KAAK,kBAAkB,eAAe;CAC9C;AACF"}
@@ -0,0 +1,16 @@
1
+ //#region src/tus/stores/s3/semaphore.d.ts
2
+ /**
3
+ * A semaphore implementation for controlling concurrent operations.
4
+ * Used to limit the number of simultaneous part uploads to S3.
5
+ */
6
+ declare class Semaphore {
7
+ private permits;
8
+ private queue;
9
+ constructor(permits: number);
10
+ private release;
11
+ acquire(): Promise<() => void>;
12
+ }
13
+ type SemaphorePermit = () => void;
14
+ //#endregion
15
+ export { Semaphore, SemaphorePermit };
16
+ //# sourceMappingURL=semaphore.d.ts.map
@@ -0,0 +1,32 @@
1
+ //#region src/tus/stores/s3/semaphore.ts
2
+ /**
3
+ * A semaphore implementation for controlling concurrent operations.
4
+ * Used to limit the number of simultaneous part uploads to S3.
5
+ */
6
+ var Semaphore = class {
7
+ permits;
8
+ queue = [];
9
+ constructor(permits) {
10
+ this.permits = permits;
11
+ }
12
+ release() {
13
+ this.permits++;
14
+ const next = this.queue.shift();
15
+ if (next) next();
16
+ }
17
+ async acquire() {
18
+ return new Promise((resolve) => {
19
+ if (this.permits > 0) {
20
+ this.permits--;
21
+ resolve(() => this.release());
22
+ } else this.queue.push(() => {
23
+ this.permits--;
24
+ resolve(() => this.release());
25
+ });
26
+ });
27
+ }
28
+ };
29
+
30
+ //#endregion
31
+ export { Semaphore };
32
+ //# sourceMappingURL=semaphore.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"semaphore.js","names":["permits: number"],"sources":["../../../../src/tus/stores/s3/semaphore.ts"],"sourcesContent":["/**\n * A semaphore implementation for controlling concurrent operations.\n * Used to limit the number of simultaneous part uploads to S3.\n */\nexport class Semaphore {\n private permits: number\n private queue: (() => void)[] = []\n\n constructor(permits: number) {\n this.permits = permits\n }\n\n private release(): void {\n this.permits++\n const next = this.queue.shift()\n if (next) {\n next()\n }\n }\n\n async acquire(): Promise<() => void> {\n return new Promise((resolve) => {\n if (this.permits > 0) {\n this.permits--\n resolve(() => this.release())\n } else {\n this.queue.push(() => {\n this.permits--\n resolve(() => this.release())\n })\n }\n })\n }\n}\n\nexport type SemaphorePermit = () => void\n"],"mappings":";;;;;AAIA,IAAa,YAAb,MAAuB;CACrB,AAAQ;CACR,AAAQ,QAAwB,CAAE;CAElC,YAAYA,SAAiB;EAC3B,KAAK,UAAU;CAChB;CAED,AAAQ,UAAgB;EACtB,KAAK;EACL,MAAM,OAAO,KAAK,MAAM,OAAO;AAC/B,MAAI,MACF,MAAM;CAET;CAED,MAAM,UAA+B;AACnC,SAAO,IAAI,QAAQ,CAAC,YAAY;AAC9B,OAAI,KAAK,UAAU,GAAG;IACpB,KAAK;IACL,QAAQ,MAAM,KAAK,SAAS,CAAC;GAC9B,OACC,KAAK,MAAM,KAAK,MAAM;IACpB,KAAK;IACL,QAAQ,MAAM,KAAK,SAAS,CAAC;GAC9B,EAAC;EAEL;CACF;AACF"}
@@ -0,0 +1,26 @@
1
+ //#region src/types/errors.d.ts
2
/**
 * Canonical error messages for the media-cloud plugin.
 * String-valued enum: each member's value is the human-readable message
 * itself, so members can be thrown/logged directly at call sites.
 */
declare enum MediaCloudError {
	// Mux video service
	MUX_CONFIG_MISSING = "Mux configuration (tokenId and tokenSecret) must be provided in pluginOptions to use Mux",
	MUX_CONFIG_INCOMPLETE = "Mux configuration is missing. Mux features will not be available",
	MUX_UPLOAD_ID_MISSING = "No upload-id found for upload",
	MUX_ASSET_DELETE_ERROR = "Error deleting Mux asset",
	MUX_UPLOAD_ERROR = "Mux video upload failed",
	MUX_DIRECT_UPLOAD_ERROR = "Mux direct upload failed",
	MUX_CREATE_UPLOAD_ERROR = "Error in Mux create upload handler",
	MUX_REQUEST_NO_JSON = "Request does not support json() method",
	// S3 storage
	S3_CONFIG_MISSING = "S3 configuration (bucket, region, accessKeyId, secretAccessKey) must be provided in pluginOptions",
	S3_DELETE_ERROR = "Error deleting file from S3",
	S3_UNIQUE_NAME_ERROR = "Could not find a unique file name after maximum tries",
	// TUS resumable uploads and file handling
	TUS_UPLOAD_ERROR = "TUS file upload error occurred",
	FILE_TYPE_UNKNOWN = "Unable to determine file type",
	FILE_TYPE_ERROR = "Error determining file type",
	FILENAME_SANITIZE_ERROR = "Error sanitizing filename",
	// Upload lifecycle and plugin wiring
	UPLOAD_NO_URL = "No upload URL provided, cannot parse upload ID",
	UPLOAD_HANDLER_ERROR = "Upload handler error occurred",
	UPLOAD_POLLING_ERROR = "Polling error for upload",
	PLUGIN_NOT_CONFIGURED = "Payload Media Cloud plugin is not configured",
	NAMING_FUNCTION_ERROR = "Error in namingFunction",
}
24
+ //#endregion
25
+ export { MediaCloudError };
26
+ //# sourceMappingURL=errors.d.ts.map
@@ -0,0 +1,28 @@
1
+ //#region src/types/errors.ts
2
/**
 * Runtime counterpart of the MediaCloudError string enum: a plain object
 * mapping error keys to human-readable messages. A string enum carries no
 * reverse (value -> key) mapping, so a side-effect-free object literal is
 * behaviorally identical to the compiled IIFE form.
 */
let MediaCloudError = {
	MUX_CONFIG_MISSING: "Mux configuration (tokenId and tokenSecret) must be provided in pluginOptions to use Mux",
	MUX_CONFIG_INCOMPLETE: "Mux configuration is missing. Mux features will not be available",
	MUX_UPLOAD_ID_MISSING: "No upload-id found for upload",
	MUX_ASSET_DELETE_ERROR: "Error deleting Mux asset",
	MUX_UPLOAD_ERROR: "Mux video upload failed",
	MUX_DIRECT_UPLOAD_ERROR: "Mux direct upload failed",
	MUX_CREATE_UPLOAD_ERROR: "Error in Mux create upload handler",
	MUX_REQUEST_NO_JSON: "Request does not support json() method",
	S3_CONFIG_MISSING: "S3 configuration (bucket, region, accessKeyId, secretAccessKey) must be provided in pluginOptions",
	S3_DELETE_ERROR: "Error deleting file from S3",
	S3_UNIQUE_NAME_ERROR: "Could not find a unique file name after maximum tries",
	TUS_UPLOAD_ERROR: "TUS file upload error occurred",
	FILE_TYPE_UNKNOWN: "Unable to determine file type",
	FILE_TYPE_ERROR: "Error determining file type",
	FILENAME_SANITIZE_ERROR: "Error sanitizing filename",
	UPLOAD_NO_URL: "No upload URL provided, cannot parse upload ID",
	UPLOAD_HANDLER_ERROR: "Upload handler error occurred",
	UPLOAD_POLLING_ERROR: "Polling error for upload",
	PLUGIN_NOT_CONFIGURED: "Payload Media Cloud plugin is not configured",
	NAMING_FUNCTION_ERROR: "Error in namingFunction",
};
25
+
26
+ //#endregion
27
+ export { MediaCloudError };
28
+ //# sourceMappingURL=errors.js.map