@directus/storage-driver-s3 12.0.8 → 12.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/dist/index.d.ts +55 -54
  2. package/dist/index.js +314 -385
  3. package/package.json +7 -7
package/dist/index.d.ts CHANGED
@@ -1,59 +1,60 @@
1
- import { ObjectCannedACL, ServerSideEncryption } from '@aws-sdk/client-s3';
2
- import { TusDriver } from '@directus/storage';
3
- import { ReadOptions, ChunkedUploadContext } from '@directus/types';
4
- import { Readable } from 'node:stream';
1
+ import { ObjectCannedACL, ServerSideEncryption } from "@aws-sdk/client-s3";
2
+ import { Readable } from "node:stream";
3
+ import { TusDriver } from "@directus/storage";
4
+ import { ChunkedUploadContext, ReadOptions } from "@directus/types";
5
5
 
6
+ //#region src/index.d.ts
6
7
  type DriverS3Config = {
7
- root?: string;
8
- key?: string;
9
- secret?: string;
10
- bucket: string;
11
- acl?: ObjectCannedACL;
12
- serverSideEncryption?: ServerSideEncryption;
13
- endpoint?: string;
14
- region?: string;
15
- forcePathStyle?: boolean;
16
- tus?: {
17
- chunkSize?: number;
18
- };
19
- connectionTimeout?: number;
20
- socketTimeout?: number;
21
- maxSockets?: number;
22
- keepAlive?: boolean;
8
+ root?: string;
9
+ key?: string;
10
+ secret?: string;
11
+ bucket: string;
12
+ acl?: ObjectCannedACL;
13
+ serverSideEncryption?: ServerSideEncryption;
14
+ endpoint?: string;
15
+ region?: string;
16
+ forcePathStyle?: boolean;
17
+ tus?: {
18
+ chunkSize?: number;
19
+ };
20
+ connectionTimeout?: number;
21
+ socketTimeout?: number;
22
+ maxSockets?: number;
23
+ keepAlive?: boolean;
23
24
  };
24
25
  declare class DriverS3 implements TusDriver {
25
- private config;
26
- private readonly client;
27
- private readonly root;
28
- private partUploadSemaphore;
29
- private readonly preferredPartSize;
30
- maxMultipartParts: 10000;
31
- minPartSize: 5242880;
32
- maxUploadSize: 5497558138880;
33
- constructor(config: DriverS3Config);
34
- private getClient;
35
- private fullPath;
36
- read(filepath: string, options?: ReadOptions): Promise<Readable>;
37
- stat(filepath: string): Promise<{
38
- size: number;
39
- modified: Date;
40
- }>;
41
- exists(filepath: string): Promise<boolean>;
42
- move(src: string, dest: string): Promise<void>;
43
- copy(src: string, dest: string): Promise<void>;
44
- write(filepath: string, content: Readable, type?: string): Promise<void>;
45
- delete(filepath: string): Promise<void>;
46
- list(prefix?: string): AsyncGenerator<string, void, unknown>;
47
- get tusExtensions(): string[];
48
- createChunkedUpload(filepath: string, context: ChunkedUploadContext): Promise<ChunkedUploadContext>;
49
- deleteChunkedUpload(filepath: string, context: ChunkedUploadContext): Promise<void>;
50
- finishChunkedUpload(filepath: string, context: ChunkedUploadContext): Promise<void>;
51
- writeChunk(filepath: string, content: Readable, offset: number, context: ChunkedUploadContext): Promise<number>;
52
- private uploadPart;
53
- private uploadParts;
54
- private retrieveParts;
55
- private finishMultipartUpload;
56
- private calcOptimalPartSize;
26
+ private config;
27
+ private readonly client;
28
+ private readonly root;
29
+ private partUploadSemaphore;
30
+ private readonly preferredPartSize;
31
+ maxMultipartParts: 10000;
32
+ minPartSize: 5242880;
33
+ maxUploadSize: 5497558138880;
34
+ constructor(config: DriverS3Config);
35
+ private getClient;
36
+ private fullPath;
37
+ read(filepath: string, options?: ReadOptions): Promise<Readable>;
38
+ stat(filepath: string): Promise<{
39
+ size: number;
40
+ modified: Date;
41
+ }>;
42
+ exists(filepath: string): Promise<boolean>;
43
+ move(src: string, dest: string): Promise<void>;
44
+ copy(src: string, dest: string): Promise<void>;
45
+ write(filepath: string, content: Readable, type?: string): Promise<void>;
46
+ delete(filepath: string): Promise<void>;
47
+ list(prefix?: string): AsyncGenerator<string, void, unknown>;
48
+ get tusExtensions(): string[];
49
+ createChunkedUpload(filepath: string, context: ChunkedUploadContext): Promise<ChunkedUploadContext>;
50
+ deleteChunkedUpload(filepath: string, context: ChunkedUploadContext): Promise<void>;
51
+ finishChunkedUpload(filepath: string, context: ChunkedUploadContext): Promise<void>;
52
+ writeChunk(filepath: string, content: Readable, offset: number, context: ChunkedUploadContext): Promise<number>;
53
+ private uploadPart;
54
+ private uploadParts;
55
+ private retrieveParts;
56
+ private finishMultipartUpload;
57
+ private calcOptimalPartSize;
57
58
  }
58
-
59
- export { DriverS3, type DriverS3Config, DriverS3 as default };
59
+ //#endregion
60
+ export { DriverS3, DriverS3 as default, DriverS3Config };
package/dist/index.js CHANGED
@@ -1,18 +1,4 @@
1
- // src/index.ts
2
- import {
3
- AbortMultipartUploadCommand,
4
- CompleteMultipartUploadCommand,
5
- CopyObjectCommand,
6
- CreateMultipartUploadCommand,
7
- DeleteObjectCommand,
8
- DeleteObjectsCommand,
9
- GetObjectCommand,
10
- HeadObjectCommand,
11
- ListObjectsV2Command,
12
- ListPartsCommand,
13
- S3Client,
14
- UploadPartCommand
15
- } from "@aws-sdk/client-s3";
1
+ import { AbortMultipartUploadCommand, CompleteMultipartUploadCommand, CopyObjectCommand, CreateMultipartUploadCommand, DeleteObjectCommand, DeleteObjectsCommand, GetObjectCommand, HeadObjectCommand, ListObjectsV2Command, ListPartsCommand, S3Client, UploadPartCommand } from "@aws-sdk/client-s3";
16
2
  import { Upload } from "@aws-sdk/lib-storage";
17
3
  import { normalizePath } from "@directus/utils";
18
4
  import { isReadableStream } from "@directus/utils/node";
@@ -20,375 +6,318 @@ import { Semaphore } from "@shopify/semaphore";
20
6
  import { NodeHttpHandler } from "@smithy/node-http-handler";
21
7
  import { ERRORS, StreamSplitter, TUS_RESUMABLE } from "@tus/utils";
22
8
  import ms from "ms";
23
- import fs, { promises as fsProm } from "fs";
24
- import { Agent as HttpAgent } from "http";
25
- import { Agent as HttpsAgent } from "https";
26
- import os from "os";
27
- import { join } from "path";
28
- import { promises as streamProm } from "stream";
9
+ import fs, { promises } from "node:fs";
10
+ import { Agent } from "node:http";
11
+ import { Agent as Agent$1 } from "node:https";
12
+ import os from "node:os";
13
+ import { join } from "node:path";
14
+ import { promises as promises$1 } from "node:stream";
15
+
16
+ //#region src/index.ts
29
17
  var DriverS3 = class {
30
- config;
31
- client;
32
- root;
33
- // TUS specific members
34
- partUploadSemaphore;
35
- preferredPartSize;
36
- maxMultipartParts = 1e4;
37
- minPartSize = 5242880;
38
- // 5MiB
39
- maxUploadSize = 5497558138880;
40
- // 5TiB
41
- constructor(config) {
42
- this.config = config;
43
- this.client = this.getClient();
44
- this.root = this.config.root ? normalizePath(this.config.root, { removeLeading: true }) : "";
45
- this.preferredPartSize = config.tus?.chunkSize ?? this.minPartSize;
46
- this.partUploadSemaphore = new Semaphore(60);
47
- }
48
- getClient() {
49
- const connectionTimeout = ms(String(this.config.connectionTimeout ?? 5e3));
50
- const socketTimeout = ms(String(this.config.socketTimeout ?? 12e4));
51
- const maxSockets = this.config.maxSockets ?? 500;
52
- const keepAlive = this.config.keepAlive ?? true;
53
- const s3ClientConfig = {
54
- requestHandler: new NodeHttpHandler({
55
- connectionTimeout,
56
- socketTimeout,
57
- httpAgent: new HttpAgent({ maxSockets, keepAlive }),
58
- httpsAgent: new HttpsAgent({ maxSockets, keepAlive })
59
- })
60
- };
61
- if (this.config.key && !this.config.secret || this.config.secret && !this.config.key) {
62
- throw new Error("Both `key` and `secret` are required when defined");
63
- }
64
- if (this.config.key && this.config.secret) {
65
- s3ClientConfig.credentials = {
66
- accessKeyId: this.config.key,
67
- secretAccessKey: this.config.secret
68
- };
69
- }
70
- if (this.config.endpoint) {
71
- const protocol = this.config.endpoint.startsWith("http://") ? "http:" : "https:";
72
- const hostname = this.config.endpoint.replace("https://", "").replace("http://", "");
73
- s3ClientConfig.endpoint = {
74
- hostname,
75
- protocol,
76
- path: "/"
77
- };
78
- }
79
- if (this.config.region) {
80
- s3ClientConfig.region = this.config.region;
81
- }
82
- if (this.config.forcePathStyle !== void 0) {
83
- s3ClientConfig.forcePathStyle = this.config.forcePathStyle;
84
- }
85
- return new S3Client(s3ClientConfig);
86
- }
87
- fullPath(filepath) {
88
- return normalizePath(join(this.root, filepath));
89
- }
90
- async read(filepath, options) {
91
- const { range } = options ?? {};
92
- const commandInput = {
93
- Key: this.fullPath(filepath),
94
- Bucket: this.config.bucket
95
- };
96
- if (range) {
97
- commandInput.Range = `bytes=${range.start ?? ""}-${range.end ?? ""}`;
98
- }
99
- const { Body: stream2 } = await this.client.send(new GetObjectCommand(commandInput));
100
- if (!stream2 || !isReadableStream(stream2)) {
101
- throw new Error(`No stream returned for file "${filepath}"`);
102
- }
103
- return stream2;
104
- }
105
- async stat(filepath) {
106
- const { ContentLength, LastModified } = await this.client.send(
107
- new HeadObjectCommand({
108
- Key: this.fullPath(filepath),
109
- Bucket: this.config.bucket
110
- })
111
- );
112
- return {
113
- size: ContentLength,
114
- modified: LastModified
115
- };
116
- }
117
- async exists(filepath) {
118
- try {
119
- await this.stat(filepath);
120
- return true;
121
- } catch {
122
- return false;
123
- }
124
- }
125
- async move(src, dest) {
126
- await this.copy(src, dest);
127
- await this.delete(src);
128
- }
129
- async copy(src, dest) {
130
- const params = {
131
- Key: this.fullPath(dest),
132
- Bucket: this.config.bucket,
133
- CopySource: `/${this.config.bucket}/${this.fullPath(src)}`
134
- };
135
- if (this.config.serverSideEncryption) {
136
- params.ServerSideEncryption = this.config.serverSideEncryption;
137
- }
138
- if (this.config.acl) {
139
- params.ACL = this.config.acl;
140
- }
141
- await this.client.send(new CopyObjectCommand(params));
142
- }
143
- async write(filepath, content, type) {
144
- const params = {
145
- Key: this.fullPath(filepath),
146
- Body: content,
147
- Bucket: this.config.bucket
148
- };
149
- if (type) {
150
- params.ContentType = type;
151
- }
152
- if (this.config.acl) {
153
- params.ACL = this.config.acl;
154
- }
155
- if (this.config.serverSideEncryption) {
156
- params.ServerSideEncryption = this.config.serverSideEncryption;
157
- }
158
- const upload = new Upload({
159
- client: this.client,
160
- params
161
- });
162
- await upload.done();
163
- }
164
- async delete(filepath) {
165
- await this.client.send(new DeleteObjectCommand({ Key: this.fullPath(filepath), Bucket: this.config.bucket }));
166
- }
167
- async *list(prefix = "") {
168
- let Prefix = this.fullPath(prefix);
169
- if (Prefix === ".") Prefix = "";
170
- let continuationToken = void 0;
171
- do {
172
- const listObjectsV2CommandInput = {
173
- Bucket: this.config.bucket,
174
- Prefix,
175
- MaxKeys: 1e3
176
- };
177
- if (continuationToken) {
178
- listObjectsV2CommandInput.ContinuationToken = continuationToken;
179
- }
180
- const response = await this.client.send(new ListObjectsV2Command(listObjectsV2CommandInput));
181
- continuationToken = response.NextContinuationToken;
182
- if (response.Contents) {
183
- for (const object of response.Contents) {
184
- if (!object.Key) continue;
185
- const isDir = object.Key.endsWith("/");
186
- if (isDir) continue;
187
- yield object.Key.substring(this.root.length);
188
- }
189
- }
190
- } while (continuationToken);
191
- }
192
- // TUS implementation based on https://github.com/tus/tus-node-server
193
- get tusExtensions() {
194
- return ["creation", "termination", "expiration"];
195
- }
196
- async createChunkedUpload(filepath, context) {
197
- const command = new CreateMultipartUploadCommand({
198
- Bucket: this.config.bucket,
199
- Key: this.fullPath(filepath),
200
- Metadata: { "tus-version": TUS_RESUMABLE },
201
- ...context.metadata?.["contentType"] ? {
202
- ContentType: context.metadata["contentType"]
203
- } : {},
204
- ...context.metadata?.["cacheControl"] ? {
205
- CacheControl: context.metadata["cacheControl"]
206
- } : {}
207
- });
208
- const res = await this.client.send(command);
209
- context.metadata["upload-id"] = res.UploadId;
210
- return context;
211
- }
212
- async deleteChunkedUpload(filepath, context) {
213
- const key = this.fullPath(filepath);
214
- try {
215
- const { "upload-id": uploadId } = context.metadata;
216
- if (uploadId) {
217
- await this.client.send(
218
- new AbortMultipartUploadCommand({
219
- Bucket: this.config.bucket,
220
- Key: key,
221
- UploadId: uploadId
222
- })
223
- );
224
- }
225
- } catch (error) {
226
- if (error?.code && ["NotFound", "NoSuchKey", "NoSuchUpload"].includes(error.Code)) {
227
- throw ERRORS.FILE_NOT_FOUND;
228
- }
229
- throw error;
230
- }
231
- await this.client.send(
232
- new DeleteObjectsCommand({
233
- Bucket: this.config.bucket,
234
- Delete: {
235
- Objects: [{ Key: key }]
236
- }
237
- })
238
- );
239
- }
240
- async finishChunkedUpload(filepath, context) {
241
- const key = this.fullPath(filepath);
242
- const uploadId = context.metadata["upload-id"];
243
- const size = context.size;
244
- const chunkSize = this.calcOptimalPartSize(size);
245
- const expectedParts = Math.ceil(size / chunkSize);
246
- let parts = await this.retrieveParts(key, uploadId);
247
- let retries = 0;
248
- while (parts.length !== expectedParts && retries < 3) {
249
- ++retries;
250
- await new Promise((resolve) => setTimeout(resolve, 500 * retries));
251
- parts = await this.retrieveParts(key, uploadId);
252
- }
253
- if (parts.length !== expectedParts) {
254
- throw {
255
- status_code: 500,
256
- body: "Failed to upload all parts to S3."
257
- };
258
- }
259
- await this.finishMultipartUpload(key, uploadId, parts);
260
- }
261
- async writeChunk(filepath, content, offset, context) {
262
- const key = this.fullPath(filepath);
263
- const uploadId = context.metadata["upload-id"];
264
- const size = context.size;
265
- const parts = await this.retrieveParts(key, uploadId);
266
- const partNumber = parts.length > 0 ? parts[parts.length - 1].PartNumber : 0;
267
- const nextPartNumber = partNumber + 1;
268
- const requestedOffset = offset;
269
- const bytesUploaded = await this.uploadParts(key, uploadId, size, content, nextPartNumber, offset);
270
- return requestedOffset + bytesUploaded;
271
- }
272
- async uploadPart(key, uploadId, readStream, partNumber) {
273
- const data = await this.client.send(
274
- new UploadPartCommand({
275
- Bucket: this.config.bucket,
276
- Key: key,
277
- UploadId: uploadId,
278
- PartNumber: partNumber,
279
- Body: readStream
280
- })
281
- );
282
- return data.ETag;
283
- }
284
- async uploadParts(key, uploadId, size, readStream, currentPartNumber, offset) {
285
- const promises = [];
286
- let pendingChunkFilepath = null;
287
- let bytesUploaded = 0;
288
- let permit = void 0;
289
- const splitterStream = new StreamSplitter({
290
- chunkSize: this.calcOptimalPartSize(size),
291
- directory: os.tmpdir()
292
- }).on("beforeChunkStarted", async () => {
293
- permit = await this.partUploadSemaphore.acquire();
294
- }).on("chunkStarted", (filepath) => {
295
- pendingChunkFilepath = filepath;
296
- }).on("chunkFinished", ({ path, size: partSize }) => {
297
- pendingChunkFilepath = null;
298
- const partNumber = currentPartNumber++;
299
- const acquiredPermit = permit;
300
- offset += partSize;
301
- const isFinalPart = size === offset;
302
- const deferred = new Promise(async (resolve, reject) => {
303
- try {
304
- const readable = fs.createReadStream(path);
305
- readable.on("error", reject);
306
- if (partSize >= this.minPartSize || isFinalPart) {
307
- await this.uploadPart(key, uploadId, readable, partNumber);
308
- bytesUploaded += partSize;
309
- } else {
310
- }
311
- resolve();
312
- } catch (error) {
313
- reject(error);
314
- } finally {
315
- fsProm.rm(path).catch(() => {
316
- });
317
- acquiredPermit?.release();
318
- }
319
- });
320
- promises.push(deferred);
321
- }).on("chunkError", () => {
322
- permit?.release();
323
- });
324
- try {
325
- await streamProm.pipeline(readStream, splitterStream);
326
- } catch (error) {
327
- if (pendingChunkFilepath !== null) {
328
- try {
329
- await fsProm.rm(pendingChunkFilepath);
330
- } catch {
331
- }
332
- }
333
- promises.push(Promise.reject(error));
334
- } finally {
335
- await Promise.all(promises);
336
- }
337
- return bytesUploaded;
338
- }
339
- async retrieveParts(key, uploadId, partNumberMarker) {
340
- const data = await this.client.send(
341
- new ListPartsCommand({
342
- Bucket: this.config.bucket,
343
- Key: key,
344
- UploadId: uploadId,
345
- PartNumberMarker: partNumberMarker
346
- })
347
- );
348
- let parts = data.Parts ?? [];
349
- if (data.IsTruncated) {
350
- const rest = await this.retrieveParts(key, uploadId, data.NextPartNumberMarker);
351
- parts = [...parts, ...rest];
352
- }
353
- if (!partNumberMarker) {
354
- parts.sort((a, b) => a.PartNumber - b.PartNumber);
355
- }
356
- return parts;
357
- }
358
- async finishMultipartUpload(key, uploadId, parts) {
359
- const command = new CompleteMultipartUploadCommand({
360
- Bucket: this.config.bucket,
361
- Key: key,
362
- UploadId: uploadId,
363
- MultipartUpload: {
364
- Parts: parts.map((part) => {
365
- return {
366
- ETag: part.ETag,
367
- PartNumber: part.PartNumber
368
- };
369
- })
370
- }
371
- });
372
- const response = await this.client.send(command);
373
- return response.Location;
374
- }
375
- calcOptimalPartSize(size) {
376
- if (size === void 0) {
377
- size = this.maxUploadSize;
378
- }
379
- let optimalPartSize;
380
- if (size <= this.preferredPartSize) {
381
- optimalPartSize = size;
382
- } else if (size <= this.preferredPartSize * this.maxMultipartParts) {
383
- optimalPartSize = this.preferredPartSize;
384
- } else {
385
- optimalPartSize = Math.ceil(size / this.maxMultipartParts);
386
- }
387
- return optimalPartSize;
388
- }
389
- };
390
- var index_default = DriverS3;
391
- export {
392
- DriverS3,
393
- index_default as default
18
+ config;
19
+ client;
20
+ root;
21
+ partUploadSemaphore;
22
+ preferredPartSize;
23
+ maxMultipartParts = 1e4;
24
+ minPartSize = 5242880;
25
+ maxUploadSize = 5497558138880;
26
+ constructor(config) {
27
+ this.config = config;
28
+ this.client = this.getClient();
29
+ this.root = this.config.root ? normalizePath(this.config.root, { removeLeading: true }) : "";
30
+ this.preferredPartSize = config.tus?.chunkSize ?? this.minPartSize;
31
+ this.partUploadSemaphore = new Semaphore(60);
32
+ }
33
+ getClient() {
34
+ const connectionTimeout = ms(String(this.config.connectionTimeout ?? 5e3));
35
+ const socketTimeout = ms(String(this.config.socketTimeout ?? 12e4));
36
+ const maxSockets = this.config.maxSockets ?? 500;
37
+ const keepAlive = this.config.keepAlive ?? true;
38
+ const s3ClientConfig = { requestHandler: new NodeHttpHandler({
39
+ connectionTimeout,
40
+ socketTimeout,
41
+ httpAgent: new Agent({
42
+ maxSockets,
43
+ keepAlive
44
+ }),
45
+ httpsAgent: new Agent$1({
46
+ maxSockets,
47
+ keepAlive
48
+ })
49
+ }) };
50
+ if (this.config.key && !this.config.secret || this.config.secret && !this.config.key) throw new Error("Both `key` and `secret` are required when defined");
51
+ if (this.config.key && this.config.secret) s3ClientConfig.credentials = {
52
+ accessKeyId: this.config.key,
53
+ secretAccessKey: this.config.secret
54
+ };
55
+ if (this.config.endpoint) {
56
+ const protocol = this.config.endpoint.startsWith("http://") ? "http:" : "https:";
57
+ s3ClientConfig.endpoint = {
58
+ hostname: this.config.endpoint.replace("https://", "").replace("http://", ""),
59
+ protocol,
60
+ path: "/"
61
+ };
62
+ }
63
+ if (this.config.region) s3ClientConfig.region = this.config.region;
64
+ if (this.config.forcePathStyle !== void 0) s3ClientConfig.forcePathStyle = this.config.forcePathStyle;
65
+ return new S3Client(s3ClientConfig);
66
+ }
67
+ fullPath(filepath) {
68
+ return normalizePath(join(this.root, filepath));
69
+ }
70
+ async read(filepath, options) {
71
+ const { range } = options ?? {};
72
+ const commandInput = {
73
+ Key: this.fullPath(filepath),
74
+ Bucket: this.config.bucket
75
+ };
76
+ if (range) commandInput.Range = `bytes=${range.start ?? ""}-${range.end ?? ""}`;
77
+ const { Body: stream$1 } = await this.client.send(new GetObjectCommand(commandInput));
78
+ if (!stream$1 || !isReadableStream(stream$1)) throw new Error(`No stream returned for file "${filepath}"`);
79
+ return stream$1;
80
+ }
81
+ async stat(filepath) {
82
+ const { ContentLength, LastModified } = await this.client.send(new HeadObjectCommand({
83
+ Key: this.fullPath(filepath),
84
+ Bucket: this.config.bucket
85
+ }));
86
+ return {
87
+ size: ContentLength,
88
+ modified: LastModified
89
+ };
90
+ }
91
+ async exists(filepath) {
92
+ try {
93
+ await this.stat(filepath);
94
+ return true;
95
+ } catch {
96
+ return false;
97
+ }
98
+ }
99
+ async move(src, dest) {
100
+ await this.copy(src, dest);
101
+ await this.delete(src);
102
+ }
103
+ async copy(src, dest) {
104
+ const params = {
105
+ Key: this.fullPath(dest),
106
+ Bucket: this.config.bucket,
107
+ CopySource: `/${this.config.bucket}/${this.fullPath(src)}`
108
+ };
109
+ if (this.config.serverSideEncryption) params.ServerSideEncryption = this.config.serverSideEncryption;
110
+ if (this.config.acl) params.ACL = this.config.acl;
111
+ await this.client.send(new CopyObjectCommand(params));
112
+ }
113
+ async write(filepath, content, type) {
114
+ const params = {
115
+ Key: this.fullPath(filepath),
116
+ Body: content,
117
+ Bucket: this.config.bucket
118
+ };
119
+ if (type) params.ContentType = type;
120
+ if (this.config.acl) params.ACL = this.config.acl;
121
+ if (this.config.serverSideEncryption) params.ServerSideEncryption = this.config.serverSideEncryption;
122
+ await new Upload({
123
+ client: this.client,
124
+ params
125
+ }).done();
126
+ }
127
+ async delete(filepath) {
128
+ await this.client.send(new DeleteObjectCommand({
129
+ Key: this.fullPath(filepath),
130
+ Bucket: this.config.bucket
131
+ }));
132
+ }
133
+ async *list(prefix = "") {
134
+ let Prefix = this.fullPath(prefix);
135
+ if (Prefix === ".") Prefix = "";
136
+ let continuationToken = void 0;
137
+ do {
138
+ const listObjectsV2CommandInput = {
139
+ Bucket: this.config.bucket,
140
+ Prefix,
141
+ MaxKeys: 1e3
142
+ };
143
+ if (continuationToken) listObjectsV2CommandInput.ContinuationToken = continuationToken;
144
+ const response = await this.client.send(new ListObjectsV2Command(listObjectsV2CommandInput));
145
+ continuationToken = response.NextContinuationToken;
146
+ if (response.Contents) for (const object of response.Contents) {
147
+ if (!object.Key) continue;
148
+ if (object.Key.endsWith("/")) continue;
149
+ yield object.Key.substring(this.root.length);
150
+ }
151
+ } while (continuationToken);
152
+ }
153
+ get tusExtensions() {
154
+ return [
155
+ "creation",
156
+ "termination",
157
+ "expiration"
158
+ ];
159
+ }
160
+ async createChunkedUpload(filepath, context) {
161
+ const command = new CreateMultipartUploadCommand({
162
+ Bucket: this.config.bucket,
163
+ Key: this.fullPath(filepath),
164
+ Metadata: { "tus-version": TUS_RESUMABLE },
165
+ ...context.metadata?.["contentType"] ? { ContentType: context.metadata["contentType"] } : {},
166
+ ...context.metadata?.["cacheControl"] ? { CacheControl: context.metadata["cacheControl"] } : {}
167
+ });
168
+ const res = await this.client.send(command);
169
+ context.metadata["upload-id"] = res.UploadId;
170
+ return context;
171
+ }
172
+ async deleteChunkedUpload(filepath, context) {
173
+ const key = this.fullPath(filepath);
174
+ try {
175
+ const { "upload-id": uploadId } = context.metadata;
176
+ if (uploadId) await this.client.send(new AbortMultipartUploadCommand({
177
+ Bucket: this.config.bucket,
178
+ Key: key,
179
+ UploadId: uploadId
180
+ }));
181
+ } catch (error) {
182
+ if (error?.code && [
183
+ "NotFound",
184
+ "NoSuchKey",
185
+ "NoSuchUpload"
186
+ ].includes(error.Code)) throw ERRORS.FILE_NOT_FOUND;
187
+ throw error;
188
+ }
189
+ await this.client.send(new DeleteObjectsCommand({
190
+ Bucket: this.config.bucket,
191
+ Delete: { Objects: [{ Key: key }] }
192
+ }));
193
+ }
194
+ async finishChunkedUpload(filepath, context) {
195
+ const key = this.fullPath(filepath);
196
+ const uploadId = context.metadata["upload-id"];
197
+ const size = context.size;
198
+ const chunkSize = this.calcOptimalPartSize(size);
199
+ const expectedParts = Math.ceil(size / chunkSize);
200
+ let parts = await this.retrieveParts(key, uploadId);
201
+ let retries = 0;
202
+ while (parts.length !== expectedParts && retries < 3) {
203
+ ++retries;
204
+ await new Promise((resolve) => setTimeout(resolve, 500 * retries));
205
+ parts = await this.retrieveParts(key, uploadId);
206
+ }
207
+ if (parts.length !== expectedParts) throw {
208
+ status_code: 500,
209
+ body: "Failed to upload all parts to S3."
210
+ };
211
+ await this.finishMultipartUpload(key, uploadId, parts);
212
+ }
213
+ async writeChunk(filepath, content, offset, context) {
214
+ const key = this.fullPath(filepath);
215
+ const uploadId = context.metadata["upload-id"];
216
+ const size = context.size;
217
+ const parts = await this.retrieveParts(key, uploadId);
218
+ const nextPartNumber = (parts.length > 0 ? parts[parts.length - 1].PartNumber : 0) + 1;
219
+ const requestedOffset = offset;
220
+ const bytesUploaded = await this.uploadParts(key, uploadId, size, content, nextPartNumber, offset);
221
+ return requestedOffset + bytesUploaded;
222
+ }
223
+ async uploadPart(key, uploadId, readStream, partNumber) {
224
+ return (await this.client.send(new UploadPartCommand({
225
+ Bucket: this.config.bucket,
226
+ Key: key,
227
+ UploadId: uploadId,
228
+ PartNumber: partNumber,
229
+ Body: readStream
230
+ }))).ETag;
231
+ }
232
+ async uploadParts(key, uploadId, size, readStream, currentPartNumber, offset) {
233
+ const promises$2 = [];
234
+ let pendingChunkFilepath = null;
235
+ let bytesUploaded = 0;
236
+ let permit = void 0;
237
+ const splitterStream = new StreamSplitter({
238
+ chunkSize: this.calcOptimalPartSize(size),
239
+ directory: os.tmpdir()
240
+ }).on("beforeChunkStarted", async () => {
241
+ permit = await this.partUploadSemaphore.acquire();
242
+ }).on("chunkStarted", (filepath) => {
243
+ pendingChunkFilepath = filepath;
244
+ }).on("chunkFinished", ({ path, size: partSize }) => {
245
+ pendingChunkFilepath = null;
246
+ const partNumber = currentPartNumber++;
247
+ const acquiredPermit = permit;
248
+ offset += partSize;
249
+ const isFinalPart = size === offset;
250
+ const deferred = new Promise(async (resolve, reject) => {
251
+ try {
252
+ const readable = fs.createReadStream(path);
253
+ readable.on("error", reject);
254
+ if (partSize >= this.minPartSize || isFinalPart) {
255
+ await this.uploadPart(key, uploadId, readable, partNumber);
256
+ bytesUploaded += partSize;
257
+ }
258
+ resolve();
259
+ } catch (error) {
260
+ reject(error);
261
+ } finally {
262
+ promises.rm(path).catch(() => {});
263
+ acquiredPermit?.release();
264
+ }
265
+ });
266
+ promises$2.push(deferred);
267
+ }).on("chunkError", () => {
268
+ permit?.release();
269
+ });
270
+ try {
271
+ await promises$1.pipeline(readStream, splitterStream);
272
+ } catch (error) {
273
+ if (pendingChunkFilepath !== null) try {
274
+ await promises.rm(pendingChunkFilepath);
275
+ } catch {}
276
+ promises$2.push(Promise.reject(error));
277
+ } finally {
278
+ await Promise.all(promises$2);
279
+ }
280
+ return bytesUploaded;
281
+ }
282
+ async retrieveParts(key, uploadId, partNumberMarker) {
283
+ const data = await this.client.send(new ListPartsCommand({
284
+ Bucket: this.config.bucket,
285
+ Key: key,
286
+ UploadId: uploadId,
287
+ PartNumberMarker: partNumberMarker
288
+ }));
289
+ let parts = data.Parts ?? [];
290
+ if (data.IsTruncated) {
291
+ const rest = await this.retrieveParts(key, uploadId, data.NextPartNumberMarker);
292
+ parts = [...parts, ...rest];
293
+ }
294
+ if (!partNumberMarker) parts.sort((a, b) => a.PartNumber - b.PartNumber);
295
+ return parts;
296
+ }
297
+ async finishMultipartUpload(key, uploadId, parts) {
298
+ const command = new CompleteMultipartUploadCommand({
299
+ Bucket: this.config.bucket,
300
+ Key: key,
301
+ UploadId: uploadId,
302
+ MultipartUpload: { Parts: parts.map((part) => {
303
+ return {
304
+ ETag: part.ETag,
305
+ PartNumber: part.PartNumber
306
+ };
307
+ }) }
308
+ });
309
+ return (await this.client.send(command)).Location;
310
+ }
311
+ calcOptimalPartSize(size) {
312
+ if (size === void 0) size = this.maxUploadSize;
313
+ let optimalPartSize;
314
+ if (size <= this.preferredPartSize) optimalPartSize = size;
315
+ else if (size <= this.preferredPartSize * this.maxMultipartParts) optimalPartSize = this.preferredPartSize;
316
+ else optimalPartSize = Math.ceil(size / this.maxMultipartParts);
317
+ return optimalPartSize;
318
+ }
394
319
  };
320
+ var src_default = DriverS3;
321
+
322
+ //#endregion
323
+ export { DriverS3, src_default as default };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@directus/storage-driver-s3",
3
- "version": "12.0.8",
3
+ "version": "12.0.9",
4
4
  "description": "S3 file storage abstraction for `@directus/storage`",
5
5
  "homepage": "https://directus.io",
6
6
  "repository": {
@@ -27,22 +27,22 @@
27
27
  "@smithy/node-http-handler": "4.1.0",
28
28
  "@tus/utils": "0.5.1",
29
29
  "ms": "2.1.3",
30
- "@directus/storage": "12.0.1",
31
- "@directus/utils": "13.0.9"
30
+ "@directus/storage": "12.0.2",
31
+ "@directus/utils": "13.0.10"
32
32
  },
33
33
  "devDependencies": {
34
34
  "@directus/tsconfig": "3.0.0",
35
35
  "@ngneat/falso": "8.0.2",
36
36
  "@types/ms": "2.1.0",
37
37
  "@vitest/coverage-v8": "3.2.4",
38
- "tsup": "8.5.0",
38
+ "tsdown": "0.14.2",
39
39
  "typescript": "5.8.3",
40
40
  "vitest": "3.2.4",
41
- "@directus/types": "13.2.1"
41
+ "@directus/types": "13.2.3"
42
42
  },
43
43
  "scripts": {
44
- "build": "tsup src/index.ts --format=esm --dts",
45
- "dev": "tsup src/index.ts --format=esm --dts --watch",
44
+ "build": "tsdown src/index.ts --dts",
45
+ "dev": "tsdown src/index.ts --dts --watch",
46
46
  "test": "vitest run",
47
47
  "test:coverage": "vitest run --coverage"
48
48
  }