@remotion/lambda-client 4.0.272 → 4.0.274
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-make.log +2 -2
- package/bundle.ts +4 -0
- package/dist/esm/index.mjs +16062 -67
- package/dist/write-file.js +31 -6
- package/package.json +5 -4
- package/src/write-file.ts +48 -18
- package/tsconfig.tsbuildinfo +1 -1
package/dist/write-file.js
CHANGED
|
@@ -4,31 +4,56 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
|
4
4
|
};
|
|
5
5
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
6
|
exports.lambdaWriteFileImplementation = void 0;
|
|
7
|
-
/* eslint-disable no-console */
|
|
8
7
|
const client_s3_1 = require("@aws-sdk/client-s3");
|
|
8
|
+
const lib_storage_1 = require("@aws-sdk/lib-storage");
|
|
9
9
|
const mime_types_1 = __importDefault(require("mime-types"));
|
|
10
10
|
const content_disposition_header_1 = require("./content-disposition-header");
|
|
11
11
|
const get_s3_client_1 = require("./get-s3-client");
|
|
12
|
+
// Files larger than 100MB will use multipart upload
|
|
13
|
+
const MULTIPART_THRESHOLD = 100 * 1024 * 1024; // 100MB in bytes
|
|
12
14
|
const tryLambdaWriteFile = async ({ bucketName, key, body, region, privacy, expectedBucketOwner, downloadBehavior, customCredentials, forcePathStyle, }) => {
|
|
13
|
-
|
|
15
|
+
const client = (0, get_s3_client_1.getS3Client)({
|
|
14
16
|
region,
|
|
15
17
|
customCredentials: customCredentials,
|
|
16
18
|
forcePathStyle,
|
|
17
|
-
})
|
|
19
|
+
});
|
|
20
|
+
const params = {
|
|
18
21
|
Bucket: bucketName,
|
|
19
22
|
Key: key,
|
|
20
23
|
Body: body,
|
|
21
24
|
ACL: privacy === 'no-acl'
|
|
22
25
|
? undefined
|
|
23
|
-
: privacy === 'private'
|
|
26
|
+
: (privacy === 'private'
|
|
24
27
|
? 'private'
|
|
25
|
-
: 'public-read',
|
|
28
|
+
: 'public-read'),
|
|
26
29
|
ExpectedBucketOwner: customCredentials
|
|
27
30
|
? undefined
|
|
28
31
|
: (expectedBucketOwner !== null && expectedBucketOwner !== void 0 ? expectedBucketOwner : undefined),
|
|
29
32
|
ContentType: mime_types_1.default.lookup(key) || 'application/octet-stream',
|
|
30
33
|
ContentDisposition: (0, content_disposition_header_1.getContentDispositionHeader)(downloadBehavior),
|
|
31
|
-
}
|
|
34
|
+
};
|
|
35
|
+
// Determine file size
|
|
36
|
+
const size = body instanceof Buffer || body instanceof Uint8Array
|
|
37
|
+
? body.length
|
|
38
|
+
: body instanceof Blob
|
|
39
|
+
? body.size
|
|
40
|
+
: typeof body === 'string'
|
|
41
|
+
? Buffer.from(body).length
|
|
42
|
+
: null;
|
|
43
|
+
// Use multipart upload for large files or streams (where we can't determine size)
|
|
44
|
+
if (size === null || size > MULTIPART_THRESHOLD) {
|
|
45
|
+
const upload = new lib_storage_1.Upload({
|
|
46
|
+
client,
|
|
47
|
+
params,
|
|
48
|
+
queueSize: 4, // number of concurrent uploads
|
|
49
|
+
partSize: 5 * 1024 * 1024, // chunk size of 5MB
|
|
50
|
+
});
|
|
51
|
+
await upload.done();
|
|
52
|
+
}
|
|
53
|
+
else {
|
|
54
|
+
// Use regular PutObject for small files
|
|
55
|
+
await client.send(new client_s3_1.PutObjectCommand(params));
|
|
56
|
+
}
|
|
32
57
|
};
|
|
33
58
|
const lambdaWriteFileImplementation = async (params) => {
|
|
34
59
|
var _a;
|
package/package.json
CHANGED
|
@@ -3,7 +3,7 @@
|
|
|
3
3
|
"url": "https://github.com/remotion-dev/remotion/tree/main/packages/lambda-client"
|
|
4
4
|
},
|
|
5
5
|
"name": "@remotion/lambda-client",
|
|
6
|
-
"version": "4.0.272",
|
|
6
|
+
"version": "4.0.274",
|
|
7
7
|
"main": "dist/index.js",
|
|
8
8
|
"sideEffects": false,
|
|
9
9
|
"author": "Jonny Burger <jonny@remotion.dev>",
|
|
@@ -16,17 +16,18 @@
|
|
|
16
16
|
"@aws-sdk/client-s3": "3.738.0",
|
|
17
17
|
"@aws-sdk/client-service-quotas": "3.738.0",
|
|
18
18
|
"@aws-sdk/client-sts": "3.738.0",
|
|
19
|
-
"@aws-sdk/credential-provider-ini": "3.734.0",
|
|
19
|
+
"@aws-sdk/lib-storage": "3.738.0",
|
|
20
20
|
"mime-types": "2.1.34"
|
|
21
21
|
},
|
|
22
22
|
"devDependencies": {
|
|
23
|
+
"@aws-sdk/credential-provider-ini": "3.734.0",
|
|
23
24
|
"@types/express": "^5.0.0",
|
|
24
25
|
"express": "4.21.0",
|
|
25
26
|
"eslint": "9.19.0",
|
|
26
27
|
"next": "15.1.6",
|
|
27
28
|
"@types/mime-types": "2.1.1",
|
|
28
|
-
"@remotion/serverless-client": "4.0.272",
|
|
29
|
-
"@remotion/eslint-config-internal": "4.0.272"
|
|
29
|
+
"@remotion/serverless-client": "4.0.274",
|
|
30
|
+
"@remotion/eslint-config-internal": "4.0.274"
|
|
30
31
|
},
|
|
31
32
|
"publishConfig": {
|
|
32
33
|
"access": "public"
|
package/src/write-file.ts
CHANGED
|
@@ -1,5 +1,7 @@
|
|
|
1
1
|
/* eslint-disable no-console */
|
|
2
|
+
import type {ObjectCannedACL, PutObjectCommandInput} from '@aws-sdk/client-s3';
|
|
2
3
|
import {PutObjectCommand} from '@aws-sdk/client-s3';
|
|
4
|
+
import {Upload} from '@aws-sdk/lib-storage';
|
|
3
5
|
import type {
|
|
4
6
|
CustomCredentials,
|
|
5
7
|
WriteFileInput,
|
|
@@ -9,6 +11,9 @@ import type {AwsProvider} from './aws-provider';
|
|
|
9
11
|
import {getContentDispositionHeader} from './content-disposition-header';
|
|
10
12
|
import {getS3Client} from './get-s3-client';
|
|
11
13
|
|
|
14
|
+
// Files larger than 100MB will use multipart upload
|
|
15
|
+
const MULTIPART_THRESHOLD = 100 * 1024 * 1024; // 100MB in bytes
|
|
16
|
+
|
|
12
17
|
const tryLambdaWriteFile = async ({
|
|
13
18
|
bucketName,
|
|
14
19
|
key,
|
|
@@ -20,28 +25,53 @@ const tryLambdaWriteFile = async ({
|
|
|
20
25
|
customCredentials,
|
|
21
26
|
forcePathStyle,
|
|
22
27
|
}: WriteFileInput<AwsProvider>): Promise<void> => {
|
|
23
|
-
|
|
28
|
+
const client = getS3Client({
|
|
24
29
|
region,
|
|
25
30
|
customCredentials: customCredentials as CustomCredentials<AwsProvider>,
|
|
26
31
|
forcePathStyle,
|
|
27
|
-
})
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
: privacy === 'private'
|
|
36
|
-
? 'private'
|
|
37
|
-
: 'public-read',
|
|
38
|
-
ExpectedBucketOwner: customCredentials
|
|
32
|
+
});
|
|
33
|
+
|
|
34
|
+
const params: PutObjectCommandInput = {
|
|
35
|
+
Bucket: bucketName,
|
|
36
|
+
Key: key,
|
|
37
|
+
Body: body,
|
|
38
|
+
ACL:
|
|
39
|
+
privacy === 'no-acl'
|
|
39
40
|
? undefined
|
|
40
|
-
: (
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
41
|
+
: ((privacy === 'private'
|
|
42
|
+
? 'private'
|
|
43
|
+
: 'public-read') as ObjectCannedACL),
|
|
44
|
+
ExpectedBucketOwner: customCredentials
|
|
45
|
+
? undefined
|
|
46
|
+
: (expectedBucketOwner ?? undefined),
|
|
47
|
+
ContentType: mimeTypes.lookup(key) || 'application/octet-stream',
|
|
48
|
+
ContentDisposition: getContentDispositionHeader(downloadBehavior),
|
|
49
|
+
};
|
|
50
|
+
|
|
51
|
+
// Determine file size
|
|
52
|
+
const size =
|
|
53
|
+
body instanceof Buffer || body instanceof Uint8Array
|
|
54
|
+
? body.length
|
|
55
|
+
: body instanceof Blob
|
|
56
|
+
? body.size
|
|
57
|
+
: typeof body === 'string'
|
|
58
|
+
? Buffer.from(body).length
|
|
59
|
+
: null;
|
|
60
|
+
|
|
61
|
+
// Use multipart upload for large files or streams (where we can't determine size)
|
|
62
|
+
if (size === null || size > MULTIPART_THRESHOLD) {
|
|
63
|
+
const upload = new Upload({
|
|
64
|
+
client,
|
|
65
|
+
params,
|
|
66
|
+
queueSize: 4, // number of concurrent uploads
|
|
67
|
+
partSize: 5 * 1024 * 1024, // chunk size of 5MB
|
|
68
|
+
});
|
|
69
|
+
|
|
70
|
+
await upload.done();
|
|
71
|
+
} else {
|
|
72
|
+
// Use regular PutObject for small files
|
|
73
|
+
await client.send(new PutObjectCommand(params));
|
|
74
|
+
}
|
|
45
75
|
};
|
|
46
76
|
|
|
47
77
|
export const lambdaWriteFileImplementation = async (
|