@remotion/lambda-client 4.0.273 → 4.0.275
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-make.log +2 -2
- package/dist/esm/index.mjs +21 -4
- package/dist/write-file.js +31 -6
- package/package.json +4 -3
- package/src/write-file.ts +48 -18
- package/tsconfig.tsbuildinfo +1 -1
package/.turbo/turbo-make.log
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
|
|
2
2
|
|
|
3
|
-
> @remotion/lambda-client@4.0.
|
|
3
|
+
> @remotion/lambda-client@4.0.275 make /Users/jonathanburger/remotion/packages/lambda-client
|
|
4
4
|
> tsc -d && bun --env-file=../.env.bundle bundle.ts
|
|
5
5
|
|
|
6
|
-
[0m[2m[[
|
|
6
|
+
[0m[2m[[1m78.27ms[0m[2m][0m Generated.
|
package/dist/esm/index.mjs
CHANGED
|
@@ -16329,6 +16329,7 @@ if (typeof window !== "undefined") {
|
|
|
16329
16329
|
var DELAY_RENDER_CALLSTACK_TOKEN = "The delayRender was called:";
|
|
16330
16330
|
var DELAY_RENDER_RETRIES_LEFT = "Retries left: ";
|
|
16331
16331
|
var DELAY_RENDER_RETRY_TOKEN = "- Rendering the frame will be retried.";
|
|
16332
|
+
var DELAY_RENDER_CLEAR_TOKEN = "handle was cleared after";
|
|
16332
16333
|
var DATE_TOKEN = "remotion-date:";
|
|
16333
16334
|
var FILE_TOKEN = "remotion-file:";
|
|
16334
16335
|
var serializeJSONWithDate = ({
|
|
@@ -16793,6 +16794,7 @@ var NoReactInternals = {
|
|
|
16793
16794
|
deserializeJSONWithCustomFields,
|
|
16794
16795
|
DELAY_RENDER_CALLSTACK_TOKEN,
|
|
16795
16796
|
DELAY_RENDER_RETRY_TOKEN,
|
|
16797
|
+
DELAY_RENDER_CLEAR_TOKEN,
|
|
16796
16798
|
DELAY_RENDER_ATTEMPT_TOKEN: DELAY_RENDER_RETRIES_LEFT,
|
|
16797
16799
|
getOffthreadVideoSource,
|
|
16798
16800
|
getExpectedMediaFrameUncorrected,
|
|
@@ -16895,7 +16897,7 @@ var validateFramesPerFunction = ({
|
|
|
16895
16897
|
throw new TypeError(`The framesPerLambda needs to be at least ${effectiveMinimum}, but is ${framesPerFunction}`);
|
|
16896
16898
|
}
|
|
16897
16899
|
};
|
|
16898
|
-
var VERSION = "4.0.
|
|
16900
|
+
var VERSION = "4.0.275";
|
|
16899
16901
|
var isColorSupported = () => {
|
|
16900
16902
|
const env = process.env || {};
|
|
16901
16903
|
const isForceDisabled = "NO_COLOR" in env;
|
|
@@ -20650,6 +20652,7 @@ var lambdaReadFileImplementation = async ({
|
|
|
20650
20652
|
|
|
20651
20653
|
// src/write-file.ts
|
|
20652
20654
|
import { PutObjectCommand } from "@aws-sdk/client-s3";
|
|
20655
|
+
import { Upload } from "@aws-sdk/lib-storage";
|
|
20653
20656
|
import mimeTypes from "mime-types";
|
|
20654
20657
|
|
|
20655
20658
|
// src/content-disposition-header.ts
|
|
@@ -20698,6 +20701,7 @@ var getContentDispositionHeader = (behavior) => {
|
|
|
20698
20701
|
};
|
|
20699
20702
|
|
|
20700
20703
|
// src/write-file.ts
|
|
20704
|
+
var MULTIPART_THRESHOLD = 100 * 1024 * 1024;
|
|
20701
20705
|
var tryLambdaWriteFile = async ({
|
|
20702
20706
|
bucketName,
|
|
20703
20707
|
key,
|
|
@@ -20709,11 +20713,12 @@ var tryLambdaWriteFile = async ({
|
|
|
20709
20713
|
customCredentials,
|
|
20710
20714
|
forcePathStyle
|
|
20711
20715
|
}) => {
|
|
20712
|
-
|
|
20716
|
+
const client = getS3Client({
|
|
20713
20717
|
region,
|
|
20714
20718
|
customCredentials,
|
|
20715
20719
|
forcePathStyle
|
|
20716
|
-
})
|
|
20720
|
+
});
|
|
20721
|
+
const params = {
|
|
20717
20722
|
Bucket: bucketName,
|
|
20718
20723
|
Key: key,
|
|
20719
20724
|
Body: body,
|
|
@@ -20721,7 +20726,19 @@ var tryLambdaWriteFile = async ({
|
|
|
20721
20726
|
ExpectedBucketOwner: customCredentials ? undefined : expectedBucketOwner ?? undefined,
|
|
20722
20727
|
ContentType: mimeTypes.lookup(key) || "application/octet-stream",
|
|
20723
20728
|
ContentDisposition: getContentDispositionHeader(downloadBehavior)
|
|
20724
|
-
}
|
|
20729
|
+
};
|
|
20730
|
+
const size = body instanceof Buffer || body instanceof Uint8Array ? body.length : body instanceof Blob ? body.size : typeof body === "string" ? Buffer.from(body).length : null;
|
|
20731
|
+
if (size === null || size > MULTIPART_THRESHOLD) {
|
|
20732
|
+
const upload = new Upload({
|
|
20733
|
+
client,
|
|
20734
|
+
params,
|
|
20735
|
+
queueSize: 4,
|
|
20736
|
+
partSize: 5 * 1024 * 1024
|
|
20737
|
+
});
|
|
20738
|
+
await upload.done();
|
|
20739
|
+
} else {
|
|
20740
|
+
await client.send(new PutObjectCommand(params));
|
|
20741
|
+
}
|
|
20725
20742
|
};
|
|
20726
20743
|
var lambdaWriteFileImplementation = async (params) => {
|
|
20727
20744
|
const remainingRetries = params.retries ?? 2;
|
package/dist/write-file.js
CHANGED
|
@@ -4,31 +4,56 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
|
4
4
|
};
|
|
5
5
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
6
|
exports.lambdaWriteFileImplementation = void 0;
|
|
7
|
-
/* eslint-disable no-console */
|
|
8
7
|
const client_s3_1 = require("@aws-sdk/client-s3");
|
|
8
|
+
const lib_storage_1 = require("@aws-sdk/lib-storage");
|
|
9
9
|
const mime_types_1 = __importDefault(require("mime-types"));
|
|
10
10
|
const content_disposition_header_1 = require("./content-disposition-header");
|
|
11
11
|
const get_s3_client_1 = require("./get-s3-client");
|
|
12
|
+
// Files larger than 100MB will use multipart upload
|
|
13
|
+
const MULTIPART_THRESHOLD = 100 * 1024 * 1024; // 100MB in bytes
|
|
12
14
|
const tryLambdaWriteFile = async ({ bucketName, key, body, region, privacy, expectedBucketOwner, downloadBehavior, customCredentials, forcePathStyle, }) => {
|
|
13
|
-
|
|
15
|
+
const client = (0, get_s3_client_1.getS3Client)({
|
|
14
16
|
region,
|
|
15
17
|
customCredentials: customCredentials,
|
|
16
18
|
forcePathStyle,
|
|
17
|
-
})
|
|
19
|
+
});
|
|
20
|
+
const params = {
|
|
18
21
|
Bucket: bucketName,
|
|
19
22
|
Key: key,
|
|
20
23
|
Body: body,
|
|
21
24
|
ACL: privacy === 'no-acl'
|
|
22
25
|
? undefined
|
|
23
|
-
: privacy === 'private'
|
|
26
|
+
: (privacy === 'private'
|
|
24
27
|
? 'private'
|
|
25
|
-
: 'public-read',
|
|
28
|
+
: 'public-read'),
|
|
26
29
|
ExpectedBucketOwner: customCredentials
|
|
27
30
|
? undefined
|
|
28
31
|
: (expectedBucketOwner !== null && expectedBucketOwner !== void 0 ? expectedBucketOwner : undefined),
|
|
29
32
|
ContentType: mime_types_1.default.lookup(key) || 'application/octet-stream',
|
|
30
33
|
ContentDisposition: (0, content_disposition_header_1.getContentDispositionHeader)(downloadBehavior),
|
|
31
|
-
}
|
|
34
|
+
};
|
|
35
|
+
// Determine file size
|
|
36
|
+
const size = body instanceof Buffer || body instanceof Uint8Array
|
|
37
|
+
? body.length
|
|
38
|
+
: body instanceof Blob
|
|
39
|
+
? body.size
|
|
40
|
+
: typeof body === 'string'
|
|
41
|
+
? Buffer.from(body).length
|
|
42
|
+
: null;
|
|
43
|
+
// Use multipart upload for large files or streams (where we can't determine size)
|
|
44
|
+
if (size === null || size > MULTIPART_THRESHOLD) {
|
|
45
|
+
const upload = new lib_storage_1.Upload({
|
|
46
|
+
client,
|
|
47
|
+
params,
|
|
48
|
+
queueSize: 4, // number of concurrent uploads
|
|
49
|
+
partSize: 5 * 1024 * 1024, // chunk size of 5MB
|
|
50
|
+
});
|
|
51
|
+
await upload.done();
|
|
52
|
+
}
|
|
53
|
+
else {
|
|
54
|
+
// Use regular PutObject for small files
|
|
55
|
+
await client.send(new client_s3_1.PutObjectCommand(params));
|
|
56
|
+
}
|
|
32
57
|
};
|
|
33
58
|
const lambdaWriteFileImplementation = async (params) => {
|
|
34
59
|
var _a;
|
package/package.json
CHANGED
|
@@ -3,7 +3,7 @@
|
|
|
3
3
|
"url": "https://github.com/remotion-dev/remotion/tree/main/packages/lambda-client"
|
|
4
4
|
},
|
|
5
5
|
"name": "@remotion/lambda-client",
|
|
6
|
-
"version": "4.0.
|
|
6
|
+
"version": "4.0.275",
|
|
7
7
|
"main": "dist/index.js",
|
|
8
8
|
"sideEffects": false,
|
|
9
9
|
"author": "Jonny Burger <jonny@remotion.dev>",
|
|
@@ -16,6 +16,7 @@
|
|
|
16
16
|
"@aws-sdk/client-s3": "3.738.0",
|
|
17
17
|
"@aws-sdk/client-service-quotas": "3.738.0",
|
|
18
18
|
"@aws-sdk/client-sts": "3.738.0",
|
|
19
|
+
"@aws-sdk/lib-storage": "3.738.0",
|
|
19
20
|
"mime-types": "2.1.34"
|
|
20
21
|
},
|
|
21
22
|
"devDependencies": {
|
|
@@ -25,8 +26,8 @@
|
|
|
25
26
|
"eslint": "9.19.0",
|
|
26
27
|
"next": "15.1.6",
|
|
27
28
|
"@types/mime-types": "2.1.1",
|
|
28
|
-
"@remotion/serverless-client": "4.0.
|
|
29
|
-
"@remotion/eslint-config-internal": "4.0.
|
|
29
|
+
"@remotion/serverless-client": "4.0.275",
|
|
30
|
+
"@remotion/eslint-config-internal": "4.0.275"
|
|
30
31
|
},
|
|
31
32
|
"publishConfig": {
|
|
32
33
|
"access": "public"
|
package/src/write-file.ts
CHANGED
|
@@ -1,5 +1,7 @@
|
|
|
1
1
|
/* eslint-disable no-console */
|
|
2
|
+
import type {ObjectCannedACL, PutObjectCommandInput} from '@aws-sdk/client-s3';
|
|
2
3
|
import {PutObjectCommand} from '@aws-sdk/client-s3';
|
|
4
|
+
import {Upload} from '@aws-sdk/lib-storage';
|
|
3
5
|
import type {
|
|
4
6
|
CustomCredentials,
|
|
5
7
|
WriteFileInput,
|
|
@@ -9,6 +11,9 @@ import type {AwsProvider} from './aws-provider';
|
|
|
9
11
|
import {getContentDispositionHeader} from './content-disposition-header';
|
|
10
12
|
import {getS3Client} from './get-s3-client';
|
|
11
13
|
|
|
14
|
+
// Files larger than 100MB will use multipart upload
|
|
15
|
+
const MULTIPART_THRESHOLD = 100 * 1024 * 1024; // 100MB in bytes
|
|
16
|
+
|
|
12
17
|
const tryLambdaWriteFile = async ({
|
|
13
18
|
bucketName,
|
|
14
19
|
key,
|
|
@@ -20,28 +25,53 @@ const tryLambdaWriteFile = async ({
|
|
|
20
25
|
customCredentials,
|
|
21
26
|
forcePathStyle,
|
|
22
27
|
}: WriteFileInput<AwsProvider>): Promise<void> => {
|
|
23
|
-
|
|
28
|
+
const client = getS3Client({
|
|
24
29
|
region,
|
|
25
30
|
customCredentials: customCredentials as CustomCredentials<AwsProvider>,
|
|
26
31
|
forcePathStyle,
|
|
27
|
-
})
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
: privacy === 'private'
|
|
36
|
-
? 'private'
|
|
37
|
-
: 'public-read',
|
|
38
|
-
ExpectedBucketOwner: customCredentials
|
|
32
|
+
});
|
|
33
|
+
|
|
34
|
+
const params: PutObjectCommandInput = {
|
|
35
|
+
Bucket: bucketName,
|
|
36
|
+
Key: key,
|
|
37
|
+
Body: body,
|
|
38
|
+
ACL:
|
|
39
|
+
privacy === 'no-acl'
|
|
39
40
|
? undefined
|
|
40
|
-
: (
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
41
|
+
: ((privacy === 'private'
|
|
42
|
+
? 'private'
|
|
43
|
+
: 'public-read') as ObjectCannedACL),
|
|
44
|
+
ExpectedBucketOwner: customCredentials
|
|
45
|
+
? undefined
|
|
46
|
+
: (expectedBucketOwner ?? undefined),
|
|
47
|
+
ContentType: mimeTypes.lookup(key) || 'application/octet-stream',
|
|
48
|
+
ContentDisposition: getContentDispositionHeader(downloadBehavior),
|
|
49
|
+
};
|
|
50
|
+
|
|
51
|
+
// Determine file size
|
|
52
|
+
const size =
|
|
53
|
+
body instanceof Buffer || body instanceof Uint8Array
|
|
54
|
+
? body.length
|
|
55
|
+
: body instanceof Blob
|
|
56
|
+
? body.size
|
|
57
|
+
: typeof body === 'string'
|
|
58
|
+
? Buffer.from(body).length
|
|
59
|
+
: null;
|
|
60
|
+
|
|
61
|
+
// Use multipart upload for large files or streams (where we can't determine size)
|
|
62
|
+
if (size === null || size > MULTIPART_THRESHOLD) {
|
|
63
|
+
const upload = new Upload({
|
|
64
|
+
client,
|
|
65
|
+
params,
|
|
66
|
+
queueSize: 4, // number of concurrent uploads
|
|
67
|
+
partSize: 5 * 1024 * 1024, // chunk size of 5MB
|
|
68
|
+
});
|
|
69
|
+
|
|
70
|
+
await upload.done();
|
|
71
|
+
} else {
|
|
72
|
+
// Use regular PutObject for small files
|
|
73
|
+
await client.send(new PutObjectCommand(params));
|
|
74
|
+
}
|
|
45
75
|
};
|
|
46
76
|
|
|
47
77
|
export const lambdaWriteFileImplementation = async (
|