@remotion/webcodecs 4.0.247 → 4.0.249
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/convert-media.d.ts +7 -2
- package/dist/convert-media.js +2 -1
- package/dist/create/matroska/matroska-utils.d.ts +1 -1
- package/dist/create/wav/create-wav.js +1 -1
- package/dist/esm/index.mjs +28 -6
- package/dist/index.d.ts +1 -1
- package/dist/on-audio-track.d.ts +3 -1
- package/dist/on-audio-track.js +25 -4
- package/dist/select-container-creator.d.ts +1 -1
- package/dist/test/create-matroska.test.js +1 -0
- package/dist/test/stsd.test.js +2 -0
- package/package.json +5 -5
package/dist/convert-media.d.ts
CHANGED
@@ -2,7 +2,7 @@
  * Copyright (c) 2025 Remotion AG
  * For licensing, see: https://remotion.dev/docs/webcodecs#license
  */
-import type { LogLevel, Options, ParseMediaDynamicOptions, ParseMediaFields, ParseMediaOptions, VideoTrack } from '@remotion/media-parser';
+import type { AudioTrack, LogLevel, Options, ParseMediaDynamicOptions, ParseMediaFields, ParseMediaOptions, VideoTrack } from '@remotion/media-parser';
 import type { ConvertMediaAudioCodec } from './get-available-audio-codecs';
 import type { ConvertMediaContainer } from './get-available-containers';
 import type { ConvertMediaVideoCodec } from './get-available-video-codecs';
@@ -30,10 +30,15 @@ export type ConvertMediaOnVideoFrame = (options: {
     frame: VideoFrame;
     track: VideoTrack;
 }) => Promise<VideoFrame> | VideoFrame;
-export
+export type ConvertMediaOnAudioData = (options: {
+    audioData: AudioData;
+    track: AudioTrack;
+}) => Promise<AudioData> | AudioData;
+export declare const convertMedia: <F extends Options<ParseMediaFields>>({ src, onVideoFrame, onAudioData, onProgress: onProgressDoNotCallDirectly, audioCodec, container, videoCodec, signal: userPassedAbortSignal, onAudioTrack: userAudioResolver, onVideoTrack: userVideoResolver, reader, fields, logLevel, writer, progressIntervalInMs, rotate, apiKey, resize, ...more }: {
     src: ParseMediaOptions<F>["src"];
     container: ConvertMediaContainer;
     onVideoFrame?: ConvertMediaOnVideoFrame;
+    onAudioData?: ConvertMediaOnAudioData;
     onProgress?: ConvertMediaOnProgress;
     videoCodec?: ConvertMediaVideoCodec;
     audioCodec?: ConvertMediaAudioCodec;

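The new public surface here is the `ConvertMediaOnAudioData` type and the optional `onAudioData` option on `convertMedia()`. A minimal usage sketch based on the declarations above; the source URL and the log message are illustrative and not taken from the package:

```ts
import {convertMedia} from '@remotion/webcodecs';

// Hypothetical input file; any src accepted by convertMedia() works here.
await convertMedia({
  src: 'https://example.com/input.mp4',
  container: 'webm',
  onAudioData: ({audioData, track}) => {
    // Inspect the decoded AudioData before it is re-encoded.
    // Returning the input unchanged is always allowed.
    console.log(`track ${track.trackId}: timestamp ${audioData.timestamp}`);
    return audioData;
  },
});
```
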
package/dist/convert-media.js
CHANGED
@@ -21,7 +21,7 @@ const on_video_track_1 = require("./on-video-track");
 const select_container_creator_1 = require("./select-container-creator");
 const send_telemetry_event_1 = require("./send-telemetry-event");
 const throttled_state_update_1 = require("./throttled-state-update");
-const convertMedia = async function ({ src, onVideoFrame, onProgress: onProgressDoNotCallDirectly, audioCodec, container, videoCodec, signal: userPassedAbortSignal, onAudioTrack: userAudioResolver, onVideoTrack: userVideoResolver, reader, fields, logLevel = 'info', writer, progressIntervalInMs, rotate, apiKey, resize, ...more }) {
+const convertMedia = async function ({ src, onVideoFrame, onAudioData, onProgress: onProgressDoNotCallDirectly, audioCodec, container, videoCodec, signal: userPassedAbortSignal, onAudioTrack: userAudioResolver, onVideoTrack: userVideoResolver, reader, fields, logLevel = 'info', writer, progressIntervalInMs, rotate, apiKey, resize, ...more }) {
     var _a, _b;
     if (userPassedAbortSignal === null || userPassedAbortSignal === void 0 ? void 0 : userPassedAbortSignal.aborted) {
         return Promise.reject(new error_cause_1.default('Aborted'));
@@ -106,6 +106,7 @@ const convertMedia = async function ({ src, onVideoFrame, onProgress: onProgress
         logLevel,
         outputContainer: container,
         progressTracker,
+        onAudioData: onAudioData !== null && onAudioData !== void 0 ? onAudioData : null,
     });
     (0, media_parser_1.parseMedia)({
         logLevel,

package/dist/create/matroska/matroska-utils.d.ts
CHANGED

@@ -19,7 +19,7 @@ export type EbmlParsedOrUint8Array<T extends Ebml> = {
     value: EbmlValueOrUint8Array<T>;
     minVintWidth: number | null;
 };
-export declare const measureEBMLVarInt: (value: number) => 2 | 1 |
+export declare const measureEBMLVarInt: (value: number) => 2 | 1 | 6 | 5 | 3 | 4;
 export declare const getVariableInt: (value: number, minWidth: number | null) => Uint8Array;
 export declare const makeMatroskaBytes: (fields: PossibleEbmlOrUint8Array) => BytesAndOffset;
 export type PossibleEbmlOrUint8Array = Prettify<{

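The widened return type above completes a previously truncated annotation. For context, EBML variable-size integers encode their own width in the leading bits, so each byte of width contributes 7 usable value bits, which is why the measured width spans several possible values. A rough sketch of that general rule (not the package's actual measureEBMLVarInt implementation, and assuming the all-ones pattern stays reserved for "unknown size"):

```ts
// Sketch: smallest EBML varint width (in bytes) that can represent `value`.
// A width-n varint holds values up to 2 ** (7 * n) - 2, since the all-ones
// bit pattern is conventionally reserved.
const ebmlVarIntWidth = (value: number): number => {
  let width = 1;
  while (value > 2 ** (7 * width) - 2) {
    width++;
  }
  return width;
};
```
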
package/dist/create/wav/create-wav.js
CHANGED

@@ -57,7 +57,7 @@ const createWav = async ({ filename, logLevel, onBytesProgress, onMillisecondsPr
     };
     const addSample = async (chunk) => {
         var _a;
-        log_1.Log.
+        log_1.Log.trace(logLevel, 'Adding sample', chunk);
         await w.write(chunk.data);
         onMillisecondsProgress((chunk.timestamp + ((_a = chunk.duration) !== null && _a !== void 0 ? _a : 0)) / 1000);
         onBytesProgress(w.getWrittenByteCount());

package/dist/esm/index.mjs
CHANGED
@@ -1288,7 +1288,8 @@ var makeAudioTrackHandler = ({
   onAudioTrack,
   logLevel,
   outputContainer,
-  progressTracker
+  progressTracker,
+  onAudioData
 }) => async ({ track, container: inputContainer }) => {
   const canCopyTrack = canCopyAudioTrack({
     inputCodec: track.codecWithoutConfig,
@@ -1397,15 +1398,34 @@ var makeAudioTrackHandler = ({
     progressTracker
   });
   const audioDecoder = createAudioDecoder({
-    onFrame: async (
-      await
+    onFrame: async (audioData) => {
+      const newAudioData = onAudioData ? await onAudioData?.({ audioData, track }) : audioData;
+      if (newAudioData !== audioData) {
+        if (newAudioData.duration !== audioData.duration) {
+          throw new error_cause_default(`onAudioData returned a different duration than the input audio data. Original duration: ${audioData.duration}, new duration: ${newAudioData.duration}`);
+        }
+        if (newAudioData.numberOfChannels !== audioData.numberOfChannels) {
+          throw new error_cause_default(`onAudioData returned a different number of channels than the input audio data. Original channels: ${audioData.numberOfChannels}, new channels: ${newAudioData.numberOfChannels}`);
+        }
+        if (newAudioData.sampleRate !== audioData.sampleRate) {
+          throw new error_cause_default(`onAudioData returned a different sample rate than the input audio data. Original sample rate: ${audioData.sampleRate}, new sample rate: ${newAudioData.sampleRate}`);
+        }
+        if (newAudioData.format !== audioData.format) {
+          throw new error_cause_default(`onAudioData returned a different format than the input audio data. Original format: ${audioData.format}, new format: ${newAudioData.format}`);
+        }
+        if (newAudioData.timestamp !== audioData.timestamp) {
+          throw new error_cause_default(`onAudioData returned a different timestamp than the input audio data. Original timestamp: ${audioData.timestamp}, new timestamp: ${newAudioData.timestamp}`);
+        }
+        audioData.close();
+      }
+      await audioEncoder.encodeFrame(newAudioData);
       onMediaStateUpdate?.((prevState) => {
         return {
           ...prevState,
           decodedAudioFrames: prevState.decodedAudioFrames + 1
         };
       });
-
+      newAudioData.close();
     },
     onError(error) {
       abortConversion(new error_cause_default(`Audio decoder of track ${track.trackId} failed. Config: ${JSON.stringify(audioDecoderConfig)} (see .cause of this error)`, {
@@ -4134,7 +4154,7 @@ var createWav = async ({
     await w.updateDataAt(blockAlignPosition, new Uint8Array(numberTo16BitLittleEndian(numberOfChannels * BYTES_PER_SAMPLE)));
   };
   const addSample = async (chunk) => {
-    Log.
+    Log.trace(logLevel, "Adding sample", chunk);
     await w.write(chunk.data);
     onMillisecondsProgress((chunk.timestamp + (chunk.duration ?? 0)) / 1000);
     onBytesProgress(w.getWrittenByteCount());
@@ -4273,6 +4293,7 @@ var throttledStateUpdate = ({
 var convertMedia = async function({
   src,
   onVideoFrame,
+  onAudioData,
   onProgress: onProgressDoNotCallDirectly,
   audioCodec,
   container,
@@ -4370,7 +4391,8 @@ var convertMedia = async function({
     onAudioTrack: userAudioResolver ?? null,
     logLevel,
     outputContainer: container,
-    progressTracker
+    progressTracker,
+    onAudioData: onAudioData ?? null
   });
   parseMedia({
     logLevel,

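The new onFrame body above enforces that a callback returning a different AudioData keeps the same duration, number of channels, sample rate, format and timestamp as the original, and the handler closes both the original and the replacement itself. As an illustration only (not part of the package), an onAudioData callback that halves the volume while satisfying those checks might look like the sketch below; it assumes the decoder delivered 'f32-planar' data and passes anything else through untouched:

```ts
import type {ConvertMediaOnAudioData} from '@remotion/webcodecs';

const halveVolume: ConvertMediaOnAudioData = ({audioData}) => {
  // Only rewrite data we can handle without changing its format;
  // returning the original unchanged is always valid.
  if (audioData.format !== 'f32-planar') {
    return audioData;
  }
  const {numberOfChannels, numberOfFrames, sampleRate, timestamp} = audioData;
  const samples = new Float32Array(numberOfChannels * numberOfFrames);
  // Copy each plane into one buffer, then scale in place.
  for (let plane = 0; plane < numberOfChannels; plane++) {
    audioData.copyTo(
      samples.subarray(plane * numberOfFrames, (plane + 1) * numberOfFrames),
      {planeIndex: plane},
    );
  }
  for (let i = 0; i < samples.length; i++) {
    samples[i] *= 0.5;
  }
  // Same format, channel count, frame count (hence duration), sample rate
  // and timestamp as the input, so the handler's checks pass.
  return new AudioData({
    data: samples,
    format: 'f32-planar',
    numberOfChannels,
    numberOfFrames,
    sampleRate,
    timestamp,
  });
};
```

Note that the callback should not call close() itself: as the hunk shows, the track handler closes the original when a replacement is returned and closes the returned AudioData after encoding.
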
package/dist/index.d.ts
CHANGED
@@ -4,7 +4,7 @@ export { canCopyAudioTrack } from './can-copy-audio-track';
 export { canCopyVideoTrack } from './can-copy-video-track';
 export { canReencodeAudioTrack } from './can-reencode-audio-track';
 export { canReencodeVideoTrack } from './can-reencode-video-track';
-export { convertMedia, ConvertMediaOnProgress, ConvertMediaOnVideoFrame, ConvertMediaProgress, ConvertMediaResult, } from './convert-media';
+export { convertMedia, ConvertMediaOnAudioData, ConvertMediaOnProgress, ConvertMediaOnVideoFrame, ConvertMediaProgress, ConvertMediaResult, } from './convert-media';
 export { defaultOnAudioTrackHandler } from './default-on-audio-track-handler';
 export { defaultOnVideoTrackHandler } from './default-on-video-track-handler';
 export { ConvertMediaAudioCodec, getAvailableAudioCodecs, } from './get-available-audio-codecs';

package/dist/on-audio-track.d.ts
CHANGED
@@ -1,4 +1,5 @@
 import { type LogLevel, type OnAudioTrack } from '@remotion/media-parser';
+import type { ConvertMediaOnAudioData } from './convert-media';
 import type { MediaFn } from './create/media-fn';
 import type { ProgressTracker } from './create/progress-tracker';
 import Error from './error-cause';
@@ -6,7 +7,7 @@ import type { ConvertMediaAudioCodec } from './get-available-audio-codecs';
 import type { ConvertMediaContainer } from './get-available-containers';
 import type { ConvertMediaOnAudioTrackHandler } from './on-audio-track-handler';
 import type { ConvertMediaProgressFn } from './throttled-state-update';
-export declare const makeAudioTrackHandler: ({ state, defaultAudioCodec: audioCodec, controller, abortConversion, onMediaStateUpdate, onAudioTrack, logLevel, outputContainer, progressTracker, }: {
+export declare const makeAudioTrackHandler: ({ state, defaultAudioCodec: audioCodec, controller, abortConversion, onMediaStateUpdate, onAudioTrack, logLevel, outputContainer, progressTracker, onAudioData, }: {
     state: MediaFn;
     defaultAudioCodec: ConvertMediaAudioCodec | null;
     controller: AbortController;
@@ -16,4 +17,5 @@ export declare const makeAudioTrackHandler: ({ state, defaultAudioCodec: audioCo
     logLevel: LogLevel;
     outputContainer: ConvertMediaContainer;
     progressTracker: ProgressTracker;
+    onAudioData: ConvertMediaOnAudioData | null;
 }) => OnAudioTrack;

package/dist/on-audio-track.js
CHANGED
@@ -15,7 +15,7 @@ const default_on_audio_track_handler_1 = require("./default-on-audio-track-handl
 const error_cause_1 = __importDefault(require("./error-cause"));
 const get_default_audio_codec_1 = require("./get-default-audio-codec");
 const log_1 = require("./log");
-const makeAudioTrackHandler = ({ state, defaultAudioCodec: audioCodec, controller, abortConversion, onMediaStateUpdate, onAudioTrack, logLevel, outputContainer, progressTracker, }) => async ({ track, container: inputContainer }) => {
+const makeAudioTrackHandler = ({ state, defaultAudioCodec: audioCodec, controller, abortConversion, onMediaStateUpdate, onAudioTrack, logLevel, outputContainer, progressTracker, onAudioData, }) => async ({ track, container: inputContainer }) => {
     const canCopyTrack = (0, can_copy_audio_track_1.canCopyAudioTrack)({
         inputCodec: track.codecWithoutConfig,
         outputContainer,
@@ -131,15 +131,36 @@ const makeAudioTrackHandler = ({ state, defaultAudioCodec: audioCodec, controlle
        progressTracker,
    });
    const audioDecoder = (0, audio_decoder_1.createAudioDecoder)({
-        onFrame: async (
-
+        onFrame: async (audioData) => {
+            const newAudioData = onAudioData
+                ? await (onAudioData === null || onAudioData === void 0 ? void 0 : onAudioData({ audioData, track }))
+                : audioData;
+            if (newAudioData !== audioData) {
+                if (newAudioData.duration !== audioData.duration) {
+                    throw new error_cause_1.default(`onAudioData returned a different duration than the input audio data. Original duration: ${audioData.duration}, new duration: ${newAudioData.duration}`);
+                }
+                if (newAudioData.numberOfChannels !== audioData.numberOfChannels) {
+                    throw new error_cause_1.default(`onAudioData returned a different number of channels than the input audio data. Original channels: ${audioData.numberOfChannels}, new channels: ${newAudioData.numberOfChannels}`);
+                }
+                if (newAudioData.sampleRate !== audioData.sampleRate) {
+                    throw new error_cause_1.default(`onAudioData returned a different sample rate than the input audio data. Original sample rate: ${audioData.sampleRate}, new sample rate: ${newAudioData.sampleRate}`);
+                }
+                if (newAudioData.format !== audioData.format) {
+                    throw new error_cause_1.default(`onAudioData returned a different format than the input audio data. Original format: ${audioData.format}, new format: ${newAudioData.format}`);
+                }
+                if (newAudioData.timestamp !== audioData.timestamp) {
+                    throw new error_cause_1.default(`onAudioData returned a different timestamp than the input audio data. Original timestamp: ${audioData.timestamp}, new timestamp: ${newAudioData.timestamp}`);
+                }
+                audioData.close();
+            }
+            await audioEncoder.encodeFrame(newAudioData);
            onMediaStateUpdate === null || onMediaStateUpdate === void 0 ? void 0 : onMediaStateUpdate((prevState) => {
                return {
                    ...prevState,
                    decodedAudioFrames: prevState.decodedAudioFrames + 1,
                };
            });
-
+            newAudioData.close();
        },
        onError(error) {
            abortConversion(new error_cause_1.default(`Audio decoder of track ${track.trackId} failed. Config: ${JSON.stringify(audioDecoderConfig)} (see .cause of this error)`, {

package/dist/select-container-creator.d.ts
CHANGED

@@ -1,2 +1,2 @@
 import type { ConvertMediaContainer } from './get-available-containers';
-export declare const selectContainerCreator: (container: ConvertMediaContainer) => ({
+export declare const selectContainerCreator: (container: ConvertMediaContainer) => ({ filename, logLevel, onBytesProgress, onMillisecondsProgress, writer, progressTracker, }: import("./create/media-fn").MediaFnGeneratorInput) => Promise<import("./create/media-fn").MediaFn>;

package/dist/test/create-matroska.test.js
CHANGED

@@ -12,6 +12,7 @@ const state = media_parser_1.MediaParserInternals.makeParserState({
     onAudioTrack: null,
     onVideoTrack: null,
     supportsContentRange: true,
+    contentLength: null,
 });
 (0, bun_test_1.test)('Should make Matroska header that is same as input', async () => {
     const headerOutput = (0, matroska_utils_1.makeMatroskaBytes)({

package/dist/test/stsd.test.js
CHANGED
@@ -47,6 +47,7 @@ const bun_test_1 = require("bun:test");
         getIterator: () => null,
         fields: {},
         supportsContentRange: true,
+        contentLength: null,
     }),
     signal: null,
     fields: {},
@@ -214,6 +215,7 @@ const bun_test_1 = require("bun:test");
             structure: true,
         },
         supportsContentRange: true,
+        contentLength: null,
     }),
     signal: null,
     logLevel: 'info',

package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@remotion/webcodecs",
-  "version": "4.0.
+  "version": "4.0.249",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "module": "dist/esm/index.mjs",
@@ -17,15 +17,15 @@
   "author": "Jonny Burger <jonny@remotion.dev>",
   "license": "Remotion License (See https://remotion.dev/docs/webcodecs#license)",
   "dependencies": {
-    "@remotion/media-parser": "4.0.
-    "@remotion/licensing": "4.0.
+    "@remotion/media-parser": "4.0.249",
+    "@remotion/licensing": "4.0.249"
   },
   "peerDependencies": {},
   "devDependencies": {
     "@types/dom-webcodecs": "0.1.11",
     "eslint": "9.14.0",
-    "@remotion/eslint-config-internal": "4.0.
-    "@remotion/example-videos": "4.0.
+    "@remotion/eslint-config-internal": "4.0.249",
+    "@remotion/example-videos": "4.0.249"
   },
   "keywords": [],
   "publishConfig": {