@remotion/webcodecs 4.0.303 → 4.0.305
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/audio-decoder.js +1 -1
- package/dist/convert-encoded-chunk.d.ts +1 -1
- package/dist/convert-encoded-chunk.js +2 -5
- package/dist/create/iso-base-media/create-iso-base-media.js +3 -3
- package/dist/create/iso-base-media/example-stts.js +620 -620
- package/dist/create/iso-base-media/trak/mdia/minf/create-stbl.js +3 -1
- package/dist/create/iso-base-media/trak/mdia/minf/stbl/create-ctts.js +1 -1
- package/dist/create/iso-base-media/trak/mdia/minf/stbl/create-stts.js +3 -2
- package/dist/create/matroska/create-matroska-media.js +1 -1
- package/dist/esm/index.mjs +22 -25
- package/dist/on-audio-track.js +4 -4
- package/dist/on-video-track.js +5 -5
- package/dist/video-decoder.js +1 -1
- package/package.json +5 -5
package/dist/create/iso-base-media/trak/mdia/minf/create-stbl.js
CHANGED

@@ -17,7 +17,9 @@ const createStbl = ({ samplePositions, codecSpecificData, isVideo, }) => {
    // The sample entries are ordered by time stamps; therefore, the deltas are all nonnegative.
    // For the other tables, there doesn't seem to be a requirement for them to be sorted
    // -> ordering the sample positions by dts
-   const sorted = samplePositions
+   const sorted = samplePositions
+       .slice()
+       .sort((a, b) => a.decodingTimestamp - b.decodingTimestamp);
    return (0, primitives_1.addSize)((0, matroska_utils_1.combineUint8Arrays)([
        (0, primitives_1.stringsToUint8Array)('stbl'),
        (0, create_avc1_1.createStsdData)(codecSpecificData),
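The comment block above states the constraint: stts deltas between successive decoding timestamps must be nonnegative, so the sample positions are now sorted by DTS on a copy of the array before the tables are built. A minimal sketch of that step, using a hypothetical, simplified SamplePosition shape rather than the package's internal type:

// Hypothetical, simplified shape for illustration only.
type SamplePosition = {
  timestamp: number;          // presentation time (PTS), in track units
  decodingTimestamp: number;  // decoding time (DTS), in track units
  duration?: number;
};

// Sort a copy by DTS so stts deltas (next DTS minus current DTS) are never negative.
// .slice() avoids mutating the caller's array, which an in-place .sort() would do.
const sortByDecodingTimestamp = (samples: SamplePosition[]): SamplePosition[] =>
  samples.slice().sort((a, b) => a.decodingTimestamp - b.decodingTimestamp);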
package/dist/create/iso-base-media/trak/mdia/minf/stbl/create-ctts.js
CHANGED

@@ -10,7 +10,7 @@ const makeEntry = (entry) => {
    ]);
 };
 const createCttsBox = (samplePositions) => {
-   const offsets = samplePositions.map((s) => s.
+   const offsets = samplePositions.map((s) => s.timestamp - s.decodingTimestamp);
    const entries = [];
    let lastOffset = null;
    for (const offset of offsets) {
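A ctts entry records each sample's composition offset, i.e. presentation timestamp minus decoding timestamp. The surrounding code (entries, lastOffset) appears to run-length encode equal consecutive offsets into (count, offset) pairs; the sketch below shows one plausible shape of that grouping with plain objects, not the package's actual binary box writer:

type CttsEntry = {count: number; offset: number};

// Run-length encode per-sample composition offsets (PTS - DTS) into ctts-style entries.
const toCttsEntries = (offsets: number[]): CttsEntry[] => {
  const entries: CttsEntry[] = [];
  for (const offset of offsets) {
    const last = entries[entries.length - 1];
    if (last && last.offset === offset) {
      last.count++;                       // extend the current run
    } else {
      entries.push({count: 1, offset});   // start a new run
    }
  }
  return entries;
};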
package/dist/create/iso-base-media/trak/mdia/minf/stbl/create-stts.js
CHANGED

@@ -18,9 +18,10 @@ const createSttsAtom = (samplePositions) => {
        // TODO: Why does 0 appear here?
        if (a[i].duration === undefined || a[i].duration === 0) {
            if (a[i + 1] === undefined) {
-               return a[i].
+               return (a[i].decodingTimestamp -
+                   (a[i - 1]?.decodingTimestamp ?? a[i].decodingTimestamp));
            }
-           return a[i + 1].
+           return a[i + 1].decodingTimestamp - a[i].decodingTimestamp;
        }
        return a[i].duration;
    });
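With the sorted sample positions in place, a missing or zero sample duration is now derived from decoding timestamps: a sample's duration is the next sample's DTS minus its own, and the last sample falls back to the delta to its predecessor (0 when there is only one sample). Restated as a standalone helper, assuming the hypothetical SamplePosition shape from the earlier sketch:

// Derive per-sample durations for stts when sample.duration is missing or 0.
const deriveDurations = (samples: SamplePosition[]): number[] =>
  samples.map((s, i, a) => {
    if (s.duration !== undefined && s.duration !== 0) {
      return s.duration;
    }
    if (a[i + 1] === undefined) {
      // Last sample: reuse the previous delta; 0 if there is only one sample.
      return s.decodingTimestamp - (a[i - 1]?.decodingTimestamp ?? s.decodingTimestamp);
    }
    return a[i + 1].decodingTimestamp - s.decodingTimestamp;
  });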
package/dist/create/matroska/create-matroska-media.js
CHANGED

@@ -88,7 +88,7 @@ const createMatroskaMedia = async ({ writer, onBytesProgress, onMillisecondsProg
    // In Safari, samples can arrive out of order, e.g public/bigbuckbunny.mp4
    // Therefore, only updating track number progress if it is a keyframe
    // to allow for timestamps to be lower than the previous one
-   progressTracker.setPossibleLowestTimestamp(Math.min(chunk.timestamp, chunk.
+   progressTracker.setPossibleLowestTimestamp(Math.min(chunk.timestamp, chunk.decodingTimestamp ?? Infinity));
    const smallestProgress = progressTracker.getSmallestProgress();
    if (!currentCluster.shouldMakeNewCluster({
        newT: smallestProgress,
package/dist/esm/index.mjs
CHANGED
@@ -776,7 +776,7 @@ var createAudioDecoder = ({
  if (audioDecoder.state === "closed") {
    return;
  }
- progressTracker.setPossibleLowestTimestamp(Math.min(audioSample.timestamp, audioSample.
+ progressTracker.setPossibleLowestTimestamp(Math.min(audioSample.timestamp, audioSample.decodingTimestamp ?? Infinity));
  await ioSynchronizer.waitFor({
    unemitted: 20,
    unprocessed: 20,

@@ -2552,7 +2552,7 @@ var makeEntry = (entry) => {
  ]);
 };
 var createCttsBox = (samplePositions) => {
- const offsets = samplePositions.map((s) => s.
+ const offsets = samplePositions.map((s) => s.timestamp - s.decodingTimestamp);
  const entries = [];
  let lastOffset = null;
  for (const offset of offsets) {

@@ -2684,9 +2684,9 @@ var createSttsAtom = (samplePositions) => {
  const durations = samplePositions.map((_, i, a) => {
    if (a[i].duration === undefined || a[i].duration === 0) {
      if (a[i + 1] === undefined) {
-       return a[i].
+       return a[i].decodingTimestamp - (a[i - 1]?.decodingTimestamp ?? a[i].decodingTimestamp);
      }
-     return a[i + 1].
+     return a[i + 1].decodingTimestamp - a[i].decodingTimestamp;
    }
    return a[i].duration;
  });

@@ -2728,7 +2728,7 @@ var createStbl = ({
  codecSpecificData,
  isVideo
 }) => {
- const sorted = samplePositions.slice().sort((a, b) => a.
+ const sorted = samplePositions.slice().sort((a, b) => a.decodingTimestamp - b.decodingTimestamp);
  return addSize(combineUint8Arrays([
    stringsToUint8Array("stbl"),
    createStsdData(codecSpecificData),

@@ -3019,7 +3019,7 @@ var createIsoBaseMedia = async ({
  await w.write(chunk.data);
  mdatSize += chunk.data.length;
  onBytesProgress(w.getWrittenByteCount());
- progressTracker.setPossibleLowestTimestamp(Math.min(chunk.timestamp, chunk.
+ progressTracker.setPossibleLowestTimestamp(Math.min(chunk.timestamp, chunk.decodingTimestamp ?? Infinity));
  progressTracker.updateTrackProgress(trackNumber, chunk.timestamp);
  if (codecPrivate) {
    addCodecPrivateToTrack({ trackNumber, codecPrivate });

@@ -3059,8 +3059,8 @@ var createIsoBaseMedia = async ({
  isKeyframe: chunk.type === "key",
  offset: position,
  chunk: sampleChunkIndices[trackNumber],
-
-
+ timestamp: Math.round(chunk.timestamp / 1e6 * currentTrack.timescale),
+ decodingTimestamp: Math.round(chunk.decodingTimestamp / 1e6 * currentTrack.timescale),
  duration: Math.round((chunk.duration ?? 0) / 1e6 * currentTrack.timescale),
  size: chunk.data.length,
  bigEndian: false,
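The two added fields convert the chunk's WebCodecs timestamps, which are in microseconds, into the track's timescale before they enter the sample table: divide by 1e6, multiply by the timescale, and round. A small worked sketch, with toTimescaleUnits as an illustrative helper name and 90000 Hz as an assumed timescale:

// Convert a WebCodecs timestamp (microseconds) into track timescale units.
const toTimescaleUnits = (timestampInMicroseconds: number, timescale: number): number =>
  Math.round((timestampInMicroseconds / 1e6) * timescale);

// e.g. 33333 µs (~one frame at 30 fps) in a 90000 Hz track:
toTimescaleUnits(33_333, 90_000); // => 3000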
@@ -3881,7 +3881,7 @@ var createMatroskaMedia = async ({
  chunk,
  isVideo
 }) => {
- progressTracker.setPossibleLowestTimestamp(Math.min(chunk.timestamp, chunk.
+ progressTracker.setPossibleLowestTimestamp(Math.min(chunk.timestamp, chunk.decodingTimestamp ?? Infinity));
  const smallestProgress = progressTracker.getSmallestProgress();
  if (!currentCluster.shouldMakeNewCluster({
    newT: smallestProgress,

@@ -4215,7 +4215,7 @@ import {
 } from "@remotion/media-parser";

 // src/convert-encoded-chunk.ts
-var convertEncodedChunk = (chunk
+var convertEncodedChunk = (chunk) => {
  const arr = new Uint8Array(chunk.byteLength);
  chunk.copyTo(arr);
  return {

@@ -4223,11 +4223,8 @@ var convertEncodedChunk = (chunk, trackId) => {
  duration: chunk.duration ?? undefined,
  timestamp: chunk.timestamp,
  type: chunk.type,
-
-
- trackId,
- offset: 0,
- timescale: 1e6
+ decodingTimestamp: chunk.timestamp,
+ offset: 0
  };
 };
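convertEncodedChunk now takes only the chunk and mirrors its timestamp into decodingTimestamp, since an EncodedAudioChunk or EncodedVideoChunk coming out of a WebCodecs encoder carries a single timestamp and no separate DTS; the trackId and timescale fields are gone. A self-contained sketch of the same conversion (convertEncodedChunkSketch is an illustrative name, and the returned object shape is simplified):

// Copy an EncodedVideoChunk / EncodedAudioChunk into a plain, serializable object.
const convertEncodedChunkSketch = (chunk: EncodedVideoChunk | EncodedAudioChunk) => {
  const data = new Uint8Array(chunk.byteLength);
  chunk.copyTo(data); // copies the encoded bytes out of the opaque chunk
  return {
    data,
    type: chunk.type,                    // "key" | "delta"
    timestamp: chunk.timestamp,          // microseconds
    decodingTimestamp: chunk.timestamp,  // encoder output has no separate DTS
    duration: chunk.duration ?? undefined,
    offset: 0,
  };
};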
@@ -4323,9 +4320,9 @@ var makeAudioTrackHandler = ({
  numberOfChannels: track.numberOfChannels,
  sampleRate: track.sampleRate,
  codecPrivate: track.codecData?.data ?? null,
- timescale: track.
+ timescale: track.originalTimescale
 });
-Log.verbose(logLevel, `Copying audio track ${track.trackId} as track ${addedTrack.trackNumber}. Timescale = ${track.
+Log.verbose(logLevel, `Copying audio track ${track.trackId} as track ${addedTrack.trackNumber}. Timescale = ${track.originalTimescale}, codec = ${track.codecEnum} (${track.codec}) `);
 return async (audioSample) => {
  await state.addSample({
    chunk: audioSample,

@@ -4375,7 +4372,7 @@ var makeAudioTrackHandler = ({
  numberOfChannels: audioEncoderConfig.numberOfChannels,
  sampleRate: audioOperation.sampleRate ?? audioEncoderConfig.sampleRate,
  codecPrivate,
- timescale: track.
+ timescale: track.originalTimescale
 });
 const audioEncoder = createAudioEncoder({
  onNewAudioSampleRate: (sampleRate) => {

@@ -4383,7 +4380,7 @@ var makeAudioTrackHandler = ({
  },
  onChunk: async (chunk) => {
    await state.addSample({
-     chunk: convertEncodedChunk(chunk
+     chunk: convertEncodedChunk(chunk),
      trackNumber,
      isVideo: false,
      codecPrivate

@@ -4722,7 +4719,7 @@ var createVideoDecoder = ({
  if (videoDecoder.state === "closed") {
    return;
  }
- progress.setPossibleLowestTimestamp(Math.min(sample.timestamp, sample.
+ progress.setPossibleLowestTimestamp(Math.min(sample.timestamp, sample.decodingTimestamp ?? Infinity));
  await ioSynchronizer.waitFor({
    unemitted: 20,
    unprocessed: 10,

@@ -4897,7 +4894,7 @@ var makeVideoTrackHandler = ({
  throw new Error(`Video track with ID ${track.trackId} resolved with {"type": "fail"}. This could mean that this video track could neither be copied to the output container or re-encoded. You have the option to drop the track instead of failing it: https://remotion.dev/docs/webcodecs/track-transformation`);
 }
 if (videoOperation.type === "copy") {
-  Log.verbose(logLevel, `Copying video track with codec ${track.codec} and timescale ${track.
+  Log.verbose(logLevel, `Copying video track with codec ${track.codec} and timescale ${track.originalTimescale}`);
  const videoTrack = await state.addTrack({
    type: "video",
    color: track.advancedColor,

@@ -4905,7 +4902,7 @@ var makeVideoTrackHandler = ({
  height: track.codedHeight,
  codec: track.codecEnum,
  codecPrivate: track.codecData?.data ?? null,
- timescale: track.
+ timescale: track.originalTimescale
 });
 return async (sample) => {
  await state.addSample({

@@ -4957,13 +4954,13 @@ var makeVideoTrackHandler = ({
  height: newHeight,
  codec: videoOperation.videoCodec,
  codecPrivate: null,
- timescale: track.
+ timescale: track.originalTimescale
 });
-Log.verbose(logLevel, `Created new video track with ID ${trackNumber}, codec ${videoOperation.videoCodec} and timescale ${track.
+Log.verbose(logLevel, `Created new video track with ID ${trackNumber}, codec ${videoOperation.videoCodec} and timescale ${track.originalTimescale}`);
 const videoEncoder = createVideoEncoder({
  onChunk: async (chunk, metadata) => {
    await state.addSample({
-     chunk: convertEncodedChunk(chunk
+     chunk: convertEncodedChunk(chunk),
      trackNumber,
      isVideo: true,
      codecPrivate: arrayBufferToUint8Array(metadata?.decoderConfig?.description ?? null)
package/dist/on-audio-track.js
CHANGED
@@ -38,9 +38,9 @@ const makeAudioTrackHandler = ({ state, defaultAudioCodec: audioCodec, controlle
    numberOfChannels: track.numberOfChannels,
    sampleRate: track.sampleRate,
    codecPrivate: track.codecData?.data ?? null,
-   timescale: track.
+   timescale: track.originalTimescale,
 });
-log_1.Log.verbose(logLevel, `Copying audio track ${track.trackId} as track ${addedTrack.trackNumber}. Timescale = ${track.
+log_1.Log.verbose(logLevel, `Copying audio track ${track.trackId} as track ${addedTrack.trackNumber}. Timescale = ${track.originalTimescale}, codec = ${track.codecEnum} (${track.codec}) `);
 return async (audioSample) => {
    await state.addSample({
        chunk: audioSample,

@@ -94,7 +94,7 @@ const makeAudioTrackHandler = ({ state, defaultAudioCodec: audioCodec, controlle
    numberOfChannels: audioEncoderConfig.numberOfChannels,
    sampleRate: audioOperation.sampleRate ?? audioEncoderConfig.sampleRate,
    codecPrivate,
-   timescale: track.
+   timescale: track.originalTimescale,
 });
 const audioEncoder = (0, audio_encoder_1.createAudioEncoder)({
    // This is weird 😵💫

@@ -106,7 +106,7 @@ const makeAudioTrackHandler = ({ state, defaultAudioCodec: audioCodec, controlle
    },
    onChunk: async (chunk) => {
        await state.addSample({
-           chunk: (0, convert_encoded_chunk_1.convertEncodedChunk)(chunk
+           chunk: (0, convert_encoded_chunk_1.convertEncodedChunk)(chunk),
            trackNumber,
            isVideo: false,
            codecPrivate,
package/dist/on-video-track.js
CHANGED
@@ -41,7 +41,7 @@ const makeVideoTrackHandler = ({ state, onVideoFrame, onMediaStateUpdate, abortC
    throw new Error(`Video track with ID ${track.trackId} resolved with {"type": "fail"}. This could mean that this video track could neither be copied to the output container or re-encoded. You have the option to drop the track instead of failing it: https://remotion.dev/docs/webcodecs/track-transformation`);
 }
 if (videoOperation.type === 'copy') {
-   log_1.Log.verbose(logLevel, `Copying video track with codec ${track.codec} and timescale ${track.
+   log_1.Log.verbose(logLevel, `Copying video track with codec ${track.codec} and timescale ${track.originalTimescale}`);
    const videoTrack = await state.addTrack({
        type: 'video',
        color: track.advancedColor,

@@ -49,7 +49,7 @@ const makeVideoTrackHandler = ({ state, onVideoFrame, onMediaStateUpdate, abortC
    height: track.codedHeight,
    codec: track.codecEnum,
    codecPrivate: track.codecData?.data ?? null,
-   timescale: track.
+   timescale: track.originalTimescale,
 });
 return async (sample) => {
    await state.addSample({

@@ -101,13 +101,13 @@ const makeVideoTrackHandler = ({ state, onVideoFrame, onMediaStateUpdate, abortC
    height: newHeight,
    codec: videoOperation.videoCodec,
    codecPrivate: null,
-   timescale: track.
+   timescale: track.originalTimescale,
 });
-log_1.Log.verbose(logLevel, `Created new video track with ID ${trackNumber}, codec ${videoOperation.videoCodec} and timescale ${track.
+log_1.Log.verbose(logLevel, `Created new video track with ID ${trackNumber}, codec ${videoOperation.videoCodec} and timescale ${track.originalTimescale}`);
 const videoEncoder = (0, video_encoder_1.createVideoEncoder)({
    onChunk: async (chunk, metadata) => {
        await state.addSample({
-           chunk: (0, convert_encoded_chunk_1.convertEncodedChunk)(chunk
+           chunk: (0, convert_encoded_chunk_1.convertEncodedChunk)(chunk),
            trackNumber,
            isVideo: true,
            codecPrivate: (0, arraybuffer_to_uint8_array_1.arrayBufferToUint8Array)((metadata?.decoderConfig?.description ??
package/dist/video-decoder.js
CHANGED
@@ -74,7 +74,7 @@ const createVideoDecoder = ({ onFrame, onError, controller, config, logLevel, pr
    if (videoDecoder.state === 'closed') {
        return;
    }
-   progress.setPossibleLowestTimestamp(Math.min(sample.timestamp, sample.
+   progress.setPossibleLowestTimestamp(Math.min(sample.timestamp, sample.decodingTimestamp ?? Infinity));
    await ioSynchronizer.waitFor({
        unemitted: 20,
        unprocessed: 10,
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@remotion/webcodecs",
-  "version": "4.0.
+  "version": "4.0.305",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "module": "dist/esm/index.mjs",

@@ -19,8 +19,8 @@
   "author": "Jonny Burger <jonny@remotion.dev>",
   "license": "Remotion License (See https://remotion.dev/docs/webcodecs#license)",
   "dependencies": {
-    "@remotion/media-parser": "4.0.
-    "@remotion/licensing": "4.0.
+    "@remotion/media-parser": "4.0.305",
+    "@remotion/licensing": "4.0.305"
   },
   "peerDependencies": {},
   "devDependencies": {

@@ -28,8 +28,8 @@
     "playwright": "1.51.1",
     "@playwright/test": "1.51.1",
     "eslint": "9.19.0",
-    "@remotion/eslint-config-internal": "4.0.
-    "@remotion/example-videos": "4.0.
+    "@remotion/eslint-config-internal": "4.0.305",
+    "@remotion/example-videos": "4.0.305"
   },
   "keywords": [],
   "publishConfig": {