@remotion/media 4.0.364 → 4.0.366
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/audio/audio-for-preview.js +99 -33
- package/dist/audio/audio-preview-iterator.d.ts +30 -7
- package/dist/audio/audio-preview-iterator.js +173 -20
- package/dist/audio-iterator-manager.d.ts +66 -0
- package/dist/audio-iterator-manager.js +181 -0
- package/dist/calculate-playbacktime.d.ts +5 -0
- package/dist/calculate-playbacktime.js +4 -0
- package/dist/debug-overlay/preview-overlay.d.ts +11 -5
- package/dist/debug-overlay/preview-overlay.js +37 -8
- package/dist/esm/index.mjs +1023 -550
- package/dist/media-player.d.ts +28 -34
- package/dist/media-player.js +187 -313
- package/dist/nonce-manager.d.ts +9 -0
- package/dist/nonce-manager.js +13 -0
- package/dist/video/video-for-preview.js +91 -40
- package/dist/video-iterator-manager.d.ts +37 -0
- package/dist/video-iterator-manager.js +83 -0
- package/package.json +4 -4
package/dist/audio-iterator-manager.js
@@ -0,0 +1,181 @@
+import { AudioBufferSink } from 'mediabunny';
+import { isAlreadyQueued, makeAudioIterator, } from './audio/audio-preview-iterator';
+export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremounting, sharedAudioContext, }) => {
+    let muted = false;
+    let currentVolume = 1;
+    const gainNode = sharedAudioContext.createGain();
+    gainNode.connect(sharedAudioContext.destination);
+    const audioSink = new AudioBufferSink(audioTrack);
+    let audioBufferIterator = null;
+    let audioIteratorsCreated = 0;
+    const scheduleAudioChunk = ({ buffer, mediaTimestamp, playbackRate, scheduleAudioNode, }) => {
+        if (!audioBufferIterator) {
+            throw new Error('Audio buffer iterator not found');
+        }
+        const node = sharedAudioContext.createBufferSource();
+        node.buffer = buffer;
+        node.playbackRate.value = playbackRate;
+        node.connect(gainNode);
+        scheduleAudioNode(node, mediaTimestamp);
+        const iterator = audioBufferIterator;
+        iterator.addQueuedAudioNode(node, mediaTimestamp, buffer);
+        node.onended = () => {
+            // Some leniency is needed as we find that sometimes onended is fired a bit too early
+            setTimeout(() => {
+                iterator.removeQueuedAudioNode(node);
+            }, 30);
+        };
+    };
+    const onAudioChunk = ({ getIsPlaying, buffer, playbackRate, scheduleAudioNode, }) => {
+        if (getIsPlaying()) {
+            scheduleAudioChunk({
+                buffer: buffer.buffer,
+                mediaTimestamp: buffer.timestamp,
+                playbackRate,
+                scheduleAudioNode,
+            });
+        }
+        else {
+            if (!audioBufferIterator) {
+                throw new Error('Audio buffer iterator not found');
+            }
+            audioBufferIterator.addChunkForAfterResuming(buffer.buffer, buffer.timestamp);
+        }
+    };
+    const startAudioIterator = async ({ nonce, playbackRate, startFromSecond, getIsPlaying, scheduleAudioNode, }) => {
+        audioBufferIterator?.destroy();
+        const delayHandle = delayPlaybackHandleIfNotPremounting();
+        const iterator = makeAudioIterator(audioSink, startFromSecond);
+        audioIteratorsCreated++;
+        audioBufferIterator = iterator;
+        // Schedule up to 3 buffers ahead of the current time
+        for (let i = 0; i < 3; i++) {
+            const result = await iterator.getNext();
+            if (iterator.isDestroyed()) {
+                delayHandle.unblock();
+                return;
+            }
+            if (nonce.isStale()) {
+                delayHandle.unblock();
+                return;
+            }
+            if (!result.value) {
+                // media ended
+                delayHandle.unblock();
+                return;
+            }
+            onAudioChunk({
+                getIsPlaying,
+                buffer: result.value,
+                playbackRate,
+                scheduleAudioNode,
+            });
+        }
+        delayHandle.unblock();
+    };
+    const pausePlayback = () => {
+        if (!audioBufferIterator) {
+            return;
+        }
+        audioBufferIterator.moveQueuedChunksToPauseQueue();
+    };
+    const seek = async ({ newTime, nonce, fps, playbackRate, getIsPlaying, scheduleAudioNode, }) => {
+        if (!audioBufferIterator) {
+            await startAudioIterator({
+                nonce,
+                playbackRate,
+                startFromSecond: newTime,
+                getIsPlaying,
+                scheduleAudioNode,
+            });
+            return;
+        }
+        const currentTimeIsAlreadyQueued = isAlreadyQueued(newTime, audioBufferIterator.getQueuedPeriod([]));
+        const toBeScheduled = [];
+        if (!currentTimeIsAlreadyQueued) {
+            const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(newTime, false);
+            if (nonce.isStale()) {
+                return;
+            }
+            if (audioSatisfyResult.type === 'not-satisfied') {
+                await startAudioIterator({
+                    nonce,
+                    playbackRate,
+                    startFromSecond: newTime,
+                    getIsPlaying,
+                    scheduleAudioNode,
+                });
+                return;
+            }
+            toBeScheduled.push(...audioSatisfyResult.buffers);
+        }
+        const nextTime = newTime +
+            // start of next frame
+            (1 / fps) * playbackRate +
+            // need the full duration of the next frame to be queued
+            (1 / fps) * playbackRate;
+        const nextIsAlreadyQueued = isAlreadyQueued(nextTime, audioBufferIterator.getQueuedPeriod(toBeScheduled));
+        if (!nextIsAlreadyQueued) {
+            // here we allow waiting for the next buffer to be loaded
+            // it's better than creating a new iterator
+            // because we already know we are in the right spot
+            const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(nextTime, true);
+            if (nonce.isStale()) {
+                return;
+            }
+            if (audioSatisfyResult.type === 'not-satisfied') {
+                await startAudioIterator({
+                    nonce,
+                    playbackRate,
+                    startFromSecond: newTime,
+                    getIsPlaying,
+                    scheduleAudioNode,
+                });
+                return;
+            }
+            toBeScheduled.push(...audioSatisfyResult.buffers);
+        }
+        for (const buffer of toBeScheduled) {
+            onAudioChunk({
+                getIsPlaying,
+                buffer,
+                playbackRate,
+                scheduleAudioNode,
+            });
+        }
+    };
+    const resumeScheduledAudioChunks = ({ playbackRate, scheduleAudioNode, }) => {
+        if (!audioBufferIterator) {
+            return;
+        }
+        for (const chunk of audioBufferIterator.getAndClearAudioChunksForAfterResuming()) {
+            scheduleAudioChunk({
+                buffer: chunk.buffer,
+                mediaTimestamp: chunk.timestamp,
+                playbackRate,
+                scheduleAudioNode,
+            });
+        }
+    };
+    return {
+        startAudioIterator,
+        resumeScheduledAudioChunks,
+        pausePlayback,
+        getAudioBufferIterator: () => audioBufferIterator,
+        destroy: () => {
+            audioBufferIterator?.destroy();
+            audioBufferIterator = null;
+        },
+        seek,
+        getAudioIteratorsCreated: () => audioIteratorsCreated,
+        setMuted: (newMuted) => {
+            muted = newMuted;
+            gainNode.gain.value = muted ? 0 : currentVolume;
+        },
+        setVolume: (volume) => {
+            currentVolume = Math.max(0, volume);
+            gainNode.gain.value = muted ? 0 : currentVolume;
+        },
+        scheduleAudioChunk,
+    };
+};
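This new `audio-iterator-manager.js` appears to pull per-track audio scheduling out of `media-player.js` (which shrinks by 313 lines in this release) into a factory that owns the gain node, the `AudioBufferSink`, and the iterator lifecycle. A rough sketch of how the returned manager might be driven — `track`, `ctx`, and the delay/nonce stubs below are illustrative assumptions, not APIs confirmed by this diff:

```ts
// Sketch only: assumes `track` is a mediabunny audio track and `ctx`
// is the player's shared AudioContext. The delay handle and nonce are
// hypothetical no-op stubs standing in for the real player internals.
const manager = audioIteratorManager({
	audioTrack: track,
	sharedAudioContext: ctx,
	delayPlaybackHandleIfNotPremounting: () => ({unblock: () => {}}),
});

manager.setVolume(0.8); // clamped to >= 0, silenced while muted

await manager.startAudioIterator({
	nonce: {isStale: () => false}, // hypothetical: never superseded by a later seek
	playbackRate: 1,
	startFromSecond: 0,
	getIsPlaying: () => true,
	// Simplified scheduling: start each chunk at its media timestamp on
	// the AudioContext clock (the real player offsets by a sync anchor).
	scheduleAudioNode: (node, mediaTimestamp) => {
		node.start(ctx.currentTime + mediaTimestamp);
	},
});
```

Note that `seek()` first tries to satisfy the new time from the already-queued period and only falls back to destroying and recreating the iterator when `tryToSatisfySeek` reports `not-satisfied` — which is what keeps the `getAudioIteratorsCreated()` counter low while scrubbing.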
package/dist/debug-overlay/preview-overlay.d.ts
@@ -1,5 +1,11 @@
-
-
-
-
-
+import type { AudioIteratorManager } from '../audio-iterator-manager';
+import type { VideoIteratorManager } from '../video-iterator-manager';
+export declare const drawPreviewOverlay: ({ context, audioTime, audioContextState, audioSyncAnchor, playing, audioIteratorManager, videoIteratorManager, }: {
+    context: OffscreenCanvasRenderingContext2D | CanvasRenderingContext2D;
+    audioTime: number;
+    audioContextState: AudioContextState;
+    audioSyncAnchor: number;
+    playing: boolean;
+    audioIteratorManager: AudioIteratorManager | null;
+    videoIteratorManager: VideoIteratorManager | null;
+}) => void;
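The declaration swaps the old positional parameter list for a single options object. Under the new signature a call looks roughly like this (`canvas`, `ctx`, `anchor`, and `manager` are placeholders, not values from the package):

```ts
drawPreviewOverlay({
	context: canvas.getContext('2d')!, // placeholder canvas
	audioTime: ctx.currentTime,        // ctx: the shared AudioContext
	audioContextState: ctx.state,
	audioSyncAnchor: anchor,           // placeholder sync anchor, in seconds
	playing: true,
	audioIteratorManager: manager,     // null when there is no audio track
	videoIteratorManager: null,
});
```

When `audioIteratorManager` is `null`, the queued-period and `Playing` lines are skipped entirely, as the implementation below shows.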
package/dist/debug-overlay/preview-overlay.js
@@ -1,13 +1,42 @@
-export const drawPreviewOverlay = (context,
-//
+export const drawPreviewOverlay = ({ context, audioTime, audioContextState, audioSyncAnchor, playing, audioIteratorManager, videoIteratorManager, }) => {
+    // Collect all lines to be rendered
+    const lines = [
+        'Debug overlay',
+        `Video iterators created: ${videoIteratorManager?.getVideoIteratorsCreated()}`,
+        `Audio iterators created: ${audioIteratorManager?.getAudioIteratorsCreated()}`,
+        `Frames rendered: ${videoIteratorManager?.getFramesRendered()}`,
+        `Audio context state: ${audioContextState}`,
+        `Audio time: ${(audioTime - audioSyncAnchor).toFixed(3)}s`,
+    ];
+    if (audioIteratorManager) {
+        const queuedPeriod = audioIteratorManager
+            .getAudioBufferIterator()
+            ?.getQueuedPeriod([]);
+        const numberOfChunksAfterResuming = audioIteratorManager
+            ?.getAudioBufferIterator()
+            ?.getNumberOfChunksAfterResuming();
+        if (queuedPeriod) {
+            lines.push(`Audio queued until: ${(queuedPeriod.until - (audioTime - audioSyncAnchor)).toFixed(3)}s`);
+        }
+        else if (numberOfChunksAfterResuming) {
+            lines.push(`Audio chunks for after resuming: ${numberOfChunksAfterResuming}`);
+        }
+        lines.push(`Playing: ${playing}`);
+    }
+    const lineHeight = 30; // px, should match or exceed font size
+    const boxPaddingX = 10;
+    const boxPaddingY = 10;
+    const boxLeft = 20;
+    const boxTop = 20;
+    const boxWidth = 600;
+    const boxHeight = lines.length * lineHeight + 2 * boxPaddingY;
+    // Draw background for text legibility
     context.fillStyle = 'rgba(0, 0, 0, 1)';
-    context.fillRect(
+    context.fillRect(boxLeft, boxTop, boxWidth, boxHeight);
     context.fillStyle = 'white';
     context.font = '24px sans-serif';
     context.textBaseline = 'top';
-
-
-
-    context.fillText(`Audio context state: ${audioContextState}`, 30, 120);
-    context.fillText(`Audio time: ${audioSyncAnchor.toFixed(3)}s`, 30, 150);
+    for (let i = 0; i < lines.length; i++) {
+        context.fillText(lines[i], boxLeft + boxPaddingX, boxTop + boxPaddingY + i * lineHeight);
+    }
 };
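The rewrite collects every stat into a `lines` array up front so the background box can be sized to fit: with all six base lines plus the queued-until line and the `Playing` flag, `boxHeight` works out to 8 × 30 + 2 × 10 = 260 px, replacing the hard-coded `fillText` coordinates the old implementation used. It also corrects the "Audio time" readout to show the anchored playback time, `audioTime - audioSyncAnchor`, rather than the anchor itself.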