@remotion/media 4.0.430 → 4.0.432
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/audio/audio-for-preview.d.ts +1 -0
- package/dist/audio/audio-preview-iterator.d.ts +16 -9
- package/dist/audio/props.d.ts +1 -0
- package/dist/audio-iterator-manager.d.ts +24 -13
- package/dist/debug-overlay/preview-overlay.d.ts +24 -14
- package/dist/esm/index.mjs +755 -537
- package/dist/make-iterator-with-priming.d.ts +6 -0
- package/dist/media-player.d.ts +12 -7
- package/dist/prewarm-iterator-for-looping.d.ts +3 -2
- package/dist/set-global-time-anchor.d.ts +11 -0
- package/dist/shared-audio-context-for-media-player.d.ts +8 -0
- package/dist/use-common-effects.d.ts +32 -0
- package/dist/video/props.d.ts +1 -0
- package/dist/video/video-for-preview.d.ts +1 -0
- package/package.json +4 -4
- package/dist/audio/allow-wait.d.ts +0 -6
- package/dist/audio/allow-wait.js +0 -15
- package/dist/audio/audio-for-preview.js +0 -304
- package/dist/audio/audio-for-rendering.js +0 -194
- package/dist/audio/audio-preview-iterator.js +0 -176
- package/dist/audio/audio.js +0 -20
- package/dist/audio/props.js +0 -1
- package/dist/audio-extraction/audio-cache.js +0 -66
- package/dist/audio-extraction/audio-iterator.js +0 -132
- package/dist/audio-extraction/audio-manager.js +0 -113
- package/dist/audio-extraction/extract-audio.js +0 -132
- package/dist/audio-iterator-manager.js +0 -228
- package/dist/browser-can-use-webgl2.js +0 -13
- package/dist/caches.js +0 -61
- package/dist/calculate-playbacktime.js +0 -4
- package/dist/convert-audiodata/apply-volume.js +0 -17
- package/dist/convert-audiodata/combine-audiodata.js +0 -23
- package/dist/convert-audiodata/convert-audiodata.js +0 -73
- package/dist/convert-audiodata/resample-audiodata.js +0 -94
- package/dist/debug-overlay/preview-overlay.js +0 -42
- package/dist/extract-frame-and-audio.js +0 -101
- package/dist/get-sink.js +0 -15
- package/dist/get-time-in-seconds.js +0 -40
- package/dist/helpers/round-to-4-digits.js +0 -4
- package/dist/index.js +0 -12
- package/dist/is-type-of-error.js +0 -20
- package/dist/looped-frame.js +0 -10
- package/dist/media-player.js +0 -431
- package/dist/nonce-manager.js +0 -13
- package/dist/prewarm-iterator-for-looping.js +0 -56
- package/dist/render-timestamp-range.js +0 -9
- package/dist/show-in-timeline.js +0 -31
- package/dist/use-media-in-timeline.js +0 -103
- package/dist/video/props.js +0 -1
- package/dist/video/video-for-preview.js +0 -331
- package/dist/video/video-for-rendering.js +0 -263
- package/dist/video/video-preview-iterator.js +0 -122
- package/dist/video/video.js +0 -35
- package/dist/video-extraction/add-broadcast-channel-listener.js +0 -125
- package/dist/video-extraction/extract-frame-via-broadcast-channel.js +0 -113
- package/dist/video-extraction/extract-frame.js +0 -85
- package/dist/video-extraction/get-allocation-size.js +0 -6
- package/dist/video-extraction/get-frames-since-keyframe.js +0 -108
- package/dist/video-extraction/keyframe-bank.js +0 -159
- package/dist/video-extraction/keyframe-manager.js +0 -206
- package/dist/video-extraction/remember-actual-matroska-timestamps.js +0 -19
- package/dist/video-extraction/rotate-frame.js +0 -34
- package/dist/video-iterator-manager.js +0 -109
|
@@ -1,113 +0,0 @@
|
|
|
1
|
-
import { Internals } from 'remotion';
|
|
2
|
-
import { getTotalCacheStats } from '../caches';
|
|
3
|
-
import { makeAudioIterator } from './audio-iterator';
|
|
4
|
-
/**
 * Creates the process-wide manager for audio sample iterators.
 * Keeps one iterator per (src, buffered range), evicts least-recently-used
 * iterators when the media cache exceeds its budget, and serializes
 * getIterator() requests so concurrent callers cannot race on the list.
 */
export const makeAudioManager = () => {
    // All audio sample iterators currently alive, across all sources.
    const iterators = [];
    const makeIterator = ({ timeInSeconds, src, audioSampleSink, isMatroska, actualMatroskaTimestamps, logLevel, }) => {
        const iterator = makeAudioIterator({
            audioSampleSink,
            isMatroska,
            startTimestamp: timeInSeconds,
            src,
            actualMatroskaTimestamps,
            logLevel,
        });
        iterators.push(iterator);
        return iterator;
    };
    // Returns the least-recently-used iterator, or null if none exist.
    const getIteratorMostInThePast = () => {
        let mostInThePast = null;
        let mostInThePastIterator = null;
        for (const iterator of iterators) {
            const lastUsed = iterator.getLastUsed();
            if (mostInThePast === null || lastUsed < mostInThePast) {
                mostInThePast = lastUsed;
                mostInThePastIterator = iterator;
            }
        }
        return mostInThePastIterator;
    };
    // Evicts the least-recently-used iterator. Returns false when there was
    // nothing to evict, so callers can stop retrying.
    const deleteOldestIterator = () => {
        const iterator = getIteratorMostInThePast();
        if (!iterator) {
            return false;
        }
        iterator.prepareForDeletion();
        iterators.splice(iterators.indexOf(iterator), 1);
        return true;
    };
    const deleteDuplicateIterators = (logLevel) => {
        const seenKeys = new Set();
        // Iterate over a snapshot: splicing `iterators` while walking it by
        // index would skip the element that shifts into the removed slot.
        for (const iterator of [...iterators]) {
            const key = `${iterator.src}-${iterator.getOldestTimestamp()}-${iterator.getNewestTimestamp()}`;
            if (seenKeys.has(key)) {
                iterator.prepareForDeletion();
                iterators.splice(iterators.indexOf(iterator), 1);
                Internals.Log.verbose({ logLevel, tag: '@remotion/media' }, `Deleted duplicate iterator for ${iterator.src}`);
            }
            seenKeys.add(key);
        }
    };
    const getIterator = async ({ src, timeInSeconds, audioSampleSink, isMatroska, actualMatroskaTimestamps, logLevel, maxCacheSize, }) => {
        // Evict LRU iterators until the cache fits the budget. Bail out when
        // there is nothing left to evict — the total cache stats also include
        // other caches, so eviction here may not be able to free enough.
        while ((await getTotalCacheStats()).totalSize > maxCacheSize) {
            if (!deleteOldestIterator()) {
                break;
            }
        }
        // Re-use an iterator that already covers the requested time.
        for (const iterator of iterators) {
            if (iterator.src === src &&
                (await iterator.waitForCompletion()) &&
                iterator.canSatisfyRequestedTime(timeInSeconds)) {
                return iterator;
            }
        }
        // Delete iterators with the same starting timestamp as requested —
        // they would only duplicate the one we are about to create.
        // Snapshot again to avoid the splice-while-iterating skip.
        for (const iterator of [...iterators]) {
            if (iterator.src === src && iterator.startTimestamp === timeInSeconds) {
                iterator.prepareForDeletion();
                iterators.splice(iterators.indexOf(iterator), 1);
            }
        }
        deleteDuplicateIterators(logLevel);
        return makeIterator({
            src,
            timeInSeconds,
            audioSampleSink,
            isMatroska,
            actualMatroskaTimestamps,
            logLevel,
        });
    };
    // Aggregates cache statistics over all live iterators.
    const getCacheStats = () => {
        let totalCount = 0;
        let totalSize = 0;
        for (const iterator of iterators) {
            const { count, size } = iterator.getCacheStats();
            totalCount += count;
            totalSize += size;
        }
        return { count: totalCount, totalSize };
    };
    const logOpenFrames = () => {
        for (const iterator of iterators) {
            iterator.logOpenFrames();
        }
    };
    // Serializes getIterator() calls so concurrent requests cannot race on
    // the shared iterator list.
    let queue = Promise.resolve(undefined);
    return {
        getIterator: (params) => {
            const result = queue.then(() => getIterator(params));
            // Recover the chain on failure so one rejected request does not
            // poison every subsequent getIterator() call.
            queue = result.catch(() => undefined);
            return result;
        },
        getCacheStats,
        getIteratorMostInThePast,
        logOpenFrames,
        deleteDuplicateIterators,
    };
};
|
|
@@ -1,132 +0,0 @@
|
|
|
1
|
-
import { audioManager } from '../caches';
|
|
2
|
-
import { combineAudioDataAndClosePrevious } from '../convert-audiodata/combine-audiodata';
|
|
3
|
-
import { convertAudioData, fixFloatingPoint, } from '../convert-audiodata/convert-audiodata';
|
|
4
|
-
import { TARGET_NUMBER_OF_CHANNELS, TARGET_SAMPLE_RATE, } from '../convert-audiodata/resample-audiodata';
|
|
5
|
-
import { getSink } from '../get-sink';
|
|
6
|
-
import { getTimeInSeconds } from '../get-time-in-seconds';
|
|
7
|
-
import { isNetworkError, isUnsupportedConfigurationError, } from '../is-type-of-error';
|
|
8
|
-
/**
 * Extracts a slice of audio for `src` starting at `timeInSeconds` (after
 * loop/trim mapping) with the given duration, converting it to the target
 * Int16 PCM format.
 *
 * Returns either:
 * - the string 'network-error' | 'cannot-decode' | 'unknown-container-format'
 *   for recoverable decode problems, or
 * - `{ data, durationInSeconds }` where `data` is the combined PCM payload
 *   (or null when there is no audio to emit) and `durationInSeconds` is the
 *   media duration (only resolved when `loop` is true, otherwise null).
 */
const extractAudioInternal = async ({ src, timeInSeconds: unloopedTimeInSeconds, durationInSeconds: durationNotYetApplyingPlaybackRate, logLevel, loop, playbackRate, audioStreamIndex, trimBefore, trimAfter, fps, maxCacheSize, }) => {
    const { getAudio, actualMatroskaTimestamps, isMatroska, getDuration } = await getSink(src, logLevel);
    // The media duration is only needed to wrap the requested time when looping.
    let mediaDurationInSeconds = null;
    if (loop) {
        mediaDurationInSeconds = await getDuration();
    }
    const audio = await getAudio(audioStreamIndex);
    // getAudio() signals recoverable failures via sentinel strings.
    if (audio === 'network-error') {
        return 'network-error';
    }
    if (audio === 'no-audio-track') {
        return { data: null, durationInSeconds: null };
    }
    if (audio === 'cannot-decode-audio') {
        return 'cannot-decode';
    }
    if (audio === 'unknown-container-format') {
        return 'unknown-container-format';
    }
    // Map the composition time onto the media timeline (loop wrapping,
    // trimBefore/trimAfter, playback rate).
    const timeInSeconds = getTimeInSeconds({
        loop,
        mediaDurationInSeconds,
        unloopedTimeInSeconds,
        src,
        trimAfter,
        playbackRate,
        trimBefore,
        fps,
        ifNoMediaDuration: 'fail',
    });
    // null means the requested time falls outside the playable range.
    if (timeInSeconds === null) {
        return { data: null, durationInSeconds: mediaDurationInSeconds };
    }
    try {
        const sampleIterator = await audioManager.getIterator({
            src,
            timeInSeconds,
            audioSampleSink: audio.sampleSink,
            isMatroska,
            actualMatroskaTimestamps,
            logLevel,
            maxCacheSize,
        });
        // The requested duration is in composition time; stretch it to media
        // time by the playback rate.
        const durationInSeconds = durationNotYetApplyingPlaybackRate * playbackRate;
        const samples = await sampleIterator.getSamples(timeInSeconds, durationInSeconds);
        audioManager.logOpenFrames();
        const audioDataArray = [];
        for (let i = 0; i < samples.length; i++) {
            const sample = samples[i];
            // Less than 1 sample would be included - we did not need it after all!
            // (The sample starts within one sample-duration of the requested
            // end of the window.)
            if (Math.abs(sample.timestamp - (timeInSeconds + durationInSeconds)) *
                sample.sampleRate <
                1) {
                continue;
            }
            // Sample ends at or before the requested window — skip it.
            if (sample.timestamp + sample.duration <= timeInSeconds) {
                continue;
            }
            const isFirstSample = i === 0;
            const isLastSample = i === samples.length - 1;
            const audioDataRaw = sample.toAudioData();
            // amount of samples to shave from start and end
            let trimStartInSeconds = 0;
            let trimEndInSeconds = 0;
            let leadingSilence = null;
            if (isFirstSample) {
                trimStartInSeconds = fixFloatingPoint(timeInSeconds - sample.timestamp);
                // Negative trim means the first sample starts AFTER the
                // requested time — pad the gap with silence instead.
                if (trimStartInSeconds < 0) {
                    const silenceFrames = Math.ceil(fixFloatingPoint(-trimStartInSeconds * TARGET_SAMPLE_RATE));
                    leadingSilence = {
                        data: new Int16Array(silenceFrames * TARGET_NUMBER_OF_CHANNELS),
                        numberOfFrames: silenceFrames,
                        timestamp: timeInSeconds * 1000000,
                        durationInMicroSeconds: (silenceFrames / TARGET_SAMPLE_RATE) * 1000000,
                    };
                    trimStartInSeconds = 0;
                }
            }
            if (isLastSample) {
                trimEndInSeconds =
                    // clamp to 0 in case the audio ends early
                    Math.max(0, sample.timestamp +
                        sample.duration -
                        (timeInSeconds + durationInSeconds));
            }
            const audioData = convertAudioData({
                audioData: audioDataRaw,
                trimStartInSeconds,
                trimEndInSeconds,
                playbackRate,
                audioDataTimestamp: sample.timestamp,
                isLast: isLastSample,
            });
            // Release the WebCodecs AudioData as soon as it is converted.
            audioDataRaw.close();
            if (audioData.numberOfFrames === 0) {
                continue;
            }
            if (leadingSilence) {
                audioDataArray.push(leadingSilence);
            }
            audioDataArray.push(audioData);
        }
        if (audioDataArray.length === 0) {
            return { data: null, durationInSeconds: mediaDurationInSeconds };
        }
        const combined = combineAudioDataAndClosePrevious(audioDataArray);
        return { data: combined, durationInSeconds: mediaDurationInSeconds };
    }
    catch (err) {
        const error = err;
        // Recoverable error classes are mapped to sentinel strings so the
        // caller can render silence / retry; anything else is rethrown.
        if (isNetworkError(error)) {
            return 'network-error';
        }
        if (isUnsupportedConfigurationError(error)) {
            return 'cannot-decode';
        }
        throw err;
    }
};
|
|
128
|
-
// Serializes extraction requests so only one runs at a time.
let queue = Promise.resolve(undefined);

/**
 * Extracts audio for the given parameters, queued behind any extraction
 * that is still in flight.
 *
 * @param {object} params - forwarded to extractAudioInternal().
 * @returns {Promise} resolves/rejects with that extraction's result only.
 */
export const extractAudio = (params) => {
    const result = queue.then(() => extractAudioInternal(params));
    // Keep the queue alive even when an extraction rejects, so a single
    // failure does not poison every subsequent call.
    queue = result.catch(() => undefined);
    return result;
};
|
|
@@ -1,228 +0,0 @@
|
|
|
1
|
-
import { AudioBufferSink, InputDisposedError } from 'mediabunny';
|
|
2
|
-
import { isAlreadyQueued, makeAudioIterator, } from './audio/audio-preview-iterator';
|
|
3
|
-
import { makePrewarmedAudioIteratorCache } from './prewarm-iterator-for-looping';
|
|
4
|
-
/**
 * Manages the audio side of preview playback for one media element:
 * creates audio buffer iterators, schedules decoded chunks as
 * AudioBufferSourceNodes on the shared AudioContext, and handles seeking,
 * pause/resume, loop pre-warming, muting and volume.
 */
export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremounting, sharedAudioContext, getIsLooping, getEndTime, getStartTime, updatePlaybackTime, }) => {
    let muted = false;
    let currentVolume = 1;
    // Single gain node through which all scheduled buffers play; used for
    // both volume and mute.
    const gainNode = sharedAudioContext.createGain();
    gainNode.connect(sharedAudioContext.destination);
    const audioSink = new AudioBufferSink(audioTrack);
    const prewarmedAudioIteratorCache = makePrewarmedAudioIteratorCache(audioSink);
    // The currently active iterator, or null when none has been started.
    let audioBufferIterator = null;
    // Counter exposed for diagnostics via getAudioIteratorsCreated().
    let audioIteratorsCreated = 0;
    // Playback-delay handle held while an iterator is being started; cleared
    // in the finally block of startAudioIterator or on destroyIterator().
    let currentDelayHandle = null;
    // Wraps `buffer` in a source node, routes it through the gain node and
    // hands it to the caller-provided scheduler. The node is tracked on the
    // iterator so it can be cancelled/moved on pause or destroy.
    const scheduleAudioChunk = ({ buffer, mediaTimestamp, playbackRate, scheduleAudioNode, }) => {
        if (!audioBufferIterator) {
            throw new Error('Audio buffer iterator not found');
        }
        const node = sharedAudioContext.createBufferSource();
        node.buffer = buffer;
        node.playbackRate.value = playbackRate;
        node.connect(gainNode);
        scheduleAudioNode(node, mediaTimestamp);
        const iterator = audioBufferIterator;
        iterator.addQueuedAudioNode(node, mediaTimestamp, buffer);
        node.onended = () => {
            // Some leniency is needed as we find that sometimes onended is fired a bit too early
            setTimeout(() => {
                iterator.removeQueuedAudioNode(node);
            }, 30);
        };
    };
    // Schedules a decoded chunk immediately when playing; otherwise parks it
    // on the iterator to be scheduled after resuming.
    const onAudioChunk = ({ getIsPlaying, buffer, playbackRate, scheduleAudioNode, }) => {
        if (getIsPlaying()) {
            scheduleAudioChunk({
                buffer: buffer.buffer,
                mediaTimestamp: buffer.timestamp,
                playbackRate,
                scheduleAudioNode,
            });
        }
        else {
            if (!audioBufferIterator) {
                throw new Error('Audio buffer iterator not found');
            }
            audioBufferIterator.addChunkForAfterResuming(buffer.buffer, buffer.timestamp);
        }
    };
    // Tears down the previous iterator (if any), creates a fresh one at
    // `startFromSecond` and pre-schedules up to 3 buffers. Playback is
    // delayed (buffering state) until this completes or fails.
    const startAudioIterator = async ({ nonce, playbackRate, startFromSecond, getIsPlaying, scheduleAudioNode, }) => {
        updatePlaybackTime(startFromSecond);
        audioBufferIterator?.destroy();
        const delayHandle = delayPlaybackHandleIfNotPremounting();
        currentDelayHandle = delayHandle;
        const iterator = makeAudioIterator(startFromSecond, prewarmedAudioIteratorCache);
        audioIteratorsCreated++;
        audioBufferIterator = iterator;
        try {
            // Schedule up to 3 buffers ahead of the current time
            for (let i = 0; i < 3; i++) {
                const result = await iterator.getNext();
                // Bail out if we were superseded while awaiting.
                if (iterator.isDestroyed()) {
                    return;
                }
                if (nonce.isStale()) {
                    return;
                }
                if (!result.value) {
                    // media ended
                    return;
                }
                onAudioChunk({
                    getIsPlaying,
                    buffer: result.value,
                    playbackRate,
                    scheduleAudioNode,
                });
            }
        }
        catch (e) {
            if (e instanceof InputDisposedError) {
                // iterator was disposed by a newer startAudioIterator call
                // this is expected during rapid seeking
                return;
            }
            throw e;
        }
        finally {
            delayHandle.unblock();
            currentDelayHandle = null;
        }
    };
    // On pause, already-scheduled nodes are stopped and remembered so they
    // can be re-scheduled on resume.
    const pausePlayback = () => {
        if (!audioBufferIterator) {
            return;
        }
        audioBufferIterator.moveQueuedChunksToPauseQueue();
    };
    // Seeks to `newTime`. Tries, in order: pre-warm for an upcoming loop
    // restart, re-using already-queued audio, asking the current iterator to
    // satisfy the seek, and finally creating a brand-new iterator.
    const seek = async ({ newTime, nonce, fps, playbackRate, getIsPlaying, scheduleAudioNode, bufferState, }) => {
        if (getIsLooping()) {
            // If less than 1 second from the end away, we pre-warm a new iterator
            if (getEndTime() - newTime < 1) {
                prewarmedAudioIteratorCache.prewarmIteratorForLooping({
                    timeToSeek: getStartTime(),
                });
            }
        }
        if (!audioBufferIterator) {
            await startAudioIterator({
                nonce,
                playbackRate,
                startFromSecond: newTime,
                getIsPlaying,
                scheduleAudioNode,
            });
            return;
        }
        const queuedPeriod = audioBufferIterator.getQueuedPeriod();
        const currentTimeIsAlreadyQueued = isAlreadyQueued(newTime, queuedPeriod);
        if (!currentTimeIsAlreadyQueued) {
            // No waiting allowed here (2nd arg null): if the iterator cannot
            // satisfy the seek from what it has, we restart instead.
            const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(newTime, null, (buffer) => {
                if (!nonce.isStale()) {
                    onAudioChunk({
                        getIsPlaying,
                        buffer,
                        playbackRate,
                        scheduleAudioNode,
                    });
                }
            });
            if (nonce.isStale()) {
                return;
            }
            if (audioSatisfyResult.type === 'ended') {
                return;
            }
            if (audioSatisfyResult.type === 'not-satisfied') {
                await startAudioIterator({
                    nonce,
                    playbackRate,
                    startFromSecond: newTime,
                    getIsPlaying,
                    scheduleAudioNode,
                });
                return;
            }
        }
        const nextTime = newTime +
            // 3 frames ahead to get enough of a buffer
            (1 / fps) * Math.max(1, playbackRate) * 3;
        const nextIsAlreadyQueued = isAlreadyQueued(nextTime, audioBufferIterator.getQueuedPeriod());
        if (!nextIsAlreadyQueued) {
            // here we allow waiting for the next buffer to be loaded
            // it's better than to create a new iterator
            // because we already know we are in the right spot
            const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(nextTime, {
                type: 'allow-wait',
                waitCallback: () => {
                    // Enter buffering state while waiting; the returned
                    // cleanup unblocks it.
                    const handle = bufferState.delayPlayback();
                    return () => {
                        handle.unblock();
                    };
                },
            }, (buffer) => {
                if (!nonce.isStale()) {
                    onAudioChunk({
                        getIsPlaying,
                        buffer,
                        playbackRate,
                        scheduleAudioNode,
                    });
                }
            });
            if (nonce.isStale()) {
                return;
            }
            if (audioSatisfyResult.type === 'ended') {
                return;
            }
            if (audioSatisfyResult.type === 'not-satisfied') {
                await startAudioIterator({
                    nonce,
                    playbackRate,
                    startFromSecond: newTime,
                    getIsPlaying,
                    scheduleAudioNode,
                });
            }
        }
    };
    // Re-schedules the chunks that were parked while paused.
    const resumeScheduledAudioChunks = ({ playbackRate, scheduleAudioNode, }) => {
        if (!audioBufferIterator) {
            return;
        }
        for (const chunk of audioBufferIterator.getAndClearAudioChunksForAfterResuming()) {
            scheduleAudioChunk({
                buffer: chunk.buffer,
                mediaTimestamp: chunk.timestamp,
                playbackRate,
                scheduleAudioNode,
            });
        }
    };
    return {
        startAudioIterator,
        resumeScheduledAudioChunks,
        pausePlayback,
        getAudioBufferIterator: () => audioBufferIterator,
        destroyIterator: () => {
            prewarmedAudioIteratorCache.destroy();
            audioBufferIterator?.destroy();
            audioBufferIterator = null;
            // Release any pending buffering state so playback is not stuck.
            if (currentDelayHandle) {
                currentDelayHandle.unblock();
                currentDelayHandle = null;
            }
        },
        seek,
        getAudioIteratorsCreated: () => audioIteratorsCreated,
        setMuted: (newMuted) => {
            muted = newMuted;
            gainNode.gain.value = muted ? 0 : currentVolume;
        },
        setVolume: (volume) => {
            // Volume is clamped to be non-negative; mute overrides it.
            currentVolume = Math.max(0, volume);
            gainNode.gain.value = muted ? 0 : currentVolume;
        },
        scheduleAudioChunk,
    };
};
|
|
@@ -1,13 +0,0 @@
|
|
|
1
|
-
// Cached result of the WebGL2 capability probe (null = not probed yet).
let cachedWebGl2Support = null;

// Probes WebGL2 support by attempting to create a context on a 1x1
// offscreen canvas.
const probeWebGl2Support = () => {
    const probeCanvas = new OffscreenCanvas(1, 1);
    return probeCanvas.getContext('webgl2') !== null;
};

/**
 * Returns whether this environment can create a WebGL2 context.
 * The probe runs at most once; subsequent calls return the cached result.
 */
export const canBrowserUseWebGl2 = () => {
    if (cachedWebGl2Support === null) {
        cachedWebGl2Support = probeWebGl2Support();
    }
    return cachedWebGl2Support;
};
|
package/dist/caches.js
DELETED
|
@@ -1,61 +0,0 @@
|
|
|
1
|
-
import React from 'react';
|
|
2
|
-
import { cancelRender, Internals } from 'remotion';
|
|
3
|
-
import { makeAudioManager } from './audio-extraction/audio-manager';
|
|
4
|
-
import { makeKeyframeManager } from './video-extraction/keyframe-manager';
|
|
5
|
-
// TODO: make it dependent on the fps and concurrency
export const SAFE_BACK_WINDOW_IN_SECONDS = 1;

// Singleton caches shared by all media components in this runtime.
export const keyframeManager = makeKeyframeManager();
export const audioManager = makeAudioManager();

/**
 * Sums the cache statistics of the video keyframe cache and the audio
 * iterator cache into a single {count, totalSize} record.
 */
export const getTotalCacheStats = async () => {
    const videoStats = await keyframeManager.getCacheStats();
    const audioStats = audioManager.getCacheStats();
    const count = videoStats.count + audioStats.count;
    const totalSize = videoStats.totalSize + audioStats.totalSize;
    return { count, totalSize };
};
|
|
17
|
-
/**
 * Determines the media cache budget in bytes, without caching the result.
 * Precedence: explicit `mediaCacheSizeInBytes` prop → half of the available
 * system memory (clamped to 500MB..20GB) → a 1GB fallback.
 */
const getUncachedMaxCacheSize = (logLevel) => {
    const hasWindow = typeof window !== 'undefined';
    const explicitSize = hasWindow ? window.remotion_mediaCacheSizeInBytes : null;
    if (explicitSize !== undefined && explicitSize !== null) {
        // Reject sizes outside the supported range before using them.
        if (explicitSize < 240 * 1024 * 1024) {
            cancelRender(new Error(`The minimum value for the "mediaCacheSizeInBytes" prop is 240MB (${240 * 1024 * 1024}), got: ${window.remotion_mediaCacheSizeInBytes}`));
        }
        if (explicitSize > 20000 * 1024 * 1024) {
            cancelRender(new Error(`The maximum value for the "mediaCacheSizeInBytes" prop is 20GB (${20000 * 1024 * 1024}), got: ${window.remotion_mediaCacheSizeInBytes}`));
        }
        Internals.Log.verbose({ logLevel, tag: '@remotion/media' }, `Using cache size set using "mediaCacheSizeInBytes": ${(window.remotion_mediaCacheSizeInBytes / 1024 / 1024).toFixed(1)} MB`);
        return window.remotion_mediaCacheSizeInBytes;
    }
    const availableMemory = hasWindow ? window.remotion_initialMemoryAvailable : null;
    if (availableMemory !== undefined && availableMemory !== null) {
        const value = availableMemory / 2;
        if (value < 500 * 1024 * 1024) {
            Internals.Log.verbose({ logLevel, tag: '@remotion/media' }, `Using cache size set based on minimum value of 500MB (which is more than half of the available system memory!)`);
            return 500 * 1024 * 1024;
        }
        if (value > 20000 * 1024 * 1024) {
            Internals.Log.verbose({ logLevel, tag: '@remotion/media' }, `Using cache size set based on maximum value of 20GB (which is less than half of the available system memory)`);
            return 20000 * 1024 * 1024;
        }
        Internals.Log.verbose({ logLevel, tag: '@remotion/media' }, `Using cache size set based on available memory (50% of available memory): ${(value / 1024 / 1024).toFixed(1)} MB`);
        return value;
    }
    return 1000 * 1000 * 1000; // 1GB
};

// Memoized result — the budget is computed once per page load.
let cachedMaxCacheSize = null;

/** Cached wrapper around getUncachedMaxCacheSize(). */
export const getMaxVideoCacheSize = (logLevel) => {
    if (cachedMaxCacheSize === null) {
        cachedMaxCacheSize = getUncachedMaxCacheSize(logLevel);
    }
    return cachedMaxCacheSize;
};

/**
 * React hook: returns the cache size from context when provided, otherwise
 * falls back to the computed budget.
 */
export const useMaxMediaCacheSize = (logLevel) => {
    const contextValue = React.useContext(Internals.MaxMediaCacheSizeContext);
    return contextValue === null ? getMaxVideoCacheSize(logLevel) : contextValue;
};
|
|
@@ -1,17 +0,0 @@
|
|
|
1
|
-
/**
 * Scales an Int16 PCM sample array by `volume` in place, clamping each
 * result to the signed 16-bit range [-32768, 32767].
 */
export const applyVolume = (array, volume) => {
    // Unity gain: nothing to do.
    if (volume === 1) {
        return;
    }
    for (let index = 0; index < array.length; index++) {
        const scaled = array[index] * volume;
        array[index] = Math.min(32767, Math.max(-32768, scaled));
    }
};
|
|
@@ -1,23 +0,0 @@
|
|
|
1
|
-
import { fixFloatingPoint } from './convert-audiodata';
|
|
2
|
-
import { TARGET_NUMBER_OF_CHANNELS } from './resample-audiodata';
|
|
3
|
-
/**
 * Concatenates an array of converted PCM chunks into a single record.
 * The combined record starts at the first chunk's timestamp; frame count
 * and duration are the sums over all chunks.
 * Precondition: `audioDataArray` is non-empty (callers guard for this).
 */
export const combineAudioDataAndClosePrevious = (audioDataArray) => {
    const { timestamp } = audioDataArray[0];
    let totalFrames = 0;
    let totalDurationInMicroSeconds = 0;
    for (const chunk of audioDataArray) {
        totalFrames += chunk.numberOfFrames;
        totalDurationInMicroSeconds += chunk.durationInMicroSeconds;
    }
    const combined = new Int16Array(totalFrames * TARGET_NUMBER_OF_CHANNELS);
    let writeOffset = 0;
    for (const chunk of audioDataArray) {
        combined.set(chunk.data, writeOffset);
        writeOffset += chunk.data.length;
    }
    return {
        data: combined,
        numberOfFrames: totalFrames,
        timestamp: fixFloatingPoint(timestamp),
        durationInMicroSeconds: fixFloatingPoint(totalDurationInMicroSeconds),
    };
};
|
|
@@ -1,73 +0,0 @@
|
|
|
1
|
-
import { resampleAudioData, TARGET_NUMBER_OF_CHANNELS, TARGET_SAMPLE_RATE, } from './resample-audiodata';
|
|
2
|
-
// Sample format requested from AudioData.copyTo(): signed 16-bit interleaved.
const FORMAT = 's16';

/**
 * Snaps values that are within 1e-7 of an integer to that integer, undoing
 * floating-point drift from repeated arithmetic. Other values pass through
 * unchanged.
 */
export const fixFloatingPoint = (value) => {
    const fraction = Math.abs(value % 1);
    const nearLowerInteger = fraction < 0.0000001;
    const nearUpperInteger = fraction > 0.9999999;
    if (!nearLowerInteger && !nearUpperInteger) {
        return value;
    }
    // Within the snap band, rounding to the nearest integer picks the
    // integer the value drifted away from, for both signs.
    return Math.round(value);
};

// Math.ceil, but first snap away floating-point drift so that e.g.
// 3.0000000001 yields 3 instead of 4.
const ceilButNotIfFloatingPointIssue = (value) => Math.ceil(fixFloatingPoint(value));
|
|
17
|
-
/**
 * Converts one WebCodecs AudioData sample to the target PCM representation:
 * trims the requested amounts from start/end, resamples to
 * TARGET_SAMPLE_RATE / TARGET_NUMBER_OF_CHANNELS, and compensates for the
 * playback rate. Returns a plain {data, numberOfFrames, timestamp,
 * durationInMicroSeconds} record (timestamps in microseconds).
 * Throws when trimming/resampling would yield less than one output frame.
 */
export const convertAudioData = ({ audioData, trimStartInSeconds, trimEndInSeconds, playbackRate, audioDataTimestamp, isLast, }) => {
    const { numberOfChannels: srcNumberOfChannels, sampleRate: currentSampleRate, numberOfFrames, } = audioData;
    // >1 means the source has a higher sample rate than the target.
    const ratio = currentSampleRate / TARGET_SAMPLE_RATE;
    // Always rounding down start timestamps and rounding up end durations
    // to ensure there are no gaps when the samples don't align
    // In @remotion/renderer inline audio mixing, we also round down the sample start
    // timestamp and round up the end timestamp
    // This might lead to overlapping, hopefully aligning perfectly!
    // Test case: https://github.com/remotion-dev/remotion/issues/5758
    const frameOffset = Math.floor(fixFloatingPoint(trimStartInSeconds * audioData.sampleRate));
    const unroundedFrameCount = numberOfFrames - trimEndInSeconds * audioData.sampleRate - frameOffset;
    // The last sample of a window rounds up (see note above); inner samples
    // round to nearest.
    const frameCount = isLast
        ? ceilButNotIfFloatingPointIssue(unroundedFrameCount)
        : Math.round(unroundedFrameCount);
    const newNumberOfFrames = isLast
        ? ceilButNotIfFloatingPointIssue(unroundedFrameCount / ratio / playbackRate)
        : Math.round(unroundedFrameCount / ratio / playbackRate);
    if (newNumberOfFrames === 0) {
        throw new Error('Cannot resample - the given sample rate would result in less than 1 sample');
    }
    // Copy the trimmed source frames out as interleaved s16.
    const srcChannels = new Int16Array(srcNumberOfChannels * frameCount);
    audioData.copyTo(srcChannels, {
        planeIndex: 0,
        format: FORMAT,
        frameOffset,
        frameCount,
    });
    const data = new Int16Array(newNumberOfFrames * TARGET_NUMBER_OF_CHANNELS);
    // Source frames consumed per produced target frame.
    const chunkSize = frameCount / newNumberOfFrames;
    // The trimmed-off leading frames shift the output timestamp forward.
    const timestampOffsetMicroseconds = (frameOffset / audioData.sampleRate) * 1000000;
    // Fast path: same rate, same channel count, no rate change — the copied
    // frames can be returned without resampling.
    if (newNumberOfFrames === frameCount &&
        TARGET_NUMBER_OF_CHANNELS === srcNumberOfChannels &&
        playbackRate === 1) {
        return {
            data: srcChannels,
            numberOfFrames: newNumberOfFrames,
            timestamp: audioDataTimestamp * 1000000 +
                fixFloatingPoint(timestampOffsetMicroseconds),
            durationInMicroSeconds: fixFloatingPoint((newNumberOfFrames / TARGET_SAMPLE_RATE) * 1000000),
        };
    }
    resampleAudioData({
        srcNumberOfChannels,
        sourceChannels: srcChannels,
        destination: data,
        targetFrames: newNumberOfFrames,
        chunkSize,
    });
    const newAudioData = {
        data,
        numberOfFrames: newNumberOfFrames,
        timestamp: audioDataTimestamp * 1000000 +
            fixFloatingPoint(timestampOffsetMicroseconds),
        durationInMicroSeconds: fixFloatingPoint((newNumberOfFrames / TARGET_SAMPLE_RATE) * 1000000),
    };
    return newAudioData;
};
|