@remotion/media 4.0.378 → 4.0.380
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/audio/audio-for-preview.js +1 -1
- package/dist/audio/audio-preview-iterator.js +27 -4
- package/dist/audio/props.d.ts +1 -0
- package/dist/audio-extraction/extract-audio.d.ts +1 -1
- package/dist/audio-extraction/extract-audio.js +3 -0
- package/dist/audio-for-rendering.d.ts +3 -0
- package/dist/audio-for-rendering.js +94 -0
- package/dist/audio.d.ts +3 -0
- package/dist/audio.js +60 -0
- package/dist/audiodata-to-array.d.ts +0 -0
- package/dist/audiodata-to-array.js +1 -0
- package/dist/convert-audiodata/data-types.d.ts +1 -0
- package/dist/convert-audiodata/data-types.js +22 -0
- package/dist/convert-audiodata/is-planar-format.d.ts +1 -0
- package/dist/convert-audiodata/is-planar-format.js +3 -0
- package/dist/convert-audiodata/log-audiodata.d.ts +1 -0
- package/dist/convert-audiodata/log-audiodata.js +8 -0
- package/dist/convert-audiodata/trim-audiodata.d.ts +0 -0
- package/dist/convert-audiodata/trim-audiodata.js +1 -0
- package/dist/deserialized-audiodata.d.ts +15 -0
- package/dist/deserialized-audiodata.js +26 -0
- package/dist/esm/index.mjs +56 -8
- package/dist/extract-audio.d.ts +7 -0
- package/dist/extract-audio.js +98 -0
- package/dist/extract-frame-and-audio.js +9 -0
- package/dist/extract-frame-via-broadcast-channel.d.ts +15 -0
- package/dist/extract-frame-via-broadcast-channel.js +104 -0
- package/dist/extract-frame.d.ts +27 -0
- package/dist/extract-frame.js +21 -0
- package/dist/extrct-audio.d.ts +7 -0
- package/dist/extrct-audio.js +94 -0
- package/dist/get-frames-since-keyframe.d.ts +22 -0
- package/dist/get-frames-since-keyframe.js +41 -0
- package/dist/keyframe-bank.d.ts +25 -0
- package/dist/keyframe-bank.js +120 -0
- package/dist/keyframe-manager.d.ts +23 -0
- package/dist/keyframe-manager.js +170 -0
- package/dist/log.d.ts +10 -0
- package/dist/log.js +33 -0
- package/dist/new-video-for-rendering.d.ts +3 -0
- package/dist/new-video-for-rendering.js +108 -0
- package/dist/new-video.d.ts +3 -0
- package/dist/new-video.js +37 -0
- package/dist/props.d.ts +29 -0
- package/dist/props.js +1 -0
- package/dist/remember-actual-matroska-timestamps.d.ts +4 -0
- package/dist/remember-actual-matroska-timestamps.js +19 -0
- package/dist/serialize-videoframe.d.ts +0 -0
- package/dist/serialize-videoframe.js +1 -0
- package/dist/video/media-player.d.ts +62 -0
- package/dist/video/media-player.js +361 -0
- package/dist/video/new-video-for-preview.d.ts +10 -0
- package/dist/video/new-video-for-preview.js +108 -0
- package/dist/video/props.d.ts +1 -0
- package/dist/video/timeout-utils.d.ts +2 -0
- package/dist/video/timeout-utils.js +18 -0
- package/dist/video/video-for-rendering.js +1 -1
- package/dist/video-extraction/extract-frame.d.ts +2 -0
- package/dist/video-extraction/extract-frame.js +3 -0
- package/dist/video-extraction/get-frames-since-keyframe.d.ts +2 -2
- package/dist/video-extraction/get-frames-since-keyframe.js +13 -3
- package/dist/video-extraction/media-player.d.ts +64 -0
- package/dist/video-extraction/media-player.js +501 -0
- package/dist/video-extraction/new-video-for-preview.d.ts +10 -0
- package/dist/video-extraction/new-video-for-preview.js +114 -0
- package/dist/video-for-rendering.d.ts +3 -0
- package/dist/video-for-rendering.js +108 -0
- package/dist/video.d.ts +3 -0
- package/dist/video.js +37 -0
- package/package.json +3 -3
package/dist/audio/audio-for-preview.js
CHANGED

@@ -261,7 +261,7 @@ const AudioForPreviewAssertedShowing = ({ src, playbackRate, logLevel, muted, vo
         mediaPlayer.setIsPostmounting(isPostmounting);
     }, [isPostmounting, mediaPlayerReady]);
     if (shouldFallbackToNativeAudio && !disallowFallbackToHtml5Audio) {
-        return (_jsx(RemotionAudio, { src: src, muted: muted, volume: volume, startFrom: trimBefore, endAt: trimAfter, playbackRate: playbackRate, loopVolumeCurveBehavior: loopVolumeCurveBehavior, name: name, loop: loop, showInTimeline: showInTimeline, stack: stack ?? undefined, toneFrequency: toneFrequency, audioStreamIndex: audioStreamIndex, pauseWhenBuffering: fallbackHtml5AudioProps?.pauseWhenBuffering, ...fallbackHtml5AudioProps }));
+        return (_jsx(RemotionAudio, { src: src, muted: muted, volume: volume, startFrom: trimBefore, endAt: trimAfter, playbackRate: playbackRate, loopVolumeCurveBehavior: loopVolumeCurveBehavior, name: name, loop: loop, showInTimeline: showInTimeline, stack: stack ?? undefined, toneFrequency: toneFrequency, audioStreamIndex: audioStreamIndex, pauseWhenBuffering: fallbackHtml5AudioProps?.pauseWhenBuffering, crossOrigin: fallbackHtml5AudioProps?.crossOrigin, ...fallbackHtml5AudioProps }));
     }
     return null;
 };
package/dist/audio/audio-preview-iterator.js
CHANGED

@@ -6,6 +6,7 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
     const iterator = audioSink.buffers(startFromSecond);
     const queuedAudioNodes = [];
     const audioChunksForAfterResuming = [];
+    let mostRecentTimestamp = -Infinity;
     const cleanupAudioQueue = () => {
         for (const node of queuedAudioNodes) {
             node.node.stop();

@@ -31,12 +32,25 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
             },
         };
     }
+    if (result.value) {
+        mostRecentTimestamp = Math.max(mostRecentTimestamp, result.value.timestamp + result.value.duration);
+        return {
+            type: 'got-buffer',
+            buffer: result.value,
+        };
+    }
     return {
-        type: 'got-
-
+        type: 'got-end',
+        mostRecentTimestamp,
     };
 };
 const tryToSatisfySeek = async (time, allowWait, onBufferScheduled) => {
+    if (time < startFromSecond) {
+        return {
+            type: 'not-satisfied',
+            reason: `time requested is before the start of the iterator`,
+        };
+    }
     while (true) {
         const buffer = await getNextOrNullIfNotAvailable(allowWait);
         if (buffer.type === 'need-to-wait-for-it') {

@@ -45,12 +59,18 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
                 reason: 'iterator did not have buffer ready',
             };
         }
-        if (buffer.type === 'got-
-        if (
+        if (buffer.type === 'got-end') {
+            if (time >= mostRecentTimestamp) {
                 return {
                     type: 'ended',
                 };
             }
+            return {
+                type: 'not-satisfied',
+                reason: `iterator ended before the requested time`,
+            };
+        }
+        if (buffer.type === 'got-buffer') {
             const bufferTimestamp = roundTo4Digits(buffer.buffer.timestamp);
             const bufferEndTimestamp = roundTo4Digits(buffer.buffer.timestamp + buffer.buffer.duration);
             const timestamp = roundTo4Digits(time);

@@ -101,6 +121,9 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
         },
         getNext: async () => {
             const next = await iterator.next();
+            if (next.value) {
+                mostRecentTimestamp = Math.max(mostRecentTimestamp, next.value.timestamp + next.value.duration);
+            }
             return next;
         },
         isDestroyed: () => {
package/dist/audio/props.d.ts
CHANGED

@@ -1,5 +1,6 @@
 import type { LogLevel, LoopVolumeCurveBehavior, VolumeProp } from 'remotion';
 export type FallbackHtml5AudioProps = {
+    crossOrigin?: '' | 'anonymous' | 'use-credentials' | undefined;
     onError?: (err: Error) => void;
     useWebAudioApi?: boolean;
     acceptableTimeShiftInSeconds?: number;
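The new `crossOrigin` field mirrors the HTML media element attribute: 'anonymous' makes CORS requests without credentials, 'use-credentials' includes them, and the empty string behaves like 'anonymous' per the HTML spec. A hedged usage sketch against the prop shape declared above:

// TypeScript sketch; FallbackHtml5AudioProps is the type from this diff,
// and pauseWhenBuffering is assumed to be part of it based on how the
// preview component reads it.
const fallbackHtml5AudioProps: FallbackHtml5AudioProps = {
    // Needed when the audio file is served from another origin and the
    // fallback <audio> tag should still be readable by the Web Audio API.
    crossOrigin: 'anonymous',
    pauseWhenBuffering: true,
};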
package/dist/audio-extraction/extract-audio.d.ts
CHANGED

@@ -17,6 +17,6 @@ type ExtractAudioParams = {
 declare const extractAudioInternal: ({ src, timeInSeconds: unloopedTimeInSeconds, durationInSeconds: durationNotYetApplyingPlaybackRate, logLevel, loop, playbackRate, audioStreamIndex, trimBefore, trimAfter, fps, maxCacheSize, }: ExtractAudioParams) => Promise<{
     data: PcmS16AudioData | null;
     durationInSeconds: number | null;
-} | "cannot-decode" | "unknown-container-format">;
+} | "cannot-decode" | "unknown-container-format" | "network-error">;
 export declare const extractAudio: (params: ExtractAudioParams) => Promise<ExtractAudioReturnType>;
 export {};
package/dist/audio-extraction/extract-audio.js
CHANGED

@@ -11,6 +11,9 @@ const extractAudioInternal = async ({ src, timeInSeconds: unloopedTimeInSeconds,
         mediaDurationInSeconds = await getDuration();
     }
     const audio = await getAudio(audioStreamIndex);
+    if (audio === 'network-error') {
+        return 'network-error';
+    }
     if (audio === 'no-audio-track') {
         return { data: null, durationInSeconds: null };
     }
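With this release, audio extraction can report a failed fetch distinctly instead of lumping it in with decode failures. A sketch of exhaustively handling the widened return type (the string variants come straight from the declaration above; assume `params: ExtractAudioParams` is in scope):

// TypeScript sketch of a caller.
const result = await extractAudio(params);
if (result === 'network-error') {
    // The asset could not be fetched (e.g. CORS failure or offline).
} else if (result === 'cannot-decode' || result === 'unknown-container-format') {
    // The source is unplayable.
} else {
    const { data, durationInSeconds } = result; // data: PcmS16AudioData | null
}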
package/dist/audio-for-rendering.js
ADDED

@@ -0,0 +1,94 @@
+import { useContext, useLayoutEffect, useMemo, useState } from 'react';
+import { cancelRender, Internals, useCurrentFrame, useDelayRender, useRemotionEnvironment, } from 'remotion';
+import { extractFrameViaBroadcastChannel } from './extract-frame-via-broadcast-channel';
+export const AudioForRendering = ({ volume: volumeProp, playbackRate, src, muted, loopVolumeCurveBehavior, delayRenderRetries, delayRenderTimeoutInMilliseconds, logLevel = window.remotion_logLevel, }) => {
+    const absoluteFrame = Internals.useTimelinePosition();
+    const videoConfig = Internals.useUnsafeVideoConfig();
+    const { registerRenderAsset, unregisterRenderAsset } = useContext(Internals.RenderAssetManager);
+    const frame = useCurrentFrame();
+    const volumePropsFrame = Internals.useFrameForVolumeProp(loopVolumeCurveBehavior ?? 'repeat');
+    const environment = useRemotionEnvironment();
+    const [id] = useState(() => `${Math.random()}`.replace('0.', ''));
+    if (!videoConfig) {
+        throw new Error('No video config found');
+    }
+    if (!src) {
+        throw new TypeError('No `src` was passed to <Video>.');
+    }
+    const volume = Internals.evaluateVolume({
+        volume: volumeProp,
+        frame: volumePropsFrame,
+        mediaVolume: 1,
+    });
+    Internals.warnAboutTooHighVolume(volume);
+    const shouldRenderAudio = useMemo(() => {
+        if (!window.remotion_audioEnabled) {
+            return false;
+        }
+        if (muted) {
+            return false;
+        }
+        if (volume <= 0) {
+            return false;
+        }
+        return true;
+    }, [muted, volume]);
+    const { fps } = videoConfig;
+    const { delayRender, continueRender } = useDelayRender();
+    useLayoutEffect(() => {
+        const actualFps = playbackRate ? fps / playbackRate : fps;
+        const timestamp = frame / actualFps;
+        const durationInSeconds = 1 / actualFps;
+        const newHandle = delayRender(`Extracting frame number ${frame}`, {
+            retries: delayRenderRetries ?? undefined,
+            timeoutInMilliseconds: delayRenderTimeoutInMilliseconds ?? undefined,
+        });
+        extractFrameViaBroadcastChannel({
+            src,
+            timeInSeconds: timestamp,
+            durationInSeconds,
+            logLevel: logLevel ?? 'info',
+            shouldRenderAudio,
+            isClientSideRendering: environment.isClientSideRendering,
+        })
+            .then(({ audio }) => {
+            if (audio) {
+                registerRenderAsset({
+                    type: 'inline-audio',
+                    id,
+                    audio: Array.from(audio.data),
+                    sampleRate: audio.sampleRate,
+                    numberOfChannels: audio.numberOfChannels,
+                    frame: absoluteFrame,
+                    timestamp: audio.timestamp,
+                    duration: (audio.numberOfFrames / audio.sampleRate) * 1000000,
+                });
+            }
+            continueRender(newHandle);
+        })
+            .catch((error) => {
+            cancelRender(error);
+        });
+        return () => {
+            continueRender(newHandle);
+            unregisterRenderAsset(id);
+        };
+    }, [
+        absoluteFrame,
+        continueRender,
+        delayRender,
+        delayRenderRetries,
+        delayRenderTimeoutInMilliseconds,
+        environment.isClientSideRendering,
+        fps,
+        frame,
+        id,
+        logLevel,
+        playbackRate,
+        registerRenderAsset,
+        shouldRenderAudio,
+        src,
+        unregisterRenderAsset,
+    ]);
+    return null;
+};
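Note the unit conversion in the registerRenderAsset call above: WebCodecs-style audio measures duration in microseconds, so sample frames are converted via frames / sampleRate * 1e6. A one-line worked example:

// 1024 frames at 48000 Hz → (1024 / 48000) * 1_000_000 ≈ 21333 µs
const durationInMicroseconds = (audio.numberOfFrames / audio.sampleRate) * 1_000_000;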
package/dist/audio.d.ts
ADDED
package/dist/audio.js
ADDED

@@ -0,0 +1,60 @@
+import { jsx as _jsx } from "react/jsx-runtime";
+import { useCallback, useContext } from 'react';
+import { cancelRender, Internals, Sequence, useRemotionEnvironment, } from 'remotion';
+import { SharedAudioContext } from '../../core/src/audio/shared-audio-tags';
+import { AudioForRendering } from './audio-for-rendering';
+const { validateMediaTrimProps, resolveTrimProps, validateMediaProps, AudioForPreview, } = Internals;
+export const Audio = (props) => {
+    const audioContext = useContext(SharedAudioContext);
+    // Should only destruct `trimBefore` and `trimAfter` from props,
+    // rest gets drilled down
+    const { trimBefore, trimAfter, name, pauseWhenBuffering, stack, showInTimeline, onError: onRemotionError, loop, ...otherProps } = props;
+    const environment = useRemotionEnvironment();
+    const onDuration = useCallback(() => undefined, []);
+    if (typeof props.src !== 'string') {
+        throw new TypeError(`The \`<Audio>\` tag requires a string for \`src\`, but got ${JSON.stringify(props.src)} instead.`);
+    }
+    validateMediaTrimProps({
+        startFrom: undefined,
+        endAt: undefined,
+        trimBefore,
+        trimAfter,
+    });
+    const { trimBeforeValue, trimAfterValue } = resolveTrimProps({
+        startFrom: undefined,
+        endAt: undefined,
+        trimBefore,
+        trimAfter,
+    });
+    const onError = useCallback((e) => {
+        // eslint-disable-next-line no-console
+        console.log(e.currentTarget.error);
+        // If there is no `loop` property, we don't need to get the duration
+        // and this does not need to be a fatal error
+        const errMessage = `Could not play audio: ${e.currentTarget.error}. See https://remotion.dev/docs/media-playback-error for help.`;
+        if (loop) {
+            if (onRemotionError) {
+                onRemotionError(new Error(errMessage));
+                return;
+            }
+            cancelRender(new Error(errMessage));
+        }
+        else {
+            onRemotionError?.(new Error(errMessage));
+            // eslint-disable-next-line no-console
+            console.warn(errMessage);
+        }
+    }, [onRemotionError, loop]);
+    if (typeof trimBeforeValue !== 'undefined' ||
+        typeof trimAfterValue !== 'undefined') {
+        return (_jsx(Sequence, { layout: "none", from: 0 - (trimBeforeValue ?? 0), showInTimeline: false, durationInFrames: trimAfterValue, name: name, children: _jsx(Audio, { pauseWhenBuffering: pauseWhenBuffering ?? false, ...otherProps }) }));
+    }
+    validateMediaProps(props, 'Video');
+    if (environment.isRendering) {
+        return _jsx(AudioForRendering, { ...otherProps });
+    }
+    const { onAutoPlayError, crossOrigin, delayRenderRetries, delayRenderTimeoutInMilliseconds, ...propsForPreview } = otherProps;
+    return (_jsx(AudioForPreview, { _remotionInternalNativeLoopPassed: props._remotionInternalNativeLoopPassed ?? false, _remotionInternalStack: stack ?? null, shouldPreMountAudioTags: audioContext !== null && audioContext.numberOfAudioTags > 0, ...propsForPreview, onNativeError: onError, onDuration: onDuration,
+        // Proposal: Make this default to true in v5
+        pauseWhenBuffering: pauseWhenBuffering ?? false, _remotionInternalNeedsDurationCalculation: Boolean(loop), showInTimeline: showInTimeline ?? true }));
+};
package/dist/audiodata-to-array.d.ts
File without changes

package/dist/audiodata-to-array.js
ADDED

@@ -0,0 +1 @@
+"use strict";
package/dist/convert-audiodata/data-types.d.ts
ADDED

@@ -0,0 +1 @@
+export declare const getDataTypeForAudioFormat: (format: AudioSampleFormat) => Float32ArrayConstructor | Int16ArrayConstructor | Uint8ArrayConstructor | Int32ArrayConstructor;
package/dist/convert-audiodata/data-types.js
ADDED

@@ -0,0 +1,22 @@
+export const getDataTypeForAudioFormat = (format) => {
+    switch (format) {
+        case 'f32':
+            return Float32Array;
+        case 'f32-planar':
+            return Float32Array;
+        case 's16':
+            return Int16Array;
+        case 's16-planar':
+            return Int16Array;
+        case 'u8':
+            return Uint8Array;
+        case 'u8-planar':
+            return Uint8Array;
+        case 's32':
+            return Int32Array;
+        case 's32-planar':
+            return Int32Array;
+        default:
+            throw new Error(`Unsupported audio format: ${format}`);
+    }
+};
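getDataTypeForAudioFormat pairs each WebCodecs AudioSampleFormat with its typed-array constructor; the -planar suffix only changes the memory layout, not the element type. A small usage sketch, assuming a decoded `audioData: AudioData` is in scope:

// TypeScript sketch using standard AudioData methods.
const Ctor = getDataTypeForAudioFormat('s16-planar'); // → Int16Array
const bytes = audioData.allocationSize({ planeIndex: 0 });
const plane = new Ctor(bytes / Ctor.BYTES_PER_ELEMENT);
audioData.copyTo(plane, { planeIndex: 0 }); // one channel of samples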
package/dist/convert-audiodata/is-planar-format.d.ts
ADDED

@@ -0,0 +1 @@
+export declare const isPlanarFormat: (format: AudioSampleFormat) => boolean;
package/dist/convert-audiodata/log-audiodata.d.ts
ADDED

@@ -0,0 +1 @@
+export declare const logAudioData: (audioData: AudioData) => string;
package/dist/convert-audiodata/trim-audiodata.d.ts
File without changes

package/dist/convert-audiodata/trim-audiodata.js
ADDED

@@ -0,0 +1 @@
+"use strict";
package/dist/deserialized-audiodata.d.ts
ADDED

@@ -0,0 +1,15 @@
+import type { DataType } from './convert-audiodata/data-types';
+export type SerializableAudioData = {
+    data: DataType[];
+    format: AudioSampleFormat;
+    numberOfChannels: number;
+    numberOfFrames: number;
+    sampleRate: number;
+};
+export declare const turnAudioDataIntoSerializableData: (audioData: AudioData) => {
+    data: (Float32Array<ArrayBuffer> | Int32Array<ArrayBuffer> | Int16Array<ArrayBuffer> | Uint8Array<ArrayBuffer>)[];
+    format: AudioSampleFormat;
+    numberOfChannels: number;
+    numberOfFrames: number;
+    sampleRate: number;
+};
package/dist/deserialized-audiodata.js
ADDED

@@ -0,0 +1,26 @@
+import { getDataTypeForAudioFormat } from './convert-audiodata/data-types';
+import { isPlanarFormat } from './convert-audiodata/is-planar-format';
+export const turnAudioDataIntoSerializableData = (audioData) => {
+    if (!audioData.format) {
+        throw new Error('AudioData format is not set');
+    }
+    const DataType = getDataTypeForAudioFormat(audioData.format);
+    const isPlanar = isPlanarFormat(audioData.format);
+    const planes = isPlanar ? audioData.numberOfChannels : 1;
+    const srcChannels = new Array(planes)
+        .fill(true)
+        .map(() => new DataType((isPlanar ? 1 : audioData.numberOfChannels) *
+        audioData.numberOfFrames));
+    for (let i = 0; i < planes; i++) {
+        audioData.copyTo(srcChannels[i], {
+            planeIndex: i,
+        });
+    }
+    return {
+        data: srcChannels,
+        format: audioData.format,
+        numberOfChannels: audioData.numberOfChannels,
+        numberOfFrames: audioData.numberOfFrames,
+        sampleRate: audioData.sampleRate,
+    };
+};
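turnAudioDataIntoSerializableData copies each plane (or the single interleaved buffer) into a plain typed array so the audio can cross a structured-clone boundary, such as the broadcast channel used elsewhere in this package. A hypothetical inverse for illustration only (`deserialize` is not part of this package), showing that the shape above suffices to rebuild a WebCodecs AudioData:

// TypeScript sketch. For planar formats the AudioData constructor expects
// all planes in one contiguous buffer, so the planes are concatenated.
const deserialize = (s: SerializableAudioData, timestamp: number): AudioData => {
    const Ctor = getDataTypeForAudioFormat(s.format);
    const total = s.data.reduce((n, plane) => n + plane.length, 0);
    const joined = new Ctor(total);
    let offset = 0;
    for (const plane of s.data) {
        joined.set(plane as ArrayLike<number>, offset);
        offset += plane.length;
    }
    return new AudioData({
        format: s.format,
        sampleRate: s.sampleRate,
        numberOfFrames: s.numberOfFrames,
        numberOfChannels: s.numberOfChannels,
        timestamp, // microseconds
        data: joined,
    });
};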
package/dist/esm/index.mjs
CHANGED

@@ -85,6 +85,7 @@ var makeAudioIterator = (audioSink, startFromSecond) => {
   const iterator = audioSink.buffers(startFromSecond);
   const queuedAudioNodes = [];
   const audioChunksForAfterResuming = [];
+  let mostRecentTimestamp = -Infinity;
   const cleanupAudioQueue = () => {
     for (const node of queuedAudioNodes) {
       node.node.stop();

@@ -108,12 +109,25 @@ var makeAudioIterator = (audioSink, startFromSecond) => {
       }
     };
   }
+  if (result.value) {
+    mostRecentTimestamp = Math.max(mostRecentTimestamp, result.value.timestamp + result.value.duration);
+    return {
+      type: "got-buffer",
+      buffer: result.value
+    };
+  }
   return {
-    type: "got-
-
+    type: "got-end",
+    mostRecentTimestamp
   };
 };
 const tryToSatisfySeek = async (time, allowWait, onBufferScheduled) => {
+  if (time < startFromSecond) {
+    return {
+      type: "not-satisfied",
+      reason: `time requested is before the start of the iterator`
+    };
+  }
   while (true) {
     const buffer = await getNextOrNullIfNotAvailable(allowWait);
     if (buffer.type === "need-to-wait-for-it") {

@@ -122,12 +136,18 @@ var makeAudioIterator = (audioSink, startFromSecond) => {
         reason: "iterator did not have buffer ready"
       };
     }
-    if (buffer.type === "got-
-    if (
+    if (buffer.type === "got-end") {
+      if (time >= mostRecentTimestamp) {
         return {
           type: "ended"
         };
       }
+      return {
+        type: "not-satisfied",
+        reason: `iterator ended before the requested time`
+      };
+    }
+    if (buffer.type === "got-buffer") {
       const bufferTimestamp = roundTo4Digits(buffer.buffer.timestamp);
       const bufferEndTimestamp = roundTo4Digits(buffer.buffer.timestamp + buffer.buffer.duration);
       const timestamp = roundTo4Digits(time);

@@ -180,6 +200,9 @@ var makeAudioIterator = (audioSink, startFromSecond) => {
     },
     getNext: async () => {
       const next = await iterator.next();
+      if (next.value) {
+        mostRecentTimestamp = Math.max(mostRecentTimestamp, next.value.timestamp + next.value.duration);
+      }
       return next;
     },
     isDestroyed: () => {

@@ -1580,6 +1603,7 @@ var AudioForPreviewAssertedShowing = ({
     toneFrequency,
     audioStreamIndex,
     pauseWhenBuffering: fallbackHtml5AudioProps?.pauseWhenBuffering,
+    crossOrigin: fallbackHtml5AudioProps?.crossOrigin,
     ...fallbackHtml5AudioProps
   });
 }

@@ -2208,10 +2232,13 @@ var rememberActualMatroskaTimestamps = (isMatroska) => {
 var getRetryDelay = () => {
   return null;
 };
-var 
+var getFormatOrNullOrNetworkError = async (input) => {
   try {
     return await input.getFormat();
-  } catch {
+  } catch (err) {
+    if (isNetworkError(err)) {
+      return "network-error";
+    }
     return null;
   }
 };

@@ -2222,9 +2249,12 @@ var getSinks = async (src) => {
     getRetryDelay
   })
 });
-const format = await 
+const format = await getFormatOrNullOrNetworkError(input);
 const isMatroska = format === MATROSKA || format === WEBM;
 const getVideoSinks = async () => {
+  if (format === "network-error") {
+    return "network-error";
+  }
   if (format === null) {
     return "unknown-container-format";
   }

@@ -2254,6 +2284,9 @@ var getSinks = async (src) => {
   if (format === null) {
     return "unknown-container-format";
   }
+  if (format === "network-error") {
+    return "network-error";
+  }
   const audioTracks = await input.getAudioTracks();
   const audioTrack = audioTracks[index];
   if (!audioTrack) {

@@ -2833,6 +2866,9 @@ var extractAudioInternal = async ({
     mediaDurationInSeconds = await getDuration();
   }
   const audio = await getAudio(audioStreamIndex);
+  if (audio === "network-error") {
+    return "network-error";
+  }
   if (audio === "no-audio-track") {
     return { data: null, durationInSeconds: null };
   }

@@ -2951,6 +2987,9 @@ var extractFrameInternal = async ({
   if (video === "unknown-container-format") {
     return { type: "unknown-container-format" };
   }
+  if (video === "network-error") {
+    return { type: "network-error" };
+  }
   let mediaDurationInSeconds = null;
   if (loop) {
     mediaDurationInSeconds = await sink.getDuration();

@@ -3094,12 +3133,21 @@ var extractFrameAndAudio = async ({
     durationInSeconds: frame.durationInSeconds
   };
 }
+  if (frame?.type === "network-error") {
+    return { type: "network-error" };
+  }
   if (audio === "unknown-container-format") {
     if (frame !== null) {
       frame?.frame?.close();
     }
     return { type: "unknown-container-format" };
   }
+  if (audio === "network-error") {
+    if (frame !== null) {
+      frame?.frame?.close();
+    }
+    return { type: "network-error" };
+  }
   if (audio === "cannot-decode") {
     if (frame?.type === "success" && frame.frame !== null) {
       frame?.frame.close();

@@ -4091,7 +4139,7 @@ var VideoForRendering = ({
     cancelRender3(new Error(`Cannot decode ${src}, and 'disallowFallbackToOffthreadVideo' was set. Failing the render.`));
   }
   if (window.remotion_isMainTab) {
-    Internals16.Log.
+    Internals16.Log.warn({ logLevel, tag: "@remotion/media" }, `Network error fetching ${src} (no CORS?), falling back to <OffthreadVideo>`);
   }
   setReplaceWithOffthreadVideo({ durationInSeconds: null });
   return;
package/dist/extract-audio.d.ts
ADDED

@@ -0,0 +1,7 @@
+import type { PcmS16AudioData } from './convert-audiodata/convert-audiodata';
+export declare const extractAudio: ({ src, timeInSeconds, durationInSeconds, volume, }: {
+    src: string;
+    timeInSeconds: number;
+    durationInSeconds: number;
+    volume: number;
+}) => Promise<PcmS16AudioData | null>;
package/dist/extract-audio.js
ADDED

@@ -0,0 +1,98 @@
+import { combineAudioDataAndClosePrevious } from './convert-audiodata/combine-audiodata';
+import { convertAudioData } from './convert-audiodata/convert-audiodata';
+import { TARGET_NUMBER_OF_CHANNELS, TARGET_SAMPLE_RATE, } from './convert-audiodata/resample-audiodata';
+import { sinkPromises } from './extract-frame';
+import { getSinks } from './video-extraction/get-frames-since-keyframe';
+export const extractAudio = async ({ src, timeInSeconds, durationInSeconds, volume, }) => {
+    console.time('extractAudio');
+    if (!sinkPromises[src]) {
+        sinkPromises[src] = getSinks(src);
+    }
+    const { audio, actualMatroskaTimestamps, isMatroska } = await sinkPromises[src];
+    if (audio === null) {
+        console.timeEnd('extractAudio');
+        return null;
+    }
+    // https://discord.com/channels/@me/1409810025844838481/1415028953093111870
+    // Audio frames might have dependencies on previous and next frames so we need to decode a bit more
+    // and then discard it.
+    // The worst case seems to be FLAC files with a 65'535 sample window, which would be 1486.0ms at 44.1Khz.
+    // So let's set a threshold of 1.5 seconds.
+    const extraThreshold = 1.5;
+    // Matroska timestamps are not accurate unless we start from the beginning
+    // So for matroska, we need to decode all samples :(
+    // https://github.com/Vanilagy/mediabunny/issues/105
+    const sampleIterator = audio.sampleSink.samples(isMatroska ? 0 : Math.max(0, timeInSeconds - extraThreshold), timeInSeconds + durationInSeconds);
+    const samples = [];
+    for await (const sample of sampleIterator) {
+        const realTimestamp = actualMatroskaTimestamps.getRealTimestamp(sample.timestamp);
+        if (realTimestamp !== null && realTimestamp !== sample.timestamp) {
+            sample.setTimestamp(realTimestamp);
+        }
+        actualMatroskaTimestamps.observeTimestamp(sample.timestamp);
+        actualMatroskaTimestamps.observeTimestamp(sample.timestamp + sample.duration);
+        if (sample.timestamp + sample.duration - 0.0000000001 <= timeInSeconds) {
+            continue;
+        }
+        if (sample.timestamp >= timeInSeconds + durationInSeconds - 0.0000000001) {
+            continue;
+        }
+        samples.push(sample);
+    }
+    const audioDataArray = [];
+    for (let i = 0; i < samples.length; i++) {
+        const sample = samples[i];
+        // Less than 1 sample would be included - we did not need it after all!
+        if (Math.abs(sample.timestamp - (timeInSeconds + durationInSeconds)) *
+            sample.sampleRate <
+            1) {
+            sample.close();
+            continue;
+        }
+        // Less than 1 sample would be included - we did not need it after all!
+        if (sample.timestamp + sample.duration <= timeInSeconds) {
+            sample.close();
+            continue;
+        }
+        const isFirstSample = i === 0;
+        const isLastSample = i === samples.length - 1;
+        const audioDataRaw = sample.toAudioData();
+        // amount of samples to shave from start and end
+        let trimStartInSeconds = 0;
+        let trimEndInSeconds = 0;
+        // TODO: Apply playback rate
+        // TODO: Apply tone frequency
+        if (isFirstSample) {
+            trimStartInSeconds = timeInSeconds - sample.timestamp;
+        }
+        if (isLastSample) {
+            trimEndInSeconds =
+                // clamp to 0 in case the audio ends early
+                Math.max(0, sample.timestamp +
+                    sample.duration -
+                    (timeInSeconds + durationInSeconds));
+        }
+        const audioData = convertAudioData({
+            audioData: audioDataRaw,
+            newSampleRate: TARGET_SAMPLE_RATE,
+            trimStartInSeconds,
+            trimEndInSeconds,
+            targetNumberOfChannels: TARGET_NUMBER_OF_CHANNELS,
+            volume,
+        });
+        audioDataRaw.close();
+        if (audioData.numberOfFrames === 0) {
+            sample.close();
+            continue;
+        }
+        audioDataArray.push(audioData);
+        sample.close();
+    }
+    if (audioDataArray.length === 0) {
+        console.timeEnd('extractAudio');
+        return null;
+    }
+    const combined = combineAudioDataAndClosePrevious(audioDataArray);
+    console.timeEnd('extractAudio');
+    return combined;
+};
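The boundary samples are trimmed so that exactly the window [timeInSeconds, timeInSeconds + durationInSeconds) survives, even though codec samples rarely align with the request. A worked example of the arithmetic above (values are illustrative):

// request: timeInSeconds = 2.000, durationInSeconds = 0.040
// first kept sample: timestamp = 1.976, duration = 0.032
//   trimStartInSeconds = 2.000 - 1.976 = 0.024        → shave the first 24 ms
// last kept sample:  timestamp = 2.024, duration = 0.032
//   trimEndInSeconds = max(0, 2.056 - 2.040) = 0.016  → shave the final 16 ms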
package/dist/extract-frame-and-audio.js
CHANGED

@@ -49,12 +49,21 @@ export const extractFrameAndAudio = async ({ src, timeInSeconds, logLevel, durat
             durationInSeconds: frame.durationInSeconds,
         };
     }
+    if (frame?.type === 'network-error') {
+        return { type: 'network-error' };
+    }
     if (audio === 'unknown-container-format') {
         if (frame !== null) {
            frame?.frame?.close();
         }
         return { type: 'unknown-container-format' };
     }
+    if (audio === 'network-error') {
+        if (frame !== null) {
+            frame?.frame?.close();
+        }
+        return { type: 'network-error' };
+    }
     if (audio === 'cannot-decode') {
         if (frame?.type === 'success' && frame.frame !== null) {
             frame?.frame.close();