@remotion/media-utils 4.0.381 → 4.0.383
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/get-partial-audio-data.d.ts +10 -0
- package/dist/get-partial-audio-data.js +48 -0
- package/dist/index.d.ts +0 -2
- package/dist/index.js +1 -5
- package/dist/use-windowed-audio-data.js +140 -52
- package/package.json +6 -3
package/dist/get-partial-audio-data.d.ts
ADDED
@@ -0,0 +1,10 @@
+import type { InputAudioTrack } from 'mediabunny';
+export type GetPartialAudioDataProps = {
+    track: InputAudioTrack;
+    fromSeconds: number;
+    toSeconds: number;
+    channelIndex: number;
+    signal: AbortSignal;
+    isMatroska?: boolean;
+};
+export declare const getPartialAudioData: ({ track, fromSeconds, toSeconds, channelIndex, signal, isMatroska, }: GetPartialAudioDataProps) => Promise<Float32Array>;
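The declaration above is the whole public surface of the new module. A minimal calling sketch, illustrative only: the deep dist import path and the file URL are assumptions (the helper is not re-exported from the package index), and the mediabunny calls mirror what the implementation below does internally.

import { ALL_FORMATS, Input, UrlSource } from 'mediabunny';
// Hypothetical deep import; getPartialAudioData is not exported from the package index.
import { getPartialAudioData } from '@remotion/media-utils/dist/get-partial-audio-data';

const readWindow = async (): Promise<Float32Array> => {
  // Open the media file the same way the new hook code does internally.
  const input = new Input({
    formats: ALL_FORMATS,
    source: new UrlSource('https://example.com/audio.mp3'), // placeholder URL
  });
  const track = await input.getPrimaryAudioTrack();
  if (!track) {
    throw new Error('No audio track found');
  }
  const controller = new AbortController();
  // Decode channel 0 between 2s and 4s; isMatroska defaults to false.
  return getPartialAudioData({
    track,
    fromSeconds: 2,
    toSeconds: 4,
    channelIndex: 0,
    signal: controller.signal,
  });
};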
package/dist/get-partial-audio-data.js
ADDED
@@ -0,0 +1,48 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.getPartialAudioData = void 0;
+const mediabunny_1 = require("mediabunny");
+// Audio frames might have dependencies on previous and next frames so we need to decode a bit more and then discard it.
+// The worst case seems to be FLAC files with a 65'535 sample window, which would be 1486.0ms at 44.1Khz.
+// So let's set a threshold of 1.5 seconds.
+const EXTRA_THRESHOLD_IN_SECONDS = 1.5;
+const getPartialAudioData = async ({ track, fromSeconds, toSeconds, channelIndex, signal, isMatroska = false, }) => {
+    if (signal.aborted) {
+        throw new Error('Operation was aborted');
+    }
+    const audioSamples = [];
+    // matroska must be decoded from the start due to limitation
+    // https://www.remotion.dev/docs/media/support#matroska-limitation
+    // Also request extra data beforehand to handle audio frame dependencies
+    const actualFromSeconds = isMatroska
+        ? 0
+        : Math.max(0, fromSeconds - EXTRA_THRESHOLD_IN_SECONDS);
+    // mediabunny docs: constructing the sink is virtually free and does not perform any media data reads.
+    const sink = new mediabunny_1.AudioBufferSink(track);
+    for await (const { buffer, timestamp, duration } of sink.buffers(actualFromSeconds, toSeconds)) {
+        if (signal.aborted) {
+            break;
+        }
+        const channelData = buffer.getChannelData(channelIndex);
+        const bufferStartSeconds = timestamp;
+        const bufferEndSeconds = timestamp + duration;
+        const overlapStartSecond = Math.max(bufferStartSeconds, fromSeconds);
+        const overlapEndSecond = Math.min(bufferEndSeconds, toSeconds);
+        if (overlapStartSecond >= overlapEndSecond) {
+            continue;
+        }
+        const startSampleInBuffer = Math.floor((overlapStartSecond - bufferStartSeconds) * buffer.sampleRate);
+        const endSampleInBuffer = Math.ceil((overlapEndSecond - bufferStartSeconds) * buffer.sampleRate);
+        const trimmedData = channelData.slice(startSampleInBuffer, endSampleInBuffer);
+        audioSamples.push(trimmedData);
+    }
+    const totalSamples = audioSamples.reduce((sum, sample) => sum + sample.length, 0);
+    const result = new Float32Array(totalSamples);
+    let offset = 0;
+    for (const audioSample of audioSamples) {
+        result.set(audioSample, offset);
+        offset += audioSample.length;
+    }
+    return result;
+};
+exports.getPartialAudioData = getPartialAudioData;
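The 1.5-second padding checks out against the worst case the comment cites: 65,535 samples at 44,100 Hz is 65535 / 44100 ≈ 1.486 s, which rounds up to 1.5 s. The per-buffer trimming is plain interval arithmetic; a self-contained sketch with made-up numbers (no mediabunny involved):

// A decoded buffer starting at t = 1.9s, lasting 0.5s at 44100 Hz,
// trimmed against a requested window of [2.0s, 4.0s]:
const sampleRate = 44100;
const bufferStartSeconds = 1.9;
const bufferEndSeconds = bufferStartSeconds + 0.5; // 2.4
const overlapStartSecond = Math.max(bufferStartSeconds, 2.0); // 2.0
const overlapEndSecond = Math.min(bufferEndSeconds, 4.0); // 2.4
const startSampleInBuffer = Math.floor((overlapStartSecond - bufferStartSeconds) * sampleRate); // 4410
const endSampleInBuffer = Math.ceil((overlapEndSecond - bufferStartSeconds) * sampleRate); // 22050
// channelData.slice(4410, 22050) would keep exactly the samples inside the window.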
package/dist/index.d.ts
CHANGED
@@ -3,10 +3,8 @@ export { createSmoothSvgPath } from './create-smooth-svg-path';
 export { getAudioData } from './get-audio-data';
 export { getAudioDuration, getAudioDurationInSeconds, } from './get-audio-duration-in-seconds';
 export { getImageDimensions } from './get-image-dimensions';
-export { getPartialWaveData } from './get-partial-wave-data';
 export { getVideoMetadata } from './get-video-metadata';
 export { getWaveformPortion } from './get-waveform-portion';
-export { WaveProbe, probeWaveFile } from './probe-wave-file';
 export * from './types';
 export type { AudioData, MediaUtilsAudioData, VideoMetadata as VideoData, } from './types';
 export { useAudioData } from './use-audio-data';
package/dist/index.js
CHANGED
@@ -14,7 +14,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.visualizeAudioWaveform = exports.visualizeAudio = exports.useWindowedAudioData = exports.useAudioData = exports.
+exports.visualizeAudioWaveform = exports.visualizeAudio = exports.useWindowedAudioData = exports.useAudioData = exports.getWaveformPortion = exports.getVideoMetadata = exports.getImageDimensions = exports.getAudioDurationInSeconds = exports.getAudioDuration = exports.getAudioData = exports.createSmoothSvgPath = exports.audioBufferToDataUrl = void 0;
 var audio_url_helpers_1 = require("./audio-buffer/audio-url-helpers");
 Object.defineProperty(exports, "audioBufferToDataUrl", { enumerable: true, get: function () { return audio_url_helpers_1.audioBufferToDataUrl; } });
 var create_smooth_svg_path_1 = require("./create-smooth-svg-path");
@@ -26,14 +26,10 @@ Object.defineProperty(exports, "getAudioDuration", { enumerable: true, get: func
 Object.defineProperty(exports, "getAudioDurationInSeconds", { enumerable: true, get: function () { return get_audio_duration_in_seconds_1.getAudioDurationInSeconds; } });
 var get_image_dimensions_1 = require("./get-image-dimensions");
 Object.defineProperty(exports, "getImageDimensions", { enumerable: true, get: function () { return get_image_dimensions_1.getImageDimensions; } });
-var get_partial_wave_data_1 = require("./get-partial-wave-data");
-Object.defineProperty(exports, "getPartialWaveData", { enumerable: true, get: function () { return get_partial_wave_data_1.getPartialWaveData; } });
 var get_video_metadata_1 = require("./get-video-metadata");
 Object.defineProperty(exports, "getVideoMetadata", { enumerable: true, get: function () { return get_video_metadata_1.getVideoMetadata; } });
 var get_waveform_portion_1 = require("./get-waveform-portion");
 Object.defineProperty(exports, "getWaveformPortion", { enumerable: true, get: function () { return get_waveform_portion_1.getWaveformPortion; } });
-var probe_wave_file_1 = require("./probe-wave-file");
-Object.defineProperty(exports, "probeWaveFile", { enumerable: true, get: function () { return probe_wave_file_1.probeWaveFile; } });
 __exportStar(require("./types"), exports);
 var use_audio_data_1 = require("./use-audio-data");
 Object.defineProperty(exports, "useAudioData", { enumerable: true, get: function () { return use_audio_data_1.useAudioData; } });
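Taken together, the index changes drop getPartialWaveData, probeWaveFile, and the WaveProbe type from the public API; everything else survives. The remaining named exports, written out as an import list (reconstructed from the export chain above):

import {
  audioBufferToDataUrl,
  createSmoothSvgPath,
  getAudioData,
  getAudioDuration,
  getAudioDurationInSeconds,
  getImageDimensions,
  getVideoMetadata,
  getWaveformPortion,
  useAudioData,
  useWindowedAudioData,
  visualizeAudio,
  visualizeAudioWaveform,
} from '@remotion/media-utils';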
package/dist/use-windowed-audio-data.js
CHANGED
@@ -1,15 +1,16 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.useWindowedAudioData = void 0;
+const mediabunny_1 = require("mediabunny");
 const react_1 = require("react");
 const remotion_1 = require("remotion");
 const combine_float32_arrays_1 = require("./combine-float32-arrays");
-const
+const get_partial_audio_data_1 = require("./get-partial-audio-data");
 const is_remote_asset_1 = require("./is-remote-asset");
-const
+const warnedMatroska = {};
 const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex = 0, }) => {
     const isMounted = (0, react_1.useRef)(true);
-    const [
+    const [audioUtils, setAudioUtils] = (0, react_1.useState)(null);
     const [waveFormMap, setWaveformMap] = (0, react_1.useState)({});
     const requests = (0, react_1.useRef)({});
     const [initialWindowInSeconds] = (0, react_1.useState)(windowInSeconds);
@@ -20,8 +21,18 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
         isMounted.current = true;
         return () => {
             isMounted.current = false;
+            Object.values(requests.current).forEach((controller) => {
+                if (controller) {
+                    controller.abort();
+                }
+            });
+            requests.current = {};
+            setWaveformMap({});
+            if (audioUtils) {
+                audioUtils.input.dispose();
+            }
         };
-    }, []);
+    }, [audioUtils]);
     const { delayRender, continueRender } = (0, remotion_1.useDelayRender)();
     const fetchMetadata = (0, react_1.useCallback)(async (signal) => {
         const handle = delayRender(`Waiting for audio metadata with src="${src}" to be loaded`);
@@ -29,20 +40,53 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
             continueRender(handle);
         };
         signal.addEventListener('abort', cont, { once: true });
+        const input = new mediabunny_1.Input({
+            formats: mediabunny_1.ALL_FORMATS,
+            source: new mediabunny_1.UrlSource(src),
+        });
+        const onAbort = () => {
+            input.dispose();
+        };
+        signal.addEventListener('abort', onAbort, { once: true });
         try {
-            const
+            const durationInSeconds = await input.computeDuration();
+            const audioTrack = await input.getPrimaryAudioTrack();
+            if (!audioTrack) {
+                throw new Error('No audio track found');
+            }
+            const canDecode = await audioTrack.canDecode();
+            if (!canDecode) {
+                throw new Error('Audio track cannot be decoded');
+            }
+            if (channelIndex >= audioTrack.numberOfChannels || channelIndex < 0) {
+                throw new Error(`Invalid channel index ${channelIndex} for audio with ${audioTrack.numberOfChannels} channels`);
+            }
+            const { numberOfChannels, sampleRate } = audioTrack;
+            const format = await input.getFormat();
+            const isMatroska = format === mediabunny_1.MATROSKA || format === mediabunny_1.WEBM;
             if (isMounted.current) {
-
+                setAudioUtils({
+                    input,
+                    track: audioTrack,
+                    metadata: {
+                        durationInSeconds,
+                        numberOfChannels,
+                        sampleRate,
+                    },
+                    isMatroska,
+                });
             }
             continueRender(handle);
         }
         catch (err) {
+            input.dispose();
             (0, remotion_1.cancelRender)(err);
         }
         finally {
             signal.removeEventListener('abort', cont);
+            signal.removeEventListener('abort', onAbort);
         }
-    }, [src, delayRender, continueRender]);
+    }, [src, delayRender, continueRender, channelIndex]);
     (0, react_1.useLayoutEffect)(() => {
         const controller = new AbortController();
         fetchMetadata(controller.signal);
@@ -53,13 +97,13 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
     const currentTime = frame / fps;
     const currentWindowIndex = Math.floor(currentTime / windowInSeconds);
     const windowsToFetch = (0, react_1.useMemo)(() => {
-        if (!
+        if (!(audioUtils === null || audioUtils === void 0 ? void 0 : audioUtils.metadata)) {
            return [];
        }
        const maxWindowIndex = Math.floor(
        // If an audio is exactly divisible by windowInSeconds, we need to
        // subtract 0.000000000001 to avoid fetching an extra window.
-
+        audioUtils.metadata.durationInSeconds / windowInSeconds - 0.000000000001);
        // needs to be in order because we rely on the concatenation below
        return [
            currentWindowIndex === 0 ? null : currentWindowIndex - 1,
@@ -68,43 +112,74 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
        ]
            .filter((i) => i !== null)
            .filter((i) => i >= 0);
-    }, [currentWindowIndex,
+    }, [currentWindowIndex, audioUtils === null || audioUtils === void 0 ? void 0 : audioUtils.metadata, windowInSeconds]);
     const fetchAndSetWaveformData = (0, react_1.useCallback)(async (windowIndex) => {
-        if (!
-        throw new Error('
+        if (!(audioUtils === null || audioUtils === void 0 ? void 0 : audioUtils.metadata) || !audioUtils) {
+            throw new Error('MediaBunny context is not loaded yet');
+        }
+        // Cancel any existing request for this window, we don't want to over-fetch
+        const existingController = requests.current[windowIndex];
+        if (existingController) {
+            existingController.abort();
        }
        const controller = new AbortController();
        requests.current[windowIndex] = controller;
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if (controller.signal.aborted) {
+            return;
+        }
+        const fromSeconds = windowIndex * windowInSeconds;
+        const toSeconds = (windowIndex + 1) * windowInSeconds;
+        // if both fromSeconds and toSeconds are outside of the audio duration, skip fetching
+        if (fromSeconds >= audioUtils.metadata.durationInSeconds ||
+            toSeconds <= 0) {
+            return;
+        }
+        try {
+            const { isMatroska } = audioUtils;
+            if (isMatroska && !warnedMatroska[src]) {
+                warnedMatroska[src] = true;
+                remotion_1.Internals.Log.warn({ logLevel: 'info', tag: '@remotion/media-utils' }, `[useWindowedAudioData] Matroska/WebM file detected at "${src}".\n\nDue to format limitation, audio decoding must start from the beginning of the file, which may lead to increased memory usage and slower performance for large files. Consider converting the audio to a more suitable format like MP3 or AAC for better performance.`);
+            }
+            const partialWaveData = await (0, get_partial_audio_data_1.getPartialAudioData)({
+                track: audioUtils.track,
+                fromSeconds,
+                toSeconds,
+                channelIndex,
+                signal: controller.signal,
+                isMatroska,
+            });
+            if (!controller.signal.aborted) {
+                setWaveformMap((prev) => {
+                    const entries = Object.keys(prev);
+                    const windowsToClear = entries.filter((entry) => !windowsToFetch.includes(Number(entry)));
+                    return {
+                        ...prev,
+                        ...windowsToClear.reduce((acc, key) => {
+                            acc[key] = null;
+                            return acc;
+                        }, {}),
+                        [windowIndex]: partialWaveData,
+                    };
+                });
+            }
+        }
+        catch (err) {
+            if (controller.signal.aborted) {
+                return;
+            }
+            if (err instanceof mediabunny_1.InputDisposedError) {
+                return;
+            }
+            throw err;
+        }
+        finally {
+            if (requests.current[windowIndex] === controller) {
+                requests.current[windowIndex] = null;
+            }
+        }
+    }, [channelIndex, audioUtils, windowInSeconds, windowsToFetch, src]);
     (0, react_1.useEffect)(() => {
-        if (!
+        if (!(audioUtils === null || audioUtils === void 0 ? void 0 : audioUtils.metadata)) {
            return;
        }
        const windowsToClear = Object.keys(requests.current).filter((entry) => !windowsToFetch.includes(Number(entry)));
@@ -115,7 +190,12 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
                requests.current[windowIndex] = null;
            }
        }
-
+        // Only fetch windows that don't already exist
+        const windowsToActuallyFetch = windowsToFetch.filter((windowIndex) => !waveFormMap[windowIndex]);
+        if (windowsToActuallyFetch.length === 0) {
+            return;
+        }
+        Promise.all(windowsToActuallyFetch.map((windowIndex) => {
            return fetchAndSetWaveformData(windowIndex);
        })).catch((err) => {
            var _a, _b, _c, _d, _e;
@@ -131,25 +211,29 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
        }
        (0, remotion_1.cancelRender)(err);
        });
-    }, [fetchAndSetWaveformData,
+    }, [fetchAndSetWaveformData, audioUtils, windowsToFetch, waveFormMap]);
+    // Calculate available windows for reuse
+    const availableWindows = (0, react_1.useMemo)(() => {
+        return windowsToFetch.filter((i) => waveFormMap[i]);
+    }, [windowsToFetch, waveFormMap]);
     const currentAudioData = (0, react_1.useMemo)(() => {
-        if (!
+        if (!(audioUtils === null || audioUtils === void 0 ? void 0 : audioUtils.metadata)) {
            return null;
        }
-        if (
+        if (availableWindows.length === 0) {
            return null;
        }
-        const windows =
+        const windows = availableWindows.map((i) => waveFormMap[i]);
        const data = (0, combine_float32_arrays_1.combineFloat32Arrays)(windows);
        return {
            channelWaveforms: [data],
-            durationInSeconds:
+            durationInSeconds: audioUtils.metadata.durationInSeconds,
            isRemote: (0, is_remote_asset_1.isRemoteAsset)(src),
            numberOfChannels: 1,
-            resultId:
-            sampleRate:
+            resultId: `${src}-windows-${availableWindows.join(',')}`,
+            sampleRate: audioUtils.metadata.sampleRate,
        };
-    }, [src, waveFormMap,
+    }, [src, waveFormMap, audioUtils === null || audioUtils === void 0 ? void 0 : audioUtils.metadata, availableWindows]);
     (0, react_1.useLayoutEffect)(() => {
        if (currentAudioData) {
            return;
@@ -159,9 +243,13 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
        continueRender(handle);
        };
    }, [currentAudioData, src, delayRender, continueRender]);
+    const isBeyondAudioDuration = audioUtils
+        ? currentTime >= audioUtils.metadata.durationInSeconds
+        : false;
+    const audioData = isBeyondAudioDuration ? null : currentAudioData;
    return {
-        audioData
-        dataOffsetInSeconds:
+        audioData,
+        dataOffsetInSeconds: availableWindows.length > 0 ? availableWindows[0] * windowInSeconds : 0,
    };
 };
 exports.useWindowedAudioData = useWindowedAudioData;
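Despite the internal rewrite to mediabunny, the hook's return shape is unchanged: { audioData, dataOffsetInSeconds }. A usage sketch following the documented pattern (the asset path, window size, and sample count are placeholders, and the actual bar rendering is omitted):

import React from 'react';
import { useWindowedAudioData, visualizeAudio } from '@remotion/media-utils';
import { staticFile, useCurrentFrame, useVideoConfig } from 'remotion';

export const Waveform: React.FC = () => {
  const frame = useCurrentFrame();
  const { fps } = useVideoConfig();

  const { audioData, dataOffsetInSeconds } = useWindowedAudioData({
    src: staticFile('podcast.wav'), // placeholder asset
    frame,
    fps,
    windowInSeconds: 10,
  });

  // audioData is null until the first window has been decoded,
  // and also when the playhead is beyond the audio duration.
  if (!audioData) {
    return null;
  }

  const amplitudes = visualizeAudio({
    fps,
    frame,
    audioData,
    numberOfSamples: 32,
    windowInSeconds: 10,
    dataOffsetInSeconds,
  });

  // Render bars from `amplitudes` here; omitted for brevity.
  return null;
};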
package/package.json
CHANGED
@@ -3,7 +3,7 @@
     "url": "https://github.com/remotion-dev/remotion/tree/main/packages/media-utils"
   },
   "name": "@remotion/media-utils",
-  "version": "4.0.381",
+  "version": "4.0.383",
   "description": "Utilities for working with media files",
   "main": "dist/index.js",
   "sideEffects": false,
@@ -18,14 +18,17 @@
     "url": "https://github.com/remotion-dev/remotion/issues"
   },
   "dependencies": {
-    "remotion": "4.0.381"
+    "@remotion/media-parser": "4.0.383",
+    "@remotion/webcodecs": "4.0.383",
+    "remotion": "4.0.383",
+    "mediabunny": "1.25.3"
   },
   "peerDependencies": {
     "react": ">=16.8.0",
     "react-dom": ">=16.8.0"
   },
   "devDependencies": {
-    "@remotion/eslint-config-internal": "4.0.381",
+    "@remotion/eslint-config-internal": "4.0.383",
     "eslint": "9.19.0"
   },
   "keywords": [
|