@remotion/media-utils 4.0.397 → 4.0.399
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/get-partial-audio-data.d.ts +3 -2
- package/dist/get-partial-audio-data.js +37 -108
- package/dist/use-windowed-audio-data.js +70 -108
- package/package.json +6 -6
- package/dist/get-partial-media-data-2.d.ts +0 -7
- package/dist/get-partial-media-data-2.js +0 -104
- package/dist/get-partial-media-data.d.ts +0 -7
- package/dist/get-partial-media-data.js +0 -105
package/dist/get-partial-audio-data.d.ts
CHANGED
@@ -1,9 +1,10 @@
+import type { InputAudioTrack } from 'mediabunny';
 export type GetPartialAudioDataProps = {
+track: InputAudioTrack;
 fromSeconds: number;
 toSeconds: number;
 channelIndex: number;
 signal: AbortSignal;
-src: string;
 isMatroska: boolean;
 };
-export declare const getPartialAudioData: ({ fromSeconds, toSeconds, channelIndex, signal,
+export declare const getPartialAudioData: ({ track, fromSeconds, toSeconds, channelIndex, signal, isMatroska, }: GetPartialAudioDataProps) => Promise<Float32Array>;
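For context, here is a minimal sketch (not part of this diff) of how the new track-based signature might be called. It leans only on mediabunny calls that also appear in the updated use-windowed-audio-data.js below (Input, UrlSource, ALL_FORMATS, getPrimaryAudioTrack); the deep import path and the URL are illustrative assumptions, since getPartialAudioData is an internal module of this package:

import {ALL_FORMATS, Input, UrlSource} from 'mediabunny';
// Hypothetical deep import; the module lives at dist/get-partial-audio-data.js and may not be exposed publicly.
import {getPartialAudioData} from '@remotion/media-utils/dist/get-partial-audio-data';

const readWindow = async (): Promise<Float32Array> => {
  // The caller now opens the input and resolves the audio track itself...
  const input = new Input({
    formats: ALL_FORMATS,
    source: new UrlSource('https://example.com/audio.mp3'), // placeholder URL
  });
  const track = await input.getPrimaryAudioTrack();
  if (!track) {
    throw new Error('No audio track found');
  }
  // ...and passes the track instead of the previous `src: string` prop.
  const samples = await getPartialAudioData({
    track,
    fromSeconds: 0,
    toSeconds: 1,
    channelIndex: 0,
    signal: new AbortController().signal,
    isMatroska: false,
  });
  return samples; // Float32Array with channel 0 between 0s and 1s
};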
package/dist/get-partial-audio-data.js
CHANGED
@@ -1,56 +1,4 @@
 "use strict";
-var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
-if (value !== null && value !== void 0) {
-if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
-var dispose, inner;
-if (async) {
-if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
-dispose = value[Symbol.asyncDispose];
-}
-if (dispose === void 0) {
-if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
-dispose = value[Symbol.dispose];
-if (async) inner = dispose;
-}
-if (typeof dispose !== "function") throw new TypeError("Object not disposable.");
-if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };
-env.stack.push({ value: value, dispose: dispose, async: async });
-}
-else if (async) {
-env.stack.push({ async: true });
-}
-return value;
-};
-var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
-return function (env) {
-function fail(e) {
-env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
-env.hasError = true;
-}
-var r, s = 0;
-function next() {
-while (r = env.stack.pop()) {
-try {
-if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
-if (r.dispose) {
-var result = r.dispose.call(r.value);
-if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
-}
-else s |= 1;
-}
-catch (e) {
-fail(e);
-}
-}
-if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
-if (env.hasError) throw env.error;
-}
-return next();
-};
-})(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
-var e = new Error(message);
-return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
-});
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.getPartialAudioData = void 0;
 const mediabunny_1 = require("mediabunny");
@@ -58,64 +6,45 @@ const mediabunny_1 = require("mediabunny");
 // The worst case seems to be FLAC files with a 65'535 sample window, which would be 1486.0ms at 44.1Khz.
 // So let's set a threshold of 1.5 seconds.
 const EXTRA_THRESHOLD_IN_SECONDS = 1.5;
-const getPartialAudioData = async ({ fromSeconds, toSeconds, channelIndex, signal,
-
-
-if (signal.aborted) {
-throw new Error('Operation was aborted');
-}
-const audioSamples = [];
-// matroska must be decoded from the start due to limitation
-// https://www.remotion.dev/docs/media/support#matroska-limitation
-// Also request extra data beforehand to handle audio frame dependencies
-const actualFromSeconds = isMatroska
-? 0
-: Math.max(0, fromSeconds - EXTRA_THRESHOLD_IN_SECONDS);
-const source = new mediabunny_1.UrlSource(src);
-const input = __addDisposableResource(env_1, new mediabunny_1.Input({
-formats: mediabunny_1.ALL_FORMATS,
-source,
-}), false);
-const track = await input.getPrimaryAudioTrack();
-if (!track) {
-throw new Error('No audio track found');
-}
-// mediabunny docs: constructing the sink is virtually free and does not perform any media data reads.
-const sink = new mediabunny_1.AudioBufferSink(track);
-const iterator = sink.buffers(actualFromSeconds, toSeconds);
-for await (const { buffer, timestamp, duration } of iterator) {
-if (signal.aborted) {
-break;
-}
-const channelData = buffer.getChannelData(channelIndex);
-const bufferStartSeconds = timestamp;
-const bufferEndSeconds = timestamp + duration;
-const overlapStartSecond = Math.max(bufferStartSeconds, fromSeconds);
-const overlapEndSecond = Math.min(bufferEndSeconds, toSeconds);
-if (overlapStartSecond >= overlapEndSecond) {
-continue;
-}
-const startSampleInBuffer = Math.floor((overlapStartSecond - bufferStartSeconds) * buffer.sampleRate);
-const endSampleInBuffer = Math.ceil((overlapEndSecond - bufferStartSeconds) * buffer.sampleRate);
-const trimmedData = channelData.slice(startSampleInBuffer, endSampleInBuffer);
-audioSamples.push(trimmedData);
-}
-await iterator.return();
-const totalSamples = audioSamples.reduce((sum, sample) => sum + sample.length, 0);
-const result = new Float32Array(totalSamples);
-let offset = 0;
-for (const audioSample of audioSamples) {
-result.set(audioSample, offset);
-offset += audioSample.length;
-}
-return result;
+const getPartialAudioData = async ({ track, fromSeconds, toSeconds, channelIndex, signal, isMatroska = false, }) => {
+if (signal.aborted) {
+throw new Error('Operation was aborted');
 }
-
-
+const audioSamples = [];
+// matroska must be decoded from the start due to limitation
+// https://www.remotion.dev/docs/media/support#matroska-limitation
+// Also request extra data beforehand to handle audio frame dependencies
+const actualFromSeconds = isMatroska
+? 0
+: Math.max(0, fromSeconds - EXTRA_THRESHOLD_IN_SECONDS);
+// mediabunny docs: constructing the sink is virtually free and does not perform any media data reads.
+const sink = new mediabunny_1.AudioBufferSink(track);
+const iterator = sink.buffers(actualFromSeconds, toSeconds);
+for await (const { buffer, timestamp, duration } of iterator) {
+if (signal.aborted) {
+break;
+}
+const channelData = buffer.getChannelData(channelIndex);
+const bufferStartSeconds = timestamp;
+const bufferEndSeconds = timestamp + duration;
+const overlapStartSecond = Math.max(bufferStartSeconds, fromSeconds);
+const overlapEndSecond = Math.min(bufferEndSeconds, toSeconds);
+if (overlapStartSecond >= overlapEndSecond) {
+continue;
+}
+const startSampleInBuffer = Math.floor((overlapStartSecond - bufferStartSeconds) * buffer.sampleRate);
+const endSampleInBuffer = Math.ceil((overlapEndSecond - bufferStartSeconds) * buffer.sampleRate);
+const trimmedData = channelData.slice(startSampleInBuffer, endSampleInBuffer);
+audioSamples.push(trimmedData);
 }
-
-
+await iterator.return();
+const totalSamples = audioSamples.reduce((sum, sample) => sum + sample.length, 0);
+const result = new Float32Array(totalSamples);
+let offset = 0;
+for (const audioSample of audioSamples) {
+result.set(audioSample, offset);
+offset += audioSample.length;
 }
+return result;
 };
 exports.getPartialAudioData = getPartialAudioData;
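As a quick sanity check on the FLAC comment retained above: 65'535 samples ÷ 44'100 Hz ≈ 1.486 s, which is why EXTRA_THRESHOLD_IN_SECONDS rounds the extra lead-in requested before fromSeconds up to 1.5 seconds.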
package/dist/use-windowed-audio-data.js
CHANGED
@@ -1,56 +1,4 @@
 "use strict";
-var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
-if (value !== null && value !== void 0) {
-if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
-var dispose, inner;
-if (async) {
-if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
-dispose = value[Symbol.asyncDispose];
-}
-if (dispose === void 0) {
-if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
-dispose = value[Symbol.dispose];
-if (async) inner = dispose;
-}
-if (typeof dispose !== "function") throw new TypeError("Object not disposable.");
-if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };
-env.stack.push({ value: value, dispose: dispose, async: async });
-}
-else if (async) {
-env.stack.push({ async: true });
-}
-return value;
-};
-var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
-return function (env) {
-function fail(e) {
-env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
-env.hasError = true;
-}
-var r, s = 0;
-function next() {
-while (r = env.stack.pop()) {
-try {
-if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
-if (r.dispose) {
-var result = r.dispose.call(r.value);
-if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
-}
-else s |= 1;
-}
-catch (e) {
-fail(e);
-}
-}
-if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
-if (env.hasError) throw env.error;
-}
-return next();
-};
-})(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
-var e = new Error(message);
-return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
-});
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.useWindowedAudioData = void 0;
 const mediabunny_1 = require("mediabunny");
@@ -80,68 +28,62 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
 });
 requests.current = {};
 setWaveformMap({});
+if (audioUtils) {
+audioUtils.input.dispose();
+}
 };
 }, [audioUtils]);
 const { delayRender, continueRender } = (0, remotion_1.useDelayRender)();
 const fetchMetadata = (0, react_1.useCallback)(async (signal) => {
-const
+const handle = delayRender(`Waiting for audio metadata with src="${src}" to be loaded`);
+const cont = () => {
+continueRender(handle);
+};
+signal.addEventListener('abort', cont, { once: true });
+const input = new mediabunny_1.Input({
+formats: mediabunny_1.ALL_FORMATS,
+source: new mediabunny_1.UrlSource(src),
+});
+const onAbort = () => {
+input.dispose();
+};
+signal.addEventListener('abort', onAbort, { once: true });
 try {
-const
-const
-
-
-
-const
-
-
-source,
-}), false);
-const onAbort = () => {
-input.dispose();
-};
-signal.addEventListener('abort', onAbort, { once: true });
-try {
-const durationInSeconds = await input.computeDuration();
-const audioTrack = await input.getPrimaryAudioTrack();
-if (!audioTrack) {
-throw new Error('No audio track found');
-}
-const canDecode = await audioTrack.canDecode();
-if (!canDecode) {
-throw new Error('Audio track cannot be decoded');
-}
-if (channelIndex >= audioTrack.numberOfChannels || channelIndex < 0) {
-throw new Error(`Invalid channel index ${channelIndex} for audio with ${audioTrack.numberOfChannels} channels`);
-}
-const { numberOfChannels, sampleRate } = audioTrack;
-const format = await input.getFormat();
-const isMatroska = format === mediabunny_1.MATROSKA || format === mediabunny_1.WEBM;
-if (isMounted.current) {
-setAudioUtils({
-metadata: {
-durationInSeconds,
-numberOfChannels,
-sampleRate,
-},
-isMatroska,
-});
-}
-continueRender(handle);
+const durationInSeconds = await input.computeDuration();
+const audioTrack = await input.getPrimaryAudioTrack();
+if (!audioTrack) {
+throw new Error('No audio track found');
+}
+const canDecode = await audioTrack.canDecode();
+if (!canDecode) {
+throw new Error('Audio track cannot be decoded');
 }
-
-(
+if (channelIndex >= audioTrack.numberOfChannels || channelIndex < 0) {
+throw new Error(`Invalid channel index ${channelIndex} for audio with ${audioTrack.numberOfChannels} channels`);
 }
-
-
-
+const { numberOfChannels, sampleRate } = audioTrack;
+const format = await input.getFormat();
+const isMatroska = format === mediabunny_1.MATROSKA || format === mediabunny_1.WEBM;
+if (isMounted.current) {
+setAudioUtils({
+input,
+track: audioTrack,
+metadata: {
+durationInSeconds,
+numberOfChannels,
+sampleRate,
+},
+isMatroska,
+});
 }
+continueRender(handle);
 }
-catch (
-
-env_1.hasError = true;
+catch (err) {
+(0, remotion_1.cancelRender)(err);
 }
 finally {
-
+signal.removeEventListener('abort', cont);
+signal.removeEventListener('abort', onAbort);
 }
 }, [src, delayRender, continueRender, channelIndex]);
 (0, react_1.useLayoutEffect)(() => {
@@ -198,7 +140,7 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
 remotion_1.Internals.Log.warn({ logLevel: 'info', tag: '@remotion/media-utils' }, `[useWindowedAudioData] Matroska/WebM file detected at "${src}".\n\nDue to format limitation, audio decoding must start from the beginning of the file, which may lead to increased memory usage and slower performance for large files. Consider converting the audio to a more suitable format like MP3 or AAC for better performance.`);
 }
 const partialWaveData = await (0, get_partial_audio_data_1.getPartialAudioData)({
-
+track: audioUtils.track,
 fromSeconds,
 toSeconds,
 channelIndex,
@@ -252,9 +194,23 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
 if (windowsToActuallyFetch.length === 0) {
 return;
 }
-
-
-
+// Prioritize the current window where playback is at.
+// On slow connections, this ensures the most important window loads first.
+const currentWindowNeedsFetch = windowsToActuallyFetch.includes(currentWindowIndex);
+const otherWindowsToFetch = windowsToActuallyFetch.filter((w) => w !== currentWindowIndex);
+const fetchWindows = async () => {
+// First, load the current window where playback is at
+if (currentWindowNeedsFetch) {
+await fetchAndSetWaveformData(currentWindowIndex);
+}
+// Then load the surrounding windows in parallel
+if (otherWindowsToFetch.length > 0) {
+await Promise.all(otherWindowsToFetch.map((windowIndex) => {
+return fetchAndSetWaveformData(windowIndex);
+}));
+}
+};
+fetchWindows().catch((err) => {
 var _a, _b, _c, _d, _e;
 if ((_a = err.stack) === null || _a === void 0 ? void 0 : _a.includes('Cancelled')) {
 return;
@@ -268,7 +224,13 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
 }
 (0, remotion_1.cancelRender)(err);
 });
-}, [
+}, [
+fetchAndSetWaveformData,
+audioUtils,
+windowsToFetch,
+waveFormMap,
+currentWindowIndex,
+]);
 // Calculate available windows for reuse
 const availableWindows = (0, react_1.useMemo)(() => {
 return windowsToFetch.filter((i) => waveFormMap[i]);
package/package.json
CHANGED
@@ -3,7 +3,7 @@
 "url": "https://github.com/remotion-dev/remotion/tree/main/packages/media-utils"
 },
 "name": "@remotion/media-utils",
-"version": "4.0.397",
+"version": "4.0.399",
 "description": "Utilities for working with media files",
 "main": "dist/index.js",
 "sideEffects": false,
@@ -18,17 +18,17 @@
 "url": "https://github.com/remotion-dev/remotion/issues"
 },
 "dependencies": {
-"@remotion/media-parser": "4.0.
-"@remotion/webcodecs": "4.0.
-"remotion": "4.0.
-"mediabunny": "1.27.
+"@remotion/media-parser": "4.0.399",
+"@remotion/webcodecs": "4.0.399",
+"remotion": "4.0.399",
+"mediabunny": "1.27.3"
 },
 "peerDependencies": {
 "react": ">=16.8.0",
 "react-dom": ">=16.8.0"
 },
 "devDependencies": {
-"@remotion/eslint-config-internal": "4.0.
+"@remotion/eslint-config-internal": "4.0.399",
 "eslint": "9.19.0"
 },
 "keywords": [
package/dist/get-partial-media-data-2.js
DELETED
@@ -1,104 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.getPartialMediaData = void 0;
-const media_parser_1 = require("@remotion/media-parser");
-const webcodecs_1 = require("@remotion/webcodecs");
-const getPartialMediaData = async ({ src, fromSeconds, toSeconds, channelIndex, signal, }) => {
-const controller = (0, media_parser_1.mediaParserController)();
-// Collect audio samples
-const audioSamples = [];
-// Abort if the signal is already aborted
-if (signal.aborted) {
-throw new Error('Operation was aborted');
-}
-try {
-if (fromSeconds > 0) {
-controller.seek(fromSeconds);
-}
-await (0, media_parser_1.parseMedia)({
-src,
-controller,
-onAudioTrack: ({ track }) => {
-if (!track) {
-throw new Error('No audio track found');
-}
-const audioDecoder = (0, webcodecs_1.createAudioDecoder)({
-track,
-onFrame: (sample) => {
-if (signal.aborted) {
-sample.close();
-return;
-}
-// For multi-channel audio, we need to handle channels properly
-const { numberOfChannels } = sample;
-const samplesPerChannel = sample.numberOfFrames;
-let data;
-if (numberOfChannels === 1) {
-// Mono audio
-data = new Float32Array(sample.allocationSize({ format: 'f32', planeIndex: 0 }));
-sample.copyTo(data, { format: 'f32', planeIndex: 0 });
-}
-else {
-// Multi-channel audio: extract specific channel
-const allChannelsData = new Float32Array(sample.allocationSize({ format: 'f32', planeIndex: 0 }));
-sample.copyTo(allChannelsData, { format: 'f32', planeIndex: 0 });
-// Extract the specific channel (interleaved audio)
-data = new Float32Array(samplesPerChannel);
-for (let i = 0; i < samplesPerChannel; i++) {
-data[i] = allChannelsData[i * numberOfChannels + channelIndex];
-}
-}
-audioSamples.push(data);
-sample.close();
-},
-onError(error) {
-throw error;
-},
-});
-// Listen for abort signal
-const onAbort = () => {
-controller.abort();
-if (audioDecoder) {
-audioDecoder.close();
-}
-};
-signal.addEventListener('abort', onAbort, { once: true });
-return async (sample) => {
-if (signal.aborted) {
-return;
-}
-// Convert timestamp using the track's timescale
-const time = sample.timestamp / track.timescale;
-console.log(time);
-// Stop immediately when we reach our target time
-if (time >= toSeconds) {
-// abort media parsing, we reached the point where we want to stop
-controller.abort();
-return;
-}
-// Decode the sample using the sample directly
-await audioDecoder.waitForQueueToBeLessThan(10);
-// we're waiting for the queue above anyway, enqueue in sync mode
-audioDecoder.decode(sample);
-};
-},
-});
-}
-catch (err) {
-const isAbortedByTimeCutoff = (0, media_parser_1.hasBeenAborted)(err);
-// Don't throw if we stopped the parsing ourselves
-if (!isAbortedByTimeCutoff && !signal.aborted) {
-throw err;
-}
-}
-// Simply concatenate all audio data since windowing handles the time ranges
-const totalSamples = audioSamples.reduce((sum, sample) => sum + sample.length, 0);
-const result = new Float32Array(totalSamples);
-let offset = 0;
-for (const audioSample of audioSamples) {
-result.set(audioSample, offset);
-offset += audioSample.length;
-}
-return result;
-};
-exports.getPartialMediaData = getPartialMediaData;
package/dist/get-partial-media-data.js
DELETED
@@ -1,105 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.getPartialMediaData = void 0;
-const media_parser_1 = require("@remotion/media-parser");
-const worker_1 = require("@remotion/media-parser/worker");
-const webcodecs_1 = require("@remotion/webcodecs");
-const getPartialMediaData = async ({ src, fromSeconds, toSeconds, channelIndex, signal, }) => {
-const controller = (0, media_parser_1.mediaParserController)();
-// Collect audio samples
-const audioSamples = [];
-// Abort if the signal is already aborted
-if (signal.aborted) {
-throw new Error('Operation was aborted');
-}
-try {
-if (fromSeconds > 0) {
-controller.seek(fromSeconds);
-}
-await (0, worker_1.parseMediaOnWebWorker)({
-src,
-controller,
-onAudioTrack: ({ track }) => {
-if (!track) {
-throw new Error('No audio track found');
-}
-const audioDecoder = (0, webcodecs_1.createAudioDecoder)({
-track,
-onFrame: (sample) => {
-if (signal.aborted) {
-sample.close();
-return;
-}
-// For multi-channel audio, we need to handle channels properly
-const { numberOfChannels } = sample;
-const samplesPerChannel = sample.numberOfFrames;
-let data;
-if (numberOfChannels === 1) {
-// Mono audio
-data = new Float32Array(sample.allocationSize({ format: 'f32', planeIndex: 0 }));
-sample.copyTo(data, { format: 'f32', planeIndex: 0 });
-}
-else {
-// Multi-channel audio: extract specific channel
-const allChannelsData = new Float32Array(sample.allocationSize({ format: 'f32', planeIndex: 0 }));
-sample.copyTo(allChannelsData, { format: 'f32', planeIndex: 0 });
-// Extract the specific channel (interleaved audio)
-data = new Float32Array(samplesPerChannel);
-for (let i = 0; i < samplesPerChannel; i++) {
-data[i] = allChannelsData[i * numberOfChannels + channelIndex];
-}
-}
-audioSamples.push(data);
-sample.close();
-},
-onError(error) {
-throw error;
-},
-});
-// Listen for abort signal
-const onAbort = () => {
-controller.abort();
-if (audioDecoder) {
-audioDecoder.close();
-}
-};
-signal.addEventListener('abort', onAbort, { once: true });
-return async (sample) => {
-if (signal.aborted) {
-return;
-}
-// Convert timestamp using the track's timescale
-const time = sample.timestamp / track.timescale;
-// Stop immediately when we reach our target time
-if (time >= toSeconds) {
-// abort media parsing, we reached the point where we want to stop
-controller.abort();
-await audioDecoder.flush();
-return;
-}
-// Decode the sample using the sample directly
-await audioDecoder.waitForQueueToBeLessThan(10);
-// we're waiting for the queue above anyway, enqueue in sync mode
-audioDecoder.decode(sample);
-};
-},
-});
-}
-catch (err) {
-const isAbortedByTimeCutoff = (0, media_parser_1.hasBeenAborted)(err);
-// Don't throw if we stopped the parsing ourselves
-if (!isAbortedByTimeCutoff && !signal.aborted) {
-throw err;
-}
-}
-// Simply concatenate all audio data since windowing handles the time ranges
-const totalSamples = audioSamples.reduce((sum, sample) => sum + sample.length, 0);
-const result = new Float32Array(totalSamples);
-let offset = 0;
-for (const audioSample of audioSamples) {
-result.set(audioSample, offset);
-offset += audioSample.length;
-}
-return result;
-};
-exports.getPartialMediaData = getPartialMediaData;