@remotion/media-utils 4.0.394 → 4.0.396

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,10 +1,9 @@
- import type { InputAudioTrack } from 'mediabunny';
  export type GetPartialAudioDataProps = {
- track: InputAudioTrack;
  fromSeconds: number;
  toSeconds: number;
  channelIndex: number;
  signal: AbortSignal;
- isMatroska?: boolean;
+ src: string;
+ isMatroska: boolean;
  };
- export declare const getPartialAudioData: ({ track, fromSeconds, toSeconds, channelIndex, signal, isMatroska, }: GetPartialAudioDataProps) => Promise<Float32Array>;
+ export declare const getPartialAudioData: ({ fromSeconds, toSeconds, channelIndex, signal, src, isMatroska, }: GetPartialAudioDataProps) => Promise<Float32Array>;
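The breaking change in this release: getPartialAudioData now takes the media URL as src instead of a pre-opened mediabunny InputAudioTrack, and isMatroska is now required rather than optional (it previously defaulted to false). A minimal sketch of the new call shape, derived from the declaration above; the URL and the AbortController wiring are illustrative:

// Sketch only; assumes getPartialAudioData is in scope (it lives in this
// package's dist/get-partial-audio-data module, not the public index).
declare const getPartialAudioData: (props: {
  src: string;
  fromSeconds: number;
  toSeconds: number;
  channelIndex: number;
  signal: AbortSignal;
  isMatroska: boolean;
}) => Promise<Float32Array>;

const controller = new AbortController();
const channelData = await getPartialAudioData({
  src: 'https://example.com/audio.mp3', // previously: track (an InputAudioTrack)
  fromSeconds: 0,
  toSeconds: 2,
  channelIndex: 0,
  signal: controller.signal,
  isMatroska: false, // previously optional with a default of false; now required
});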
@@ -1,4 +1,56 @@
  "use strict";
+ var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
+ if (value !== null && value !== void 0) {
+ if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
+ var dispose, inner;
+ if (async) {
+ if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
+ dispose = value[Symbol.asyncDispose];
+ }
+ if (dispose === void 0) {
+ if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
+ dispose = value[Symbol.dispose];
+ if (async) inner = dispose;
+ }
+ if (typeof dispose !== "function") throw new TypeError("Object not disposable.");
+ if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };
+ env.stack.push({ value: value, dispose: dispose, async: async });
+ }
+ else if (async) {
+ env.stack.push({ async: true });
+ }
+ return value;
+ };
+ var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
+ return function (env) {
+ function fail(e) {
+ env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
+ env.hasError = true;
+ }
+ var r, s = 0;
+ function next() {
+ while (r = env.stack.pop()) {
+ try {
+ if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
+ if (r.dispose) {
+ var result = r.dispose.call(r.value);
+ if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
+ }
+ else s |= 1;
+ }
+ catch (e) {
+ fail(e);
+ }
+ }
+ if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
+ if (env.hasError) throw env.error;
+ }
+ return next();
+ };
+ })(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
+ var e = new Error(message);
+ return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
+ });
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.getPartialAudioData = void 0;
  const mediabunny_1 = require("mediabunny");
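The __addDisposableResource and __disposeResources helpers above are TypeScript's downlevel emit for explicit resource management, i.e. `using` declarations (TypeScript 5.2+). A sketch of the source-level shape they compile from; the compiled output below registers the mediabunny Input for synchronous disposal, which implies Input implements Symbol.dispose:

import { ALL_FORMATS, Input, UrlSource } from 'mediabunny';

// Sketch, not the package's actual source file.
async function readDuration(src: string): Promise<number> {
  // `using` schedules input[Symbol.dispose]() to run when this scope exits,
  // normally or via a thrown error; the helpers above implement exactly that
  // bookkeeping for runtimes without native support.
  using input = new Input({ formats: ALL_FORMATS, source: new UrlSource(src) });
  // `return await` ensures the read finishes before the input is disposed.
  return await input.computeDuration();
}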
@@ -6,43 +58,64 @@ const mediabunny_1 = require("mediabunny");
  // The worst case seems to be FLAC files with a 65'535 sample window, which would be 1486.0ms at 44.1Khz.
  // So let's set a threshold of 1.5 seconds.
  const EXTRA_THRESHOLD_IN_SECONDS = 1.5;
- const getPartialAudioData = async ({ track, fromSeconds, toSeconds, channelIndex, signal, isMatroska = false, }) => {
- if (signal.aborted) {
- throw new Error('Operation was aborted');
- }
- const audioSamples = [];
- // matroska must be decoded from the start due to limitation
- // https://www.remotion.dev/docs/media/support#matroska-limitation
- // Also request extra data beforehand to handle audio frame dependencies
- const actualFromSeconds = isMatroska
- ? 0
- : Math.max(0, fromSeconds - EXTRA_THRESHOLD_IN_SECONDS);
- // mediabunny docs: constructing the sink is virtually free and does not perform any media data reads.
- const sink = new mediabunny_1.AudioBufferSink(track);
- for await (const { buffer, timestamp, duration } of sink.buffers(actualFromSeconds, toSeconds)) {
+ const getPartialAudioData = async ({ fromSeconds, toSeconds, channelIndex, signal, src, isMatroska, }) => {
+ const env_1 = { stack: [], error: void 0, hasError: false };
+ try {
  if (signal.aborted) {
- break;
- }
- const channelData = buffer.getChannelData(channelIndex);
- const bufferStartSeconds = timestamp;
- const bufferEndSeconds = timestamp + duration;
- const overlapStartSecond = Math.max(bufferStartSeconds, fromSeconds);
- const overlapEndSecond = Math.min(bufferEndSeconds, toSeconds);
- if (overlapStartSecond >= overlapEndSecond) {
- continue;
- }
- const startSampleInBuffer = Math.floor((overlapStartSecond - bufferStartSeconds) * buffer.sampleRate);
- const endSampleInBuffer = Math.ceil((overlapEndSecond - bufferStartSeconds) * buffer.sampleRate);
- const trimmedData = channelData.slice(startSampleInBuffer, endSampleInBuffer);
- audioSamples.push(trimmedData);
+ throw new Error('Operation was aborted');
+ }
+ const audioSamples = [];
+ // matroska must be decoded from the start due to limitation
+ // https://www.remotion.dev/docs/media/support#matroska-limitation
+ // Also request extra data beforehand to handle audio frame dependencies
+ const actualFromSeconds = isMatroska
+ ? 0
+ : Math.max(0, fromSeconds - EXTRA_THRESHOLD_IN_SECONDS);
+ const source = new mediabunny_1.UrlSource(src);
+ const input = __addDisposableResource(env_1, new mediabunny_1.Input({
+ formats: mediabunny_1.ALL_FORMATS,
+ source,
+ }), false);
+ const track = await input.getPrimaryAudioTrack();
+ if (!track) {
+ throw new Error('No audio track found');
+ }
+ // mediabunny docs: constructing the sink is virtually free and does not perform any media data reads.
+ const sink = new mediabunny_1.AudioBufferSink(track);
+ const iterator = sink.buffers(actualFromSeconds, toSeconds);
+ for await (const { buffer, timestamp, duration } of iterator) {
+ if (signal.aborted) {
+ break;
+ }
+ const channelData = buffer.getChannelData(channelIndex);
+ const bufferStartSeconds = timestamp;
+ const bufferEndSeconds = timestamp + duration;
+ const overlapStartSecond = Math.max(bufferStartSeconds, fromSeconds);
+ const overlapEndSecond = Math.min(bufferEndSeconds, toSeconds);
+ if (overlapStartSecond >= overlapEndSecond) {
+ continue;
+ }
+ const startSampleInBuffer = Math.floor((overlapStartSecond - bufferStartSeconds) * buffer.sampleRate);
+ const endSampleInBuffer = Math.ceil((overlapEndSecond - bufferStartSeconds) * buffer.sampleRate);
+ const trimmedData = channelData.slice(startSampleInBuffer, endSampleInBuffer);
+ audioSamples.push(trimmedData);
+ }
+ await iterator.return();
+ const totalSamples = audioSamples.reduce((sum, sample) => sum + sample.length, 0);
+ const result = new Float32Array(totalSamples);
+ let offset = 0;
+ for (const audioSample of audioSamples) {
+ result.set(audioSample, offset);
+ offset += audioSample.length;
+ }
+ return result;
+ }
+ catch (e_1) {
+ env_1.error = e_1;
+ env_1.hasError = true;
  }
- const totalSamples = audioSamples.reduce((sum, sample) => sum + sample.length, 0);
- const result = new Float32Array(totalSamples);
- let offset = 0;
- for (const audioSample of audioSamples) {
- result.set(audioSample, offset);
- offset += audioSample.length;
+ finally {
+ __disposeResources(env_1);
  }
- return result;
  };
  exports.getPartialAudioData = getPartialAudioData;
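The overlap-and-trim math is unchanged between the two versions: each decoded buffer is clipped to the requested window. A worked instance with illustrative numbers:

// Illustrative values only.
const sampleRate = 44100;
const bufferStartSeconds = 1.0; // timestamp of the decoded buffer
const bufferEndSeconds = 2.0;   // timestamp + duration
const fromSeconds = 1.25;       // requested window start
const toSeconds = 3.0;          // requested window end

const overlapStartSecond = Math.max(bufferStartSeconds, fromSeconds); // 1.25
const overlapEndSecond = Math.min(bufferEndSeconds, toSeconds);       // 2.0
const startSampleInBuffer =
  Math.floor((overlapStartSecond - bufferStartSeconds) * sampleRate); // 11025
const endSampleInBuffer =
  Math.ceil((overlapEndSecond - bufferStartSeconds) * sampleRate);    // 44100
// channelData.slice(11025, 44100) keeps exactly the 0.75s of overlap.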
@@ -0,0 +1,7 @@
+ export declare const getPartialMediaData: ({ src, fromSeconds, toSeconds, channelIndex, signal, }: {
+ src: string;
+ fromSeconds: number;
+ toSeconds: number;
+ channelIndex: number;
+ signal: AbortSignal;
+ }) => Promise<Float32Array>;
@@ -0,0 +1,104 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.getPartialMediaData = void 0;
+ const media_parser_1 = require("@remotion/media-parser");
+ const webcodecs_1 = require("@remotion/webcodecs");
+ const getPartialMediaData = async ({ src, fromSeconds, toSeconds, channelIndex, signal, }) => {
+ const controller = (0, media_parser_1.mediaParserController)();
+ // Collect audio samples
+ const audioSamples = [];
+ // Abort if the signal is already aborted
+ if (signal.aborted) {
+ throw new Error('Operation was aborted');
+ }
+ try {
+ if (fromSeconds > 0) {
+ controller.seek(fromSeconds);
+ }
+ await (0, media_parser_1.parseMedia)({
+ src,
+ controller,
+ onAudioTrack: ({ track }) => {
+ if (!track) {
+ throw new Error('No audio track found');
+ }
+ const audioDecoder = (0, webcodecs_1.createAudioDecoder)({
+ track,
+ onFrame: (sample) => {
+ if (signal.aborted) {
+ sample.close();
+ return;
+ }
+ // For multi-channel audio, we need to handle channels properly
+ const { numberOfChannels } = sample;
+ const samplesPerChannel = sample.numberOfFrames;
+ let data;
+ if (numberOfChannels === 1) {
+ // Mono audio
+ data = new Float32Array(sample.allocationSize({ format: 'f32', planeIndex: 0 }));
+ sample.copyTo(data, { format: 'f32', planeIndex: 0 });
+ }
+ else {
+ // Multi-channel audio: extract specific channel
+ const allChannelsData = new Float32Array(sample.allocationSize({ format: 'f32', planeIndex: 0 }));
+ sample.copyTo(allChannelsData, { format: 'f32', planeIndex: 0 });
+ // Extract the specific channel (interleaved audio)
+ data = new Float32Array(samplesPerChannel);
+ for (let i = 0; i < samplesPerChannel; i++) {
+ data[i] = allChannelsData[i * numberOfChannels + channelIndex];
+ }
+ }
+ audioSamples.push(data);
+ sample.close();
+ },
+ onError(error) {
+ throw error;
+ },
+ });
+ // Listen for abort signal
+ const onAbort = () => {
+ controller.abort();
+ if (audioDecoder) {
+ audioDecoder.close();
+ }
+ };
+ signal.addEventListener('abort', onAbort, { once: true });
+ return async (sample) => {
+ if (signal.aborted) {
+ return;
+ }
+ // Convert timestamp using the track's timescale
+ const time = sample.timestamp / track.timescale;
+ console.log(time);
+ // Stop immediately when we reach our target time
+ if (time >= toSeconds) {
+ // abort media parsing, we reached the point where we want to stop
+ controller.abort();
+ return;
+ }
+ // Decode the sample using the sample directly
+ await audioDecoder.waitForQueueToBeLessThan(10);
+ // we're waiting for the queue above anyway, enqueue in sync mode
+ audioDecoder.decode(sample);
+ };
+ },
+ });
+ }
+ catch (err) {
+ const isAbortedByTimeCutoff = (0, media_parser_1.hasBeenAborted)(err);
+ // Don't throw if we stopped the parsing ourselves
+ if (!isAbortedByTimeCutoff && !signal.aborted) {
+ throw err;
+ }
+ }
+ // Simply concatenate all audio data since windowing handles the time ranges
+ const totalSamples = audioSamples.reduce((sum, sample) => sum + sample.length, 0);
+ const result = new Float32Array(totalSamples);
+ let offset = 0;
+ for (const audioSample of audioSamples) {
+ result.set(audioSample, offset);
+ offset += audioSample.length;
+ }
+ return result;
+ };
+ exports.getPartialMediaData = getPartialMediaData;
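The multi-channel branch above assumes the f32 data in plane 0 is interleaved across channels, as its own comment states. A worked instance of the indexing:

// Illustrative: stereo decoded as interleaved f32, laid out [L0, R0, L1, R1, ...]
const numberOfChannels = 2;
const channelIndex = 1; // right channel
const samplesPerChannel = 4;
const allChannelsData = new Float32Array([0, 10, 1, 11, 2, 12, 3, 13]);

const data = new Float32Array(samplesPerChannel);
for (let i = 0; i < samplesPerChannel; i++) {
  // frame i of channel 1 lives at index i * 2 + 1
  data[i] = allChannelsData[i * numberOfChannels + channelIndex];
}
// data is now Float32Array [10, 11, 12, 13]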
@@ -0,0 +1,7 @@
+ export declare const getPartialMediaData: ({ src, fromSeconds, toSeconds, channelIndex, signal, }: {
+ src: string;
+ fromSeconds: number;
+ toSeconds: number;
+ channelIndex: number;
+ signal: AbortSignal;
+ }) => Promise<Float32Array>;
@@ -0,0 +1,105 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.getPartialMediaData = void 0;
+ const media_parser_1 = require("@remotion/media-parser");
+ const worker_1 = require("@remotion/media-parser/worker");
+ const webcodecs_1 = require("@remotion/webcodecs");
+ const getPartialMediaData = async ({ src, fromSeconds, toSeconds, channelIndex, signal, }) => {
+ const controller = (0, media_parser_1.mediaParserController)();
+ // Collect audio samples
+ const audioSamples = [];
+ // Abort if the signal is already aborted
+ if (signal.aborted) {
+ throw new Error('Operation was aborted');
+ }
+ try {
+ if (fromSeconds > 0) {
+ controller.seek(fromSeconds);
+ }
+ await (0, worker_1.parseMediaOnWebWorker)({
+ src,
+ controller,
+ onAudioTrack: ({ track }) => {
+ if (!track) {
+ throw new Error('No audio track found');
+ }
+ const audioDecoder = (0, webcodecs_1.createAudioDecoder)({
+ track,
+ onFrame: (sample) => {
+ if (signal.aborted) {
+ sample.close();
+ return;
+ }
+ // For multi-channel audio, we need to handle channels properly
+ const { numberOfChannels } = sample;
+ const samplesPerChannel = sample.numberOfFrames;
+ let data;
+ if (numberOfChannels === 1) {
+ // Mono audio
+ data = new Float32Array(sample.allocationSize({ format: 'f32', planeIndex: 0 }));
+ sample.copyTo(data, { format: 'f32', planeIndex: 0 });
+ }
+ else {
+ // Multi-channel audio: extract specific channel
+ const allChannelsData = new Float32Array(sample.allocationSize({ format: 'f32', planeIndex: 0 }));
+ sample.copyTo(allChannelsData, { format: 'f32', planeIndex: 0 });
+ // Extract the specific channel (interleaved audio)
+ data = new Float32Array(samplesPerChannel);
+ for (let i = 0; i < samplesPerChannel; i++) {
+ data[i] = allChannelsData[i * numberOfChannels + channelIndex];
+ }
+ }
+ audioSamples.push(data);
+ sample.close();
+ },
+ onError(error) {
+ throw error;
+ },
+ });
+ // Listen for abort signal
+ const onAbort = () => {
+ controller.abort();
+ if (audioDecoder) {
+ audioDecoder.close();
+ }
+ };
+ signal.addEventListener('abort', onAbort, { once: true });
+ return async (sample) => {
+ if (signal.aborted) {
+ return;
+ }
+ // Convert timestamp using the track's timescale
+ const time = sample.timestamp / track.timescale;
+ // Stop immediately when we reach our target time
+ if (time >= toSeconds) {
+ // abort media parsing, we reached the point where we want to stop
+ controller.abort();
+ await audioDecoder.flush();
+ return;
+ }
+ // Decode the sample using the sample directly
+ await audioDecoder.waitForQueueToBeLessThan(10);
+ // we're waiting for the queue above anyway, enqueue in sync mode
+ audioDecoder.decode(sample);
+ };
+ },
+ });
+ }
+ catch (err) {
+ const isAbortedByTimeCutoff = (0, media_parser_1.hasBeenAborted)(err);
+ // Don't throw if we stopped the parsing ourselves
+ if (!isAbortedByTimeCutoff && !signal.aborted) {
+ throw err;
+ }
+ }
+ // Simply concatenate all audio data since windowing handles the time ranges
+ const totalSamples = audioSamples.reduce((sum, sample) => sum + sample.length, 0);
+ const result = new Float32Array(totalSamples);
+ let offset = 0;
+ for (const audioSample of audioSamples) {
+ result.set(audioSample, offset);
+ offset += audioSample.length;
+ }
+ return result;
+ };
+ exports.getPartialMediaData = getPartialMediaData;
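This second implementation differs from the previous one in three ways: parsing runs off the main thread via parseMediaOnWebWorker, the decoder is flushed at the time cutoff so already-queued samples still reach onFrame, and the stray console.log(time) debug line is absent. Both share the same queue-backpressure pattern, isolated here as a sketch; track, samples, and the callbacks stand in for values the parser provides:

import { createAudioDecoder } from '@remotion/webcodecs';

declare const track: any;      // audio track supplied by the parser
declare const samples: any[];  // encoded samples supplied by the parser

const decoder = createAudioDecoder({
  track,
  onFrame: (frame: AudioData) => frame.close(), // consume decoded frames
  onError: (err: Error) => { throw err; },
});

for (const sample of samples) {
  await decoder.waitForQueueToBeLessThan(10); // cap in-flight decode work
  decoder.decode(sample); // safe to enqueue synchronously once below the cap
}
await decoder.flush(); // drain remaining queued samples through onFrame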
@@ -1,4 +1,56 @@
  "use strict";
+ var __addDisposableResource = (this && this.__addDisposableResource) || function (env, value, async) {
+ if (value !== null && value !== void 0) {
+ if (typeof value !== "object" && typeof value !== "function") throw new TypeError("Object expected.");
+ var dispose, inner;
+ if (async) {
+ if (!Symbol.asyncDispose) throw new TypeError("Symbol.asyncDispose is not defined.");
+ dispose = value[Symbol.asyncDispose];
+ }
+ if (dispose === void 0) {
+ if (!Symbol.dispose) throw new TypeError("Symbol.dispose is not defined.");
+ dispose = value[Symbol.dispose];
+ if (async) inner = dispose;
+ }
+ if (typeof dispose !== "function") throw new TypeError("Object not disposable.");
+ if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };
+ env.stack.push({ value: value, dispose: dispose, async: async });
+ }
+ else if (async) {
+ env.stack.push({ async: true });
+ }
+ return value;
+ };
+ var __disposeResources = (this && this.__disposeResources) || (function (SuppressedError) {
+ return function (env) {
+ function fail(e) {
+ env.error = env.hasError ? new SuppressedError(e, env.error, "An error was suppressed during disposal.") : e;
+ env.hasError = true;
+ }
+ var r, s = 0;
+ function next() {
+ while (r = env.stack.pop()) {
+ try {
+ if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);
+ if (r.dispose) {
+ var result = r.dispose.call(r.value);
+ if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });
+ }
+ else s |= 1;
+ }
+ catch (e) {
+ fail(e);
+ }
+ }
+ if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();
+ if (env.hasError) throw env.error;
+ }
+ return next();
+ };
+ })(typeof SuppressedError === "function" ? SuppressedError : function (error, suppressed, message) {
+ var e = new Error(message);
+ return e.name = "SuppressedError", e.error = error, e.suppressed = suppressed, e;
+ });
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.useWindowedAudioData = void 0;
  const mediabunny_1 = require("mediabunny");
@@ -28,63 +80,68 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
  });
  requests.current = {};
  setWaveformMap({});
- if (audioUtils) {
- audioUtils.input.dispose();
- }
  };
  }, [audioUtils]);
  const { delayRender, continueRender } = (0, remotion_1.useDelayRender)();
  const fetchMetadata = (0, react_1.useCallback)(async (signal) => {
- const handle = delayRender(`Waiting for audio metadata with src="${src}" to be loaded`);
- const cont = () => {
- continueRender(handle);
- };
- signal.addEventListener('abort', cont, { once: true });
- const input = new mediabunny_1.Input({
- formats: mediabunny_1.ALL_FORMATS,
- source: new mediabunny_1.UrlSource(src),
- });
- const onAbort = () => {
- input.dispose();
- };
- signal.addEventListener('abort', onAbort, { once: true });
+ const env_1 = { stack: [], error: void 0, hasError: false };
  try {
- const durationInSeconds = await input.computeDuration();
- const audioTrack = await input.getPrimaryAudioTrack();
- if (!audioTrack) {
- throw new Error('No audio track found');
- }
- const canDecode = await audioTrack.canDecode();
- if (!canDecode) {
- throw new Error('Audio track cannot be decoded');
+ const handle = delayRender(`Waiting for audio metadata with src="${src}" to be loaded`);
+ const cont = () => {
+ continueRender(handle);
+ };
+ signal.addEventListener('abort', cont, { once: true });
+ const source = new mediabunny_1.UrlSource(src);
+ const input = __addDisposableResource(env_1, new mediabunny_1.Input({
+ formats: mediabunny_1.ALL_FORMATS,
+ source,
+ }), false);
+ const onAbort = () => {
+ input.dispose();
+ };
+ signal.addEventListener('abort', onAbort, { once: true });
+ try {
+ const durationInSeconds = await input.computeDuration();
+ const audioTrack = await input.getPrimaryAudioTrack();
+ if (!audioTrack) {
+ throw new Error('No audio track found');
+ }
+ const canDecode = await audioTrack.canDecode();
+ if (!canDecode) {
+ throw new Error('Audio track cannot be decoded');
+ }
+ if (channelIndex >= audioTrack.numberOfChannels || channelIndex < 0) {
+ throw new Error(`Invalid channel index ${channelIndex} for audio with ${audioTrack.numberOfChannels} channels`);
+ }
+ const { numberOfChannels, sampleRate } = audioTrack;
+ const format = await input.getFormat();
+ const isMatroska = format === mediabunny_1.MATROSKA || format === mediabunny_1.WEBM;
+ if (isMounted.current) {
+ setAudioUtils({
+ metadata: {
+ durationInSeconds,
+ numberOfChannels,
+ sampleRate,
+ },
+ isMatroska,
+ });
+ }
+ continueRender(handle);
  }
- if (channelIndex >= audioTrack.numberOfChannels || channelIndex < 0) {
- throw new Error(`Invalid channel index ${channelIndex} for audio with ${audioTrack.numberOfChannels} channels`);
+ catch (err) {
+ (0, remotion_1.cancelRender)(err);
  }
- const { numberOfChannels, sampleRate } = audioTrack;
- const format = await input.getFormat();
- const isMatroska = format === mediabunny_1.MATROSKA || format === mediabunny_1.WEBM;
- if (isMounted.current) {
- setAudioUtils({
- input,
- track: audioTrack,
- metadata: {
- durationInSeconds,
- numberOfChannels,
- sampleRate,
- },
- isMatroska,
- });
+ finally {
+ signal.removeEventListener('abort', cont);
+ signal.removeEventListener('abort', onAbort);
  }
- continueRender(handle);
  }
- catch (err) {
- input.dispose();
- (0, remotion_1.cancelRender)(err);
+ catch (e_1) {
+ env_1.error = e_1;
+ env_1.hasError = true;
  }
  finally {
- signal.removeEventListener('abort', cont);
- signal.removeEventListener('abort', onAbort);
+ __disposeResources(env_1);
  }
  }, [src, delayRender, continueRender, channelIndex]);
  (0, react_1.useLayoutEffect)(() => {
@@ -112,7 +169,7 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
  ]
  .filter((i) => i !== null)
  .filter((i) => i >= 0);
- }, [currentWindowIndex, audioUtils === null || audioUtils === void 0 ? void 0 : audioUtils.metadata, windowInSeconds]);
+ }, [currentWindowIndex, audioUtils, windowInSeconds]);
  const fetchAndSetWaveformData = (0, react_1.useCallback)(async (windowIndex) => {
  if (!(audioUtils === null || audioUtils === void 0 ? void 0 : audioUtils.metadata) || !audioUtils) {
  throw new Error('MediaBunny context is not loaded yet');
@@ -141,7 +198,7 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
  remotion_1.Internals.Log.warn({ logLevel: 'info', tag: '@remotion/media-utils' }, `[useWindowedAudioData] Matroska/WebM file detected at "${src}".\n\nDue to format limitation, audio decoding must start from the beginning of the file, which may lead to increased memory usage and slower performance for large files. Consider converting the audio to a more suitable format like MP3 or AAC for better performance.`);
  }
  const partialWaveData = await (0, get_partial_audio_data_1.getPartialAudioData)({
- track: audioUtils.track,
+ src,
  fromSeconds,
  toSeconds,
  channelIndex,
@@ -191,7 +248,7 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
  }
  }
  // Only fetch windows that don't already exist
- const windowsToActuallyFetch = windowsToFetch.filter((windowIndex) => !waveFormMap[windowIndex]);
+ const windowsToActuallyFetch = windowsToFetch.filter((windowIndex) => !waveFormMap[windowIndex] && !requests.current[windowIndex]);
  if (windowsToActuallyFetch.length === 0) {
  return;
  }
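Besides passing src through to getPartialAudioData, the hook now also guards against duplicate fetches: a window is skipped when it is already cached or already has an in-flight request. A standalone sketch of the new filter; the value types here are hypothetical:

const waveFormMap: Record<number, Float32Array> = { 0: new Float32Array(0) };
const requests = { current: {} as Record<number, unknown> };
requests.current[1] = {}; // window 1 is already being fetched
const windowsToFetch = [0, 1, 2];

// Window 0 is cached and window 1 is in flight, so only window 2 remains.
const windowsToActuallyFetch = windowsToFetch.filter(
  (windowIndex) => !waveFormMap[windowIndex] && !requests.current[windowIndex],
);
// windowsToActuallyFetch is [2]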
@@ -233,7 +290,7 @@ const useWindowedAudioData = ({ src, frame, fps, windowInSeconds, channelIndex =
  resultId: `${src}-windows-${availableWindows.join(',')}`,
  sampleRate: audioUtils.metadata.sampleRate,
  };
- }, [src, waveFormMap, audioUtils === null || audioUtils === void 0 ? void 0 : audioUtils.metadata, availableWindows]);
+ }, [src, waveFormMap, audioUtils, availableWindows]);
  const isBeyondAudioDuration = audioUtils
  ? currentTime >= audioUtils.metadata.durationInSeconds
  : false;
package/package.json CHANGED
@@ -3,7 +3,7 @@
  "url": "https://github.com/remotion-dev/remotion/tree/main/packages/media-utils"
  },
  "name": "@remotion/media-utils",
- "version": "4.0.394",
+ "version": "4.0.396",
  "description": "Utilities for working with media files",
  "main": "dist/index.js",
  "sideEffects": false,
@@ -18,17 +18,17 @@
  "url": "https://github.com/remotion-dev/remotion/issues"
  },
  "dependencies": {
- "@remotion/media-parser": "4.0.394",
- "@remotion/webcodecs": "4.0.394",
- "remotion": "4.0.394",
- "mediabunny": "1.27.0"
+ "@remotion/media-parser": "4.0.396",
+ "@remotion/webcodecs": "4.0.396",
+ "remotion": "4.0.396",
+ "mediabunny": "1.27.2"
  },
  "peerDependencies": {
  "react": ">=16.8.0",
  "react-dom": ">=16.8.0"
  },
  "devDependencies": {
- "@remotion/eslint-config-internal": "4.0.394",
+ "@remotion/eslint-config-internal": "4.0.396",
  "eslint": "9.19.0"
  },
  "keywords": [