@remotion/media 4.0.369 → 4.0.371

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,6 @@
+ import type { WrappedAudioBuffer } from 'mediabunny';
+ export type AllowWait = {
+     type: 'allow-wait';
+     waitCallback: () => () => void;
+ };
+ export declare const allowWaitRoutine: (next: Promise<IteratorResult<WrappedAudioBuffer, void>>, waitFn: AllowWait) => Promise<IteratorResult<WrappedAudioBuffer, void>>;
@@ -0,0 +1,15 @@
+ export const allowWaitRoutine = async (next, waitFn) => {
+     const result = await Promise.race([
+         next,
+         new Promise((resolve) => {
+             Promise.resolve().then(() => resolve());
+         }),
+     ]);
+     if (!result) {
+         const unblock = waitFn.waitCallback();
+         const newRes = await next;
+         unblock();
+         return newRes;
+     }
+     return result;
+ };
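
Note on the new `allowWaitRoutine` above: it races the iterator's `next` promise against a promise that resolves to `undefined` after one microtask. If `next` is already settled, its (always truthy) `IteratorResult` wins the race; otherwise `undefined` wins, `waitCallback` is invoked to signal buffering, and the routine awaits `next` for real before unblocking. A minimal sketch of the same pattern — `probeOrWait` and `onWaitStart` are illustrative names, not part of the package:

```ts
// Sketch: return immediately if `pending` has already settled,
// otherwise signal "waiting" until it does.
const probeOrWait = async <T extends object>(
  pending: Promise<T>,
  onWaitStart: () => () => void, // returns an "unblock" function
): Promise<T> => {
  const settled = await Promise.race([
    pending,
    Promise.resolve().then(() => undefined), // loses only if `pending` is settled
  ]);
  if (settled !== undefined) {
    return settled; // value was available without waiting
  }
  const unblock = onWaitStart(); // e.g. enter a buffering state
  try {
    return await pending;
  } finally {
    unblock(); // always leave the buffering state
  }
};
```

Like the original, this relies on the racing value never legitimately being `undefined` — an `IteratorResult` object always is truthy, so `undefined` reliably means "not settled yet".
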
@@ -1,5 +1,5 @@
  import { jsx as _jsx } from "react/jsx-runtime";
- import { useContext, useEffect, useMemo, useRef, useState } from 'react';
+ import { useContext, useEffect, useLayoutEffect, useMemo, useRef, useState, } from 'react';
  import { Internals, Audio as RemotionAudio, useBufferState, useCurrentFrame, useVideoConfig, } from 'remotion';
  import { getTimeInSeconds } from '../get-time-in-seconds';
  import { MediaPlayer } from '../media-player';
@@ -181,7 +181,7 @@ const AudioForPreviewAssertedShowing = ({ src, playbackRate, logLevel, muted, vo
              audioPlayer.pause();
          }
      }, [isPlayerBuffering, logLevel, playing]);
-     useEffect(() => {
+     useLayoutEffect(() => {
          const audioPlayer = mediaPlayerRef.current;
          if (!audioPlayer || !mediaPlayerReady)
              return;
@@ -1,4 +1,5 @@
  import type { AudioBufferSink, WrappedAudioBuffer } from 'mediabunny';
+ import { type AllowWait } from './allow-wait';
  export declare const HEALTHY_BUFFER_THRESHOLD_SECONDS = 1;
  export type QueuedNode = {
      node: AudioBufferSourceNode;
@@ -15,16 +16,15 @@ export declare const makeAudioIterator: (audioSink: AudioBufferSink, startFromSe
          buffer: AudioBuffer;
          timestamp: number;
      }[];
-     getQueuedPeriod: (pendingBuffers: WrappedAudioBuffer[]) => {
+     getQueuedPeriod: () => {
          from: number;
          until: number;
      } | null;
-     tryToSatisfySeek: (time: number, allowWait: boolean) => Promise<{
+     tryToSatisfySeek: (time: number, allowWait: AllowWait | null, onBufferScheduled: (buffer: WrappedAudioBuffer) => void) => Promise<{
          type: "not-satisfied";
         reason: string;
      } | {
          type: "satisfied";
-         buffers: WrappedAudioBuffer[];
      }>;
      addChunkForAfterResuming: (buffer: AudioBuffer, timestamp: number) => void;
      moveQueuedChunksToPauseQueue: () => void;
@@ -1,4 +1,5 @@
  import { roundTo4Digits } from '../helpers/round-to-4-digits';
+ import { allowWaitRoutine } from './allow-wait';
  export const HEALTHY_BUFFER_THRESHOLD_SECONDS = 1;
  export const makeAudioIterator = (audioSink, startFromSecond) => {
      let destroyed = false;
@@ -16,7 +17,7 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
      const getNextOrNullIfNotAvailable = async (allowWait) => {
          const next = iterator.next();
          const result = allowWait
-             ? await next
+             ? await allowWaitRoutine(next, allowWait)
              : await Promise.race([
                  next,
                  new Promise((resolve) => {
@@ -49,7 +50,7 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
              buffer: result.value ?? null,
          };
      };
-     const tryToSatisfySeek = async (time, allowWait) => {
+     const tryToSatisfySeek = async (time, allowWait, onBufferScheduled) => {
          if (lastReturnedBuffer) {
              const bufferTimestamp = roundTo4Digits(lastReturnedBuffer.timestamp);
              const bufferEndTimestamp = roundTo4Digits(lastReturnedBuffer.timestamp + lastReturnedBuffer.duration);
@@ -60,20 +61,21 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
                  };
              }
              if (roundTo4Digits(time) <= bufferEndTimestamp) {
+                 onBufferScheduled(lastReturnedBuffer);
                  return {
                      type: 'satisfied',
-                     buffers: [lastReturnedBuffer],
                  };
              }
              // fall through
          }
          if (iteratorEnded) {
+             if (lastReturnedBuffer) {
+                 onBufferScheduled(lastReturnedBuffer);
+             }
              return {
                  type: 'satisfied',
-                 buffers: lastReturnedBuffer ? [lastReturnedBuffer] : [],
              };
          }
-         const toBeReturned = [];
          while (true) {
              const buffer = await getNextOrNullIfNotAvailable(allowWait);
              if (buffer.type === 'need-to-wait-for-it') {
@@ -85,21 +87,23 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
              if (buffer.type === 'got-buffer-or-end') {
                  if (buffer.buffer === null) {
                      iteratorEnded = true;
+                     if (lastReturnedBuffer) {
+                         onBufferScheduled(lastReturnedBuffer);
+                     }
                      return {
                          type: 'satisfied',
-                         buffers: lastReturnedBuffer ? [lastReturnedBuffer] : [],
                      };
                  }
                  const bufferTimestamp = roundTo4Digits(buffer.buffer.timestamp);
                  const bufferEndTimestamp = roundTo4Digits(buffer.buffer.timestamp + buffer.buffer.duration);
                  const timestamp = roundTo4Digits(time);
                  if (bufferTimestamp <= timestamp && bufferEndTimestamp > timestamp) {
+                     onBufferScheduled(buffer.buffer);
                      return {
                          type: 'satisfied',
-                         buffers: [...toBeReturned, buffer.buffer],
                      };
                  }
-                 toBeReturned.push(buffer.buffer);
+                 onBufferScheduled(buffer.buffer);
                  continue;
              }
              throw new Error('Unreachable');
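
The hunks above turn `tryToSatisfySeek` from a collect-then-return API (the removed `toBeReturned` array and `buffers` result field) into a streaming one: every buffer consumed on the way to the seek target is handed to `onBufferScheduled` immediately, so callers can schedule audio as it is decoded rather than after the whole scan completes. A hypothetical call site, mirroring the guard the manager uses later in this diff (`scheduleBuffer` is a stand-in name):

```ts
// Hypothetical caller: buffers are scheduled the moment they are found,
// skipping work if the seek has been superseded (nonce went stale).
const result = await iterator.tryToSatisfySeek(
  targetTimeInSeconds,
  null, // no allow-wait: probe without blocking playback
  (buffer) => {
    if (!nonce.isStale()) {
      scheduleBuffer(buffer); // stand-in for onAudioChunk(...)
    }
  },
);
if (result.type === 'not-satisfied') {
  // fall back, e.g. recreate the iterator at the target time
}
```
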
@@ -159,13 +163,9 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
              audioChunksForAfterResuming.length = 0;
              return chunks;
          },
-         getQueuedPeriod: (pendingBuffers) => {
+         getQueuedPeriod: () => {
              let until = -Infinity;
              let from = Infinity;
-             for (const buffer of pendingBuffers) {
-                 until = Math.max(until, buffer.timestamp + buffer.duration);
-                 from = Math.min(from, buffer.timestamp);
-             }
              for (const node of queuedAudioNodes) {
                  until = Math.max(until, node.timestamp + node.buffer.duration);
                  from = Math.min(from, node.timestamp);
@@ -84,6 +84,8 @@ const extractAudioInternal = async ({ src, timeInSeconds: unloopedTimeInSeconds,
          trimStartInSeconds,
          trimEndInSeconds,
          playbackRate,
+         audioDataTimestamp: sample.timestamp,
+         isLast: isLastSample,
      });
      audioDataRaw.close();
      if (audioData.numberOfFrames === 0) {
@@ -1,4 +1,5 @@
  import type { InputAudioTrack, WrappedAudioBuffer } from 'mediabunny';
+ import type { useBufferState } from 'remotion';
  import type { Nonce } from './nonce-manager';
  export declare const audioIteratorManager: ({ audioTrack, delayPlaybackHandleIfNotPremounting, sharedAudioContext, }: {
      audioTrack: InputAudioTrack;
@@ -29,28 +30,28 @@ export declare const audioIteratorManager: ({ audioTrack, delayPlaybackHandleIfN
          buffer: AudioBuffer;
          timestamp: number;
      }[];
-     getQueuedPeriod: (pendingBuffers: WrappedAudioBuffer[]) => {
+     getQueuedPeriod: () => {
          from: number;
          until: number;
      } | null;
-     tryToSatisfySeek: (time: number, allowWait: boolean) => Promise<{
+     tryToSatisfySeek: (time: number, allowWait: import("./audio/allow-wait").AllowWait | null, onBufferScheduled: (buffer: WrappedAudioBuffer) => void) => Promise<{
          type: "not-satisfied";
          reason: string;
      } | {
          type: "satisfied";
-         buffers: WrappedAudioBuffer[];
      }>;
      addChunkForAfterResuming: (buffer: AudioBuffer, timestamp: number) => void;
      moveQueuedChunksToPauseQueue: () => void;
      getNumberOfChunksAfterResuming: () => number;
  } | null;
  destroy: () => void;
- seek: ({ newTime, nonce, fps, playbackRate, getIsPlaying, scheduleAudioNode, }: {
+ seek: ({ newTime, nonce, fps, playbackRate, getIsPlaying, scheduleAudioNode, bufferState, }: {
      newTime: number;
      nonce: Nonce;
      fps: number;
      playbackRate: number;
      getIsPlaying: () => boolean;
+     bufferState: ReturnType<typeof useBufferState>;
      scheduleAudioNode: (node: AudioBufferSourceNode, mediaTimestamp: number) => void;
  }) => Promise<void>;
  getAudioIteratorsCreated: () => number;
@@ -1,4 +1,4 @@
- import { AudioBufferSink, InputDisposedError } from 'mediabunny';
+ import { AudioBufferSink } from 'mediabunny';
  import { isAlreadyQueued, makeAudioIterator, } from './audio/audio-preview-iterator';
  export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremounting, sharedAudioContext, }) => {
      let muted = false;
@@ -50,18 +50,7 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
          audioBufferIterator = iterator;
          // Schedule up to 3 buffers ahead of the current time
          for (let i = 0; i < 3; i++) {
-             const result = await iterator.getNext().catch((err) => {
-                 if (iterator.isDestroyed() || err instanceof InputDisposedError) {
-                     // Fall through
-                 }
-                 else {
-                     throw err;
-                 }
-             });
-             if (!result) {
-                 delayHandle.unblock();
-                 return;
-             }
+             const result = await iterator.getNext();
              if (iterator.isDestroyed()) {
                  delayHandle.unblock();
                  return;
@@ -90,7 +79,7 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
          }
          audioBufferIterator.moveQueuedChunksToPauseQueue();
      };
-     const seek = async ({ newTime, nonce, fps, playbackRate, getIsPlaying, scheduleAudioNode, }) => {
+     const seek = async ({ newTime, nonce, fps, playbackRate, getIsPlaying, scheduleAudioNode, bufferState, }) => {
          if (!audioBufferIterator) {
              await startAudioIterator({
                  nonce,
@@ -101,10 +90,18 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
              });
              return;
          }
-         const currentTimeIsAlreadyQueued = isAlreadyQueued(newTime, audioBufferIterator.getQueuedPeriod([]));
-         const toBeScheduled = [];
+         const currentTimeIsAlreadyQueued = isAlreadyQueued(newTime, audioBufferIterator.getQueuedPeriod());
          if (!currentTimeIsAlreadyQueued) {
-             const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(newTime, false);
+             const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(newTime, null, (buffer) => {
+                 if (!nonce.isStale()) {
+                     onAudioChunk({
+                         getIsPlaying,
+                         buffer,
+                         playbackRate,
+                         scheduleAudioNode,
+                     });
+                 }
+             });
              if (nonce.isStale()) {
                  return;
              }
@@ -118,19 +115,33 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
                  });
                  return;
              }
-             toBeScheduled.push(...audioSatisfyResult.buffers);
          }
          const nextTime = newTime +
-             // start of next frame
-             (1 / fps) * playbackRate +
-             // need the full duration of the next frame to be queued
-             (1 / fps) * playbackRate;
-         const nextIsAlreadyQueued = isAlreadyQueued(nextTime, audioBufferIterator.getQueuedPeriod(toBeScheduled));
+             // 3 frames ahead to get enough of a buffer
+             (1 / fps) * Math.max(1, playbackRate) * 3;
+         const nextIsAlreadyQueued = isAlreadyQueued(nextTime, audioBufferIterator.getQueuedPeriod());
          if (!nextIsAlreadyQueued) {
              // here we allow waiting for the next buffer to be loaded
              // it's better than to create a new iterator
              // because we already know we are in the right spot
-             const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(nextTime, true);
+             const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(nextTime, {
+                 type: 'allow-wait',
+                 waitCallback: () => {
+                     const handle = bufferState.delayPlayback();
+                     return () => {
+                         handle.unblock();
+                     };
+                 },
+             }, (buffer) => {
+                 if (!nonce.isStale()) {
+                     onAudioChunk({
+                         getIsPlaying,
+                         buffer,
+                         playbackRate,
+                         scheduleAudioNode,
+                     });
+                 }
+             });
              if (nonce.isStale()) {
                  return;
              }
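
The lookahead formula above changes from two frame-durations at the raw playback rate to three frame-durations at a rate clamped to at least 1, so slowed-down playback still buffers a full three frames ahead. Worked values, assuming `fps = 30` (illustrative):

```ts
// nextTime = newTime + (1 / fps) * Math.max(1, playbackRate) * 3
// playbackRate = 2:   newTime + (1 / 30) * 2 * 3 = newTime + 0.2s
// playbackRate = 0.5: newTime + (1 / 30) * 1 * 3 = newTime + 0.1s  (rate clamped to 1)
```
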
@@ -142,17 +153,7 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
                      getIsPlaying,
                      scheduleAudioNode,
                  });
-                 return;
              }
-             toBeScheduled.push(...audioSatisfyResult.buffers);
-         }
-         for (const buffer of toBeScheduled) {
-             onAudioChunk({
-                 getIsPlaying,
-                 buffer,
-                 playbackRate,
-                 scheduleAudioNode,
-             });
          }
      };
      const resumeScheduledAudioChunks = ({ playbackRate, scheduleAudioNode, }) => {
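
The `waitCallback` wired up in the seek path above is the whole contract behind `AllowWait`: calling it must enter a buffering state and return a function that leaves it. In this diff it is backed by Remotion's `useBufferState` handle (`delayPlayback()` / `handle.unblock()`), but any implementation with the same shape would satisfy the type — an illustrative one:

```ts
import type { AllowWait } from './audio/allow-wait';

// Illustrative AllowWait implementation: the callback starts a
// "buffering" phase and hands back the function that ends it.
const loggingAllowWait: AllowWait = {
  type: 'allow-wait',
  waitCallback: () => {
    console.log('audio not ready yet - blocking playback');
    return () => {
      console.log('audio arrived - unblocking playback');
    };
  },
};
```
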
@@ -1,2 +1,2 @@
- import type { PcmS16AudioData } from './convert-audiodata';
+ import { type PcmS16AudioData } from './convert-audiodata';
  export declare const combineAudioDataAndClosePrevious: (audioDataArray: PcmS16AudioData[]) => PcmS16AudioData;
@@ -1,9 +1,12 @@
+ import { fixFloatingPoint } from './convert-audiodata';
  import { TARGET_NUMBER_OF_CHANNELS } from './resample-audiodata';
  export const combineAudioDataAndClosePrevious = (audioDataArray) => {
      let numberOfFrames = 0;
+     let durationInMicroSeconds = 0;
      const { timestamp } = audioDataArray[0];
      for (const audioData of audioDataArray) {
          numberOfFrames += audioData.numberOfFrames;
+         durationInMicroSeconds += audioData.durationInMicroSeconds;
      }
      const arr = new Int16Array(numberOfFrames * TARGET_NUMBER_OF_CHANNELS);
      let offset = 0;
@@ -14,6 +17,7 @@ export const combineAudioDataAndClosePrevious = (audioDataArray) => {
      return {
          data: arr,
          numberOfFrames,
-         timestamp,
+         timestamp: fixFloatingPoint(timestamp),
+         durationInMicroSeconds: fixFloatingPoint(durationInMicroSeconds),
      };
  };
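
With the hunks above, a combined chunk now reports a `durationInMicroSeconds` that is the sum of its parts, and both the summed duration and the first chunk's timestamp pass through `fixFloatingPoint` so accumulated float error is snapped away. A worked instance (values illustrative):

```ts
// Summing three ~20 ms chunk durations can drift by machine epsilon:
const durations = [20000.000000001, 20000, 19999.999999999]; // µs
let total = 0;
for (const d of durations) total += d;
// total can land a hair off 60000 (e.g. 60000.000000000004);
// fixFloatingPoint sees total % 1 within 1e-7 of an integer
// and snaps it to exactly 60000.
```
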
@@ -3,10 +3,14 @@ export type ConvertAudioDataOptions = {
      trimStartInSeconds: number;
      trimEndInSeconds: number;
      playbackRate: number;
+     audioDataTimestamp: number;
+     isLast: boolean;
  };
  export type PcmS16AudioData = {
      data: Int16Array;
      numberOfFrames: number;
      timestamp: number;
+     durationInMicroSeconds: number;
  };
- export declare const convertAudioData: ({ audioData, trimStartInSeconds, trimEndInSeconds, playbackRate, }: ConvertAudioDataOptions) => PcmS16AudioData;
+ export declare const fixFloatingPoint: (value: number) => number;
+ export declare const convertAudioData: ({ audioData, trimStartInSeconds, trimEndInSeconds, playbackRate, audioDataTimestamp, isLast, }: ConvertAudioDataOptions) => PcmS16AudioData;
@@ -1,6 +1,19 @@
  import { resampleAudioData, TARGET_NUMBER_OF_CHANNELS, TARGET_SAMPLE_RATE, } from './resample-audiodata';
  const FORMAT = 's16';
- export const convertAudioData = ({ audioData, trimStartInSeconds, trimEndInSeconds, playbackRate, }) => {
+ export const fixFloatingPoint = (value) => {
+     if (value % 1 < 0.0000001) {
+         return Math.floor(value);
+     }
+     if (value % 1 > 0.9999999) {
+         return Math.ceil(value);
+     }
+     return value;
+ };
+ const ceilButNotIfFloatingPointIssue = (value) => {
+     const fixed = fixFloatingPoint(value);
+     return Math.ceil(fixed);
+ };
+ export const convertAudioData = ({ audioData, trimStartInSeconds, trimEndInSeconds, playbackRate, audioDataTimestamp, isLast, }) => {
      const { numberOfChannels: srcNumberOfChannels, sampleRate: currentSampleRate, numberOfFrames, } = audioData;
      const ratio = currentSampleRate / TARGET_SAMPLE_RATE;
      // Always rounding down start timestamps and rounding up end durations
@@ -9,11 +22,14 @@ export const convertAudioData = ({ audioData, trimStartInSeconds, trimEndInSecon
      // timestamp and round up the end timestamp
      // This might lead to overlapping, hopefully aligning perfectly!
      // Test case: https://github.com/remotion-dev/remotion/issues/5758
-     const frameOffset = Math.floor(trimStartInSeconds * audioData.sampleRate);
-     const unroundedFrameCount = numberOfFrames -
-         (trimEndInSeconds + trimStartInSeconds) * audioData.sampleRate;
-     const frameCount = Math.ceil(unroundedFrameCount);
-     const newNumberOfFrames = Math.ceil(unroundedFrameCount / ratio / playbackRate);
+     const frameOffset = Math.floor(fixFloatingPoint(trimStartInSeconds * audioData.sampleRate));
+     const unroundedFrameCount = numberOfFrames - trimEndInSeconds * audioData.sampleRate - frameOffset;
+     const frameCount = isLast
+         ? ceilButNotIfFloatingPointIssue(unroundedFrameCount)
+         : Math.round(unroundedFrameCount);
+     const newNumberOfFrames = isLast
+         ? ceilButNotIfFloatingPointIssue(unroundedFrameCount / ratio / playbackRate)
+         : Math.round(unroundedFrameCount / ratio / playbackRate);
      if (newNumberOfFrames === 0) {
          throw new Error('Cannot resample - the given sample rate would result in less than 1 sample');
      }
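
The new rounding strategy above: values within 1e-7 of an integer are snapped by `fixFloatingPoint` before any rounding, frame counts for non-final samples use `Math.round`, and only the last sample still rounds up — via `ceilButNotIfFloatingPointIssue`, so a float artifact can no longer inflate the count by a whole frame. Worked values (illustrative):

```ts
fixFloatingPoint(1024.00000000004); // 1024   (within 1e-7 above an integer -> floored)
fixFloatingPoint(1023.99999999996); // 1024   (within 1e-7 below an integer -> ceiled)
fixFloatingPoint(1024.5);           // 1024.5 (a genuine fraction is left alone)

Math.ceil(1024.00000000004);                      // 1025 - the old off-by-one
ceilButNotIfFloatingPointIssue(1024.00000000004); // 1024 - snapped first, then ceiled
```
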
@@ -26,13 +42,16 @@ export const convertAudioData = ({ audioData, trimStartInSeconds, trimEndInSecon
      });
      const data = new Int16Array(newNumberOfFrames * TARGET_NUMBER_OF_CHANNELS);
      const chunkSize = frameCount / newNumberOfFrames;
+     const timestampOffsetMicroseconds = (frameOffset / audioData.sampleRate) * 1000000;
      if (newNumberOfFrames === frameCount &&
          TARGET_NUMBER_OF_CHANNELS === srcNumberOfChannels &&
          playbackRate === 1) {
          return {
              data: srcChannels,
              numberOfFrames: newNumberOfFrames,
-             timestamp: audioData.timestamp + (frameOffset / audioData.sampleRate) * 1000000,
+             timestamp: audioDataTimestamp * 1000000 +
+                 fixFloatingPoint(timestampOffsetMicroseconds),
+             durationInMicroSeconds: fixFloatingPoint((newNumberOfFrames / TARGET_SAMPLE_RATE) * 1000000),
          };
      }
      resampleAudioData({
@@ -45,7 +64,9 @@ export const convertAudioData = ({ audioData, trimStartInSeconds, trimEndInSecon
      const newAudioData = {
          data,
          numberOfFrames: newNumberOfFrames,
-         timestamp: audioData.timestamp + (frameOffset / audioData.sampleRate) * 1000000,
+         timestamp: audioDataTimestamp * 1000000 +
+             fixFloatingPoint(timestampOffsetMicroseconds),
+         durationInMicroSeconds: fixFloatingPoint((newNumberOfFrames / TARGET_SAMPLE_RATE) * 1000000),
      };
      return newAudioData;
  };
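
Timestamps are now derived from the caller-supplied `audioDataTimestamp` — apparently in seconds, given the × 1000000 scaling — plus the trim offset, instead of from the WebCodecs `audioData.timestamp`, and every converted chunk reports its own duration. Worked arithmetic (illustrative values; assumes `TARGET_SAMPLE_RATE` is 48000):

```ts
// audioDataTimestamp = 1.5 (s), frameOffset = 480, sampleRate = 48000:
// timestampOffsetMicroseconds = (480 / 48000) * 1000000 = 10000 µs
// timestamp = 1.5 * 1000000 + 10000 = 1510000 µs
// durationInMicroSeconds for 4800 output frames:
// (4800 / 48000) * 1000000 = 100000 µs
```
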
@@ -11,7 +11,7 @@ export const drawPreviewOverlay = ({ context, audioTime, audioContextState, audi
      if (audioIteratorManager) {
          const queuedPeriod = audioIteratorManager
              .getAudioBufferIterator()
-             ?.getQueuedPeriod([]);
+             ?.getQueuedPeriod();
          const numberOfChunksAfterResuming = audioIteratorManager
              ?.getAudioBufferIterator()
              ?.getNumberOfChunksAfterResuming();