@remotion/media 4.0.370 → 4.0.372

@@ -0,0 +1,6 @@
+ import type { WrappedAudioBuffer } from 'mediabunny';
+ export type AllowWait = {
+     type: 'allow-wait';
+     waitCallback: () => () => void;
+ };
+ export declare const allowWaitRoutine: (next: Promise<IteratorResult<WrappedAudioBuffer, void>>, waitFn: AllowWait) => Promise<IteratorResult<WrappedAudioBuffer, void>>;
@@ -0,0 +1,15 @@
+ export const allowWaitRoutine = async (next, waitFn) => {
+     const result = await Promise.race([
+         next,
+         new Promise((resolve) => {
+             Promise.resolve().then(() => resolve());
+         }),
+     ]);
+     if (!result) {
+         const unblock = waitFn.waitCallback();
+         const newRes = await next;
+         unblock();
+         return newRes;
+     }
+     return result;
+ };
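
Note on the new allow-wait mechanism: allowWaitRoutine races `next` against a promise that resolves to `undefined` after exactly one microtask, so `result` is only falsy when the iterator has not produced a buffer yet. Only then does it invoke `waitCallback` (which, as the manager diff below shows, puts Remotion into a buffering state) and awaits `next` for real, calling the returned unblock function once the buffer arrives. A minimal standalone sketch of the pattern; `settledWithinOneMicrotask` and `nextOrWait` are illustrative names, not exports of @remotion/media:

// Resolves to undefined one microtask from now; `pending` only wins
// the race if it is already settled (or settles within this tick).
const settledWithinOneMicrotask = <T>(
    pending: Promise<T>,
): Promise<T | undefined> =>
    Promise.race([pending, Promise.resolve().then(() => undefined)]);

const nextOrWait = async <T>(
    next: Promise<T>,
    waitCallback: () => () => void, // returns an "unblock" function
): Promise<T> => {
    const result = await settledWithinOneMicrotask(next);
    if (result === undefined) {
        const unblock = waitCallback(); // e.g. enter a buffering state
        try {
            return await next; // now wait for real
        } finally {
            unblock(); // always leave the buffering state
        }
    }
    return result;
};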
@@ -1,5 +1,5 @@
  import { jsx as _jsx } from "react/jsx-runtime";
- import { useContext, useEffect, useMemo, useRef, useState } from 'react';
+ import { useContext, useEffect, useLayoutEffect, useMemo, useRef, useState, } from 'react';
  import { Internals, Audio as RemotionAudio, useBufferState, useCurrentFrame, useVideoConfig, } from 'remotion';
  import { getTimeInSeconds } from '../get-time-in-seconds';
  import { MediaPlayer } from '../media-player';
@@ -181,7 +181,7 @@ const AudioForPreviewAssertedShowing = ({ src, playbackRate, logLevel, muted, vo
              audioPlayer.pause();
          }
      }, [isPlayerBuffering, logLevel, playing]);
-     useEffect(() => {
+     useLayoutEffect(() => {
          const audioPlayer = mediaPlayerRef.current;
          if (!audioPlayer || !mediaPlayerReady)
              return;
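
Note: this effect, which applies the current time to the media player, now runs in useLayoutEffect instead of useEffect, i.e. synchronously after render and before the browser paints rather than after paint. Presumably this keeps the audio clock in step with the frame that is about to be displayed. A sketch of the distinction (our example, not the package's code):

import { useLayoutEffect } from 'react';

// Hypothetical hook: apply a seek before the upcoming paint so the
// painted frame and the player's current time cannot diverge by a paint.
const useSyncedSeek = (timeInSeconds: number, seek: (t: number) => void) => {
    useLayoutEffect(() => {
        // useEffect would run this after paint; useLayoutEffect runs
        // after DOM mutations but before the browser paints.
        seek(timeInSeconds);
    }, [timeInSeconds, seek]);
};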
@@ -1,4 +1,5 @@
  import type { AudioBufferSink, WrappedAudioBuffer } from 'mediabunny';
+ import { type AllowWait } from './allow-wait';
  export declare const HEALTHY_BUFFER_THRESHOLD_SECONDS = 1;
  export type QueuedNode = {
      node: AudioBufferSourceNode;
@@ -15,16 +16,15 @@ export declare const makeAudioIterator: (audioSink: AudioBufferSink, startFromSe
          buffer: AudioBuffer;
          timestamp: number;
      }[];
-     getQueuedPeriod: (pendingBuffers: WrappedAudioBuffer[]) => {
+     getQueuedPeriod: () => {
          from: number;
          until: number;
      } | null;
-     tryToSatisfySeek: (time: number, allowWait: boolean) => Promise<{
+     tryToSatisfySeek: (time: number, allowWait: AllowWait | null, onBufferScheduled: (buffer: WrappedAudioBuffer) => void) => Promise<{
          type: "not-satisfied";
          reason: string;
      } | {
          type: "satisfied";
-         buffers: WrappedAudioBuffer[];
      }>;
      addChunkForAfterResuming: (buffer: AudioBuffer, timestamp: number) => void;
      moveQueuedChunksToPauseQueue: () => void;
@@ -1,4 +1,5 @@
  import { roundTo4Digits } from '../helpers/round-to-4-digits';
+ import { allowWaitRoutine } from './allow-wait';
  export const HEALTHY_BUFFER_THRESHOLD_SECONDS = 1;
  export const makeAudioIterator = (audioSink, startFromSecond) => {
      let destroyed = false;
@@ -16,7 +17,7 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
      const getNextOrNullIfNotAvailable = async (allowWait) => {
          const next = iterator.next();
          const result = allowWait
-             ? await next
+             ? await allowWaitRoutine(next, allowWait)
              : await Promise.race([
                  next,
                  new Promise((resolve) => {
@@ -49,7 +50,7 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
              buffer: result.value ?? null,
          };
      };
-     const tryToSatisfySeek = async (time, allowWait) => {
+     const tryToSatisfySeek = async (time, allowWait, onBufferScheduled) => {
          if (lastReturnedBuffer) {
              const bufferTimestamp = roundTo4Digits(lastReturnedBuffer.timestamp);
              const bufferEndTimestamp = roundTo4Digits(lastReturnedBuffer.timestamp + lastReturnedBuffer.duration);
@@ -60,20 +61,21 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
                  };
              }
              if (roundTo4Digits(time) <= bufferEndTimestamp) {
+                 onBufferScheduled(lastReturnedBuffer);
                  return {
                      type: 'satisfied',
-                     buffers: [lastReturnedBuffer],
                  };
              }
              // fall through
          }
          if (iteratorEnded) {
+             if (lastReturnedBuffer) {
+                 onBufferScheduled(lastReturnedBuffer);
+             }
              return {
                  type: 'satisfied',
-                 buffers: lastReturnedBuffer ? [lastReturnedBuffer] : [],
              };
          }
-         const toBeReturned = [];
          while (true) {
              const buffer = await getNextOrNullIfNotAvailable(allowWait);
              if (buffer.type === 'need-to-wait-for-it') {
@@ -85,21 +87,23 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
              if (buffer.type === 'got-buffer-or-end') {
                  if (buffer.buffer === null) {
                      iteratorEnded = true;
+                     if (lastReturnedBuffer) {
+                         onBufferScheduled(lastReturnedBuffer);
+                     }
                      return {
                          type: 'satisfied',
-                         buffers: lastReturnedBuffer ? [lastReturnedBuffer] : [],
                      };
                  }
                  const bufferTimestamp = roundTo4Digits(buffer.buffer.timestamp);
                  const bufferEndTimestamp = roundTo4Digits(buffer.buffer.timestamp + buffer.buffer.duration);
                  const timestamp = roundTo4Digits(time);
                  if (bufferTimestamp <= timestamp && bufferEndTimestamp > timestamp) {
+                     onBufferScheduled(buffer.buffer);
                      return {
                          type: 'satisfied',
-                         buffers: [...toBeReturned, buffer.buffer],
                      };
                  }
-                 toBeReturned.push(buffer.buffer);
+                 onBufferScheduled(buffer.buffer);
                  continue;
              }
              throw new Error('Unreachable');
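
Note: throughout tryToSatisfySeek, the accumulated `buffers` array (and the `toBeReturned` staging list) is replaced by the `onBufferScheduled` callback, which hands each buffer to the caller the moment it is pulled from the iterator. Scheduling can therefore begin before the seek is fully satisfied, and the 'satisfied' result no longer carries a payload. The shape of the refactor, reduced to a sketch with hypothetical names:

// Before: results pile up; nothing is usable until everything arrived.
const satisfyCollecting = async <T>(source: AsyncGenerator<T>): Promise<T[]> => {
    const collected: T[] = [];
    for await (const item of source) {
        collected.push(item);
    }
    return collected;
};

// After: each result is pushed to the caller immediately.
const satisfyStreaming = async <T>(
    source: AsyncGenerator<T>,
    onItem: (item: T) => void, // e.g. onBufferScheduled(buffer)
): Promise<void> => {
    for await (const item of source) {
        onItem(item);
    }
};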
@@ -159,13 +163,9 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
              audioChunksForAfterResuming.length = 0;
              return chunks;
          },
-         getQueuedPeriod: (pendingBuffers) => {
+         getQueuedPeriod: () => {
              let until = -Infinity;
              let from = Infinity;
-             for (const buffer of pendingBuffers) {
-                 until = Math.max(until, buffer.timestamp + buffer.duration);
-                 from = Math.min(from, buffer.timestamp);
-             }
              for (const node of queuedAudioNodes) {
                  until = Math.max(until, node.timestamp + node.buffer.duration);
                  from = Math.min(from, node.timestamp);
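
Note: with buffers now scheduled eagerly through onBufferScheduled, there are no pending buffers for getQueuedPeriod to merge in, so it derives the queued time range from the scheduled audio nodes alone. For reference, a hypothetical re-statement of the check this range feeds (the real isAlreadyQueued lives in audio-preview-iterator and may differ in detail):

type QueuedPeriod = { from: number; until: number } | null;

// A time counts as already queued if it falls inside the min/max
// window spanned by everything currently scheduled.
const isCovered = (time: number, period: QueuedPeriod): boolean =>
    period !== null && time >= period.from && time <= period.until;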
@@ -84,6 +84,8 @@ const extractAudioInternal = async ({ src, timeInSeconds: unloopedTimeInSeconds,
          trimStartInSeconds,
          trimEndInSeconds,
          playbackRate,
+         audioDataTimestamp: sample.timestamp,
+         isLast: isLastSample,
      });
      audioDataRaw.close();
      if (audioData.numberOfFrames === 0) {
@@ -1,4 +1,5 @@
  import type { InputAudioTrack, WrappedAudioBuffer } from 'mediabunny';
+ import type { useBufferState } from 'remotion';
  import type { Nonce } from './nonce-manager';
  export declare const audioIteratorManager: ({ audioTrack, delayPlaybackHandleIfNotPremounting, sharedAudioContext, }: {
      audioTrack: InputAudioTrack;
@@ -29,28 +30,28 @@ export declare const audioIteratorManager: ({ audioTrack, delayPlaybackHandleIfN
              buffer: AudioBuffer;
              timestamp: number;
          }[];
-         getQueuedPeriod: (pendingBuffers: WrappedAudioBuffer[]) => {
+         getQueuedPeriod: () => {
              from: number;
              until: number;
          } | null;
-         tryToSatisfySeek: (time: number, allowWait: boolean) => Promise<{
+         tryToSatisfySeek: (time: number, allowWait: import("./audio/allow-wait").AllowWait | null, onBufferScheduled: (buffer: WrappedAudioBuffer) => void) => Promise<{
              type: "not-satisfied";
              reason: string;
          } | {
              type: "satisfied";
-             buffers: WrappedAudioBuffer[];
          }>;
          addChunkForAfterResuming: (buffer: AudioBuffer, timestamp: number) => void;
          moveQueuedChunksToPauseQueue: () => void;
          getNumberOfChunksAfterResuming: () => number;
      } | null;
      destroy: () => void;
-     seek: ({ newTime, nonce, fps, playbackRate, getIsPlaying, scheduleAudioNode, }: {
+     seek: ({ newTime, nonce, fps, playbackRate, getIsPlaying, scheduleAudioNode, bufferState, }: {
          newTime: number;
          nonce: Nonce;
          fps: number;
          playbackRate: number;
          getIsPlaying: () => boolean;
+         bufferState: ReturnType<typeof useBufferState>;
          scheduleAudioNode: (node: AudioBufferSourceNode, mediaTimestamp: number) => void;
      }) => Promise<void>;
      getAudioIteratorsCreated: () => number;
@@ -1,4 +1,4 @@
- import { AudioBufferSink } from 'mediabunny';
+ import { AudioBufferSink, InputDisposedError } from 'mediabunny';
  import { isAlreadyQueued, makeAudioIterator, } from './audio/audio-preview-iterator';
  export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremounting, sharedAudioContext, }) => {
      let muted = false;
@@ -48,30 +48,42 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
          const iterator = makeAudioIterator(audioSink, startFromSecond);
          audioIteratorsCreated++;
          audioBufferIterator = iterator;
-         // Schedule up to 3 buffers ahead of the current time
-         for (let i = 0; i < 3; i++) {
-             const result = await iterator.getNext();
-             if (iterator.isDestroyed()) {
-                 delayHandle.unblock();
-                 return;
-             }
-             if (nonce.isStale()) {
-                 delayHandle.unblock();
-                 return;
+         try {
+             // Schedule up to 3 buffers ahead of the current time
+             for (let i = 0; i < 3; i++) {
+                 const result = await iterator.getNext();
+                 if (iterator.isDestroyed()) {
+                     delayHandle.unblock();
+                     return;
+                 }
+                 if (nonce.isStale()) {
+                     delayHandle.unblock();
+                     return;
+                 }
+                 if (!result.value) {
+                     // media ended
+                     delayHandle.unblock();
+                     return;
+                 }
+                 onAudioChunk({
+                     getIsPlaying,
+                     buffer: result.value,
+                     playbackRate,
+                     scheduleAudioNode,
+                 });
              }
-             if (!result.value) {
-                 // media ended
-                 delayHandle.unblock();
+         }
+         catch (e) {
+             if (e instanceof InputDisposedError) {
+                 // iterator was disposed by a newer startAudioIterator call
+                 // this is expected during rapid seeking
                  return;
              }
-             onAudioChunk({
-                 getIsPlaying,
-                 buffer: result.value,
-                 playbackRate,
-                 scheduleAudioNode,
-             });
+             throw e;
+         }
+         finally {
+             delayHandle.unblock();
          }
-         delayHandle.unblock();
      };
      const pausePlayback = () => {
          if (!audioBufferIterator) {
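
Note: the scheduling loop is now wrapped in try/catch/finally. mediabunny's InputDisposedError is swallowed because a newer startAudioIterator call disposing the previous input is expected during rapid seeking; any other error still propagates; and the finally block guarantees the playback delay handle is released on every exit path, where previously delayHandle.unblock() had to be repeated before each return. The pattern in isolation (a sketch; `work` and `unblock` stand in for the loop body and the delay handle):

import { InputDisposedError } from 'mediabunny';

const runDisposeTolerant = async (
    work: () => Promise<void>,
    unblock: () => void,
): Promise<void> => {
    try {
        await work();
    } catch (e) {
        if (e instanceof InputDisposedError) {
            // Superseded by a newer iterator; expected during rapid seeks.
            return;
        }
        throw e; // genuine failures still surface
    } finally {
        unblock(); // release the delay handle on every path
    }
};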
@@ -79,7 +91,7 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
          }
          audioBufferIterator.moveQueuedChunksToPauseQueue();
      };
-     const seek = async ({ newTime, nonce, fps, playbackRate, getIsPlaying, scheduleAudioNode, }) => {
+     const seek = async ({ newTime, nonce, fps, playbackRate, getIsPlaying, scheduleAudioNode, bufferState, }) => {
          if (!audioBufferIterator) {
              await startAudioIterator({
                  nonce,
@@ -90,10 +102,18 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
              });
              return;
          }
-         const currentTimeIsAlreadyQueued = isAlreadyQueued(newTime, audioBufferIterator.getQueuedPeriod([]));
-         const toBeScheduled = [];
+         const currentTimeIsAlreadyQueued = isAlreadyQueued(newTime, audioBufferIterator.getQueuedPeriod());
          if (!currentTimeIsAlreadyQueued) {
-             const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(newTime, false);
+             const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(newTime, null, (buffer) => {
+                 if (!nonce.isStale()) {
+                     onAudioChunk({
+                         getIsPlaying,
+                         buffer,
+                         playbackRate,
+                         scheduleAudioNode,
+                     });
+                 }
+             });
              if (nonce.isStale()) {
                  return;
              }
@@ -107,19 +127,33 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
                  });
                  return;
              }
-             toBeScheduled.push(...audioSatisfyResult.buffers);
          }
          const nextTime = newTime +
-             // start of next frame
-             (1 / fps) * playbackRate +
-             // need the full duration of the next frame to be queued
-             (1 / fps) * playbackRate;
-         const nextIsAlreadyQueued = isAlreadyQueued(nextTime, audioBufferIterator.getQueuedPeriod(toBeScheduled));
+             // 3 frames ahead to get enough of a buffer
+             (1 / fps) * Math.max(1, playbackRate) * 3;
+         const nextIsAlreadyQueued = isAlreadyQueued(nextTime, audioBufferIterator.getQueuedPeriod());
          if (!nextIsAlreadyQueued) {
              // here we allow waiting for the next buffer to be loaded
              // it's better than to create a new iterator
              // because we already know we are in the right spot
-             const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(nextTime, true);
+             const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(nextTime, {
+                 type: 'allow-wait',
+                 waitCallback: () => {
+                     const handle = bufferState.delayPlayback();
+                     return () => {
+                         handle.unblock();
+                     };
+                 },
+             }, (buffer) => {
+                 if (!nonce.isStale()) {
+                     onAudioChunk({
+                         getIsPlaying,
+                         buffer,
+                         playbackRate,
+                         scheduleAudioNode,
+                     });
+                 }
+             });
              if (nonce.isStale()) {
                  return;
              }
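
Note: the lookahead target changed from two frames ("start of next frame" plus its full duration) to three frames, and the playback rate is clamped with Math.max(1, ...) so slow-motion playback cannot shrink the window below three real frames. When that window is not yet queued, the allow-wait object wires waitCallback to bufferState.delayPlayback(), so waiting on the decoder surfaces as a Remotion buffering state instead of a silent stall. Worked numbers (ours):

const lookaheadSeconds = (fps: number, playbackRate: number): number =>
    (1 / fps) * Math.max(1, playbackRate) * 3;

lookaheadSeconds(30, 1);   // 0.1 -> keep 100ms of audio queued
lookaheadSeconds(30, 2);   // 0.2 -> double speed drains audio twice as fast
lookaheadSeconds(30, 0.5); // 0.1 -> clamped: still three full frames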
@@ -131,17 +165,7 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
                      getIsPlaying,
                      scheduleAudioNode,
                  });
-                 return;
              }
-             toBeScheduled.push(...audioSatisfyResult.buffers);
-         }
-         for (const buffer of toBeScheduled) {
-             onAudioChunk({
-                 getIsPlaying,
-                 buffer,
-                 playbackRate,
-                 scheduleAudioNode,
-             });
          }
      };
      const resumeScheduledAudioChunks = ({ playbackRate, scheduleAudioNode, }) => {
@@ -1,2 +1,2 @@
- import type { PcmS16AudioData } from './convert-audiodata';
+ import { type PcmS16AudioData } from './convert-audiodata';
  export declare const combineAudioDataAndClosePrevious: (audioDataArray: PcmS16AudioData[]) => PcmS16AudioData;
@@ -1,9 +1,12 @@
+ import { fixFloatingPoint } from './convert-audiodata';
  import { TARGET_NUMBER_OF_CHANNELS } from './resample-audiodata';
  export const combineAudioDataAndClosePrevious = (audioDataArray) => {
      let numberOfFrames = 0;
+     let durationInMicroSeconds = 0;
      const { timestamp } = audioDataArray[0];
      for (const audioData of audioDataArray) {
          numberOfFrames += audioData.numberOfFrames;
+         durationInMicroSeconds += audioData.durationInMicroSeconds;
      }
      const arr = new Int16Array(numberOfFrames * TARGET_NUMBER_OF_CHANNELS);
      let offset = 0;
@@ -14,6 +17,7 @@ export const combineAudioDataAndClosePrevious = (audioDataArray) => {
      return {
          data: arr,
          numberOfFrames,
-         timestamp,
+         timestamp: fixFloatingPoint(timestamp),
+         durationInMicroSeconds: fixFloatingPoint(durationInMicroSeconds),
      };
  };
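
Note: combined chunks now carry an explicit durationInMicroSeconds, summed per input chunk and snapped with fixFloatingPoint (defined in convert-audiodata below), because repeated float additions can drift just off an integer microsecond count. For instance (our numbers):

// Three thirds of a second, in microseconds:
const chunkDurations = Array(3).fill(1000000 / 3); // 333333.333...
const total = chunkDurations.reduce((a, b) => a + b, 0);
// `total` may come out as 999999.9999999999 rather than 1000000;
// fixFloatingPoint(total) snaps it back to 1000000.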
@@ -3,10 +3,14 @@ export type ConvertAudioDataOptions = {
      trimStartInSeconds: number;
      trimEndInSeconds: number;
      playbackRate: number;
+     audioDataTimestamp: number;
+     isLast: boolean;
  };
  export type PcmS16AudioData = {
      data: Int16Array;
      numberOfFrames: number;
      timestamp: number;
+     durationInMicroSeconds: number;
  };
- export declare const convertAudioData: ({ audioData, trimStartInSeconds, trimEndInSeconds, playbackRate, }: ConvertAudioDataOptions) => PcmS16AudioData;
+ export declare const fixFloatingPoint: (value: number) => number;
+ export declare const convertAudioData: ({ audioData, trimStartInSeconds, trimEndInSeconds, playbackRate, audioDataTimestamp, isLast, }: ConvertAudioDataOptions) => PcmS16AudioData;
@@ -1,6 +1,19 @@
  import { resampleAudioData, TARGET_NUMBER_OF_CHANNELS, TARGET_SAMPLE_RATE, } from './resample-audiodata';
  const FORMAT = 's16';
- export const convertAudioData = ({ audioData, trimStartInSeconds, trimEndInSeconds, playbackRate, }) => {
+ export const fixFloatingPoint = (value) => {
+     if (value % 1 < 0.0000001) {
+         return Math.floor(value);
+     }
+     if (value % 1 > 0.9999999) {
+         return Math.ceil(value);
+     }
+     return value;
+ };
+ const ceilButNotIfFloatingPointIssue = (value) => {
+     const fixed = fixFloatingPoint(value);
+     return Math.ceil(fixed);
+ };
+ export const convertAudioData = ({ audioData, trimStartInSeconds, trimEndInSeconds, playbackRate, audioDataTimestamp, isLast, }) => {
      const { numberOfChannels: srcNumberOfChannels, sampleRate: currentSampleRate, numberOfFrames, } = audioData;
      const ratio = currentSampleRate / TARGET_SAMPLE_RATE;
      // Always rounding down start timestamps and rounding up end durations
@@ -9,11 +22,14 @@ export const convertAudioData = ({ audioData, trimStartInSeconds, trimEndInSecon
      // timestamp and round up the end timestamp
      // This might lead to overlapping, hopefully aligning perfectly!
      // Test case: https://github.com/remotion-dev/remotion/issues/5758
-     const frameOffset = Math.floor(trimStartInSeconds * audioData.sampleRate);
-     const unroundedFrameCount = numberOfFrames -
-         (trimEndInSeconds + trimStartInSeconds) * audioData.sampleRate;
-     const frameCount = Math.ceil(unroundedFrameCount);
-     const newNumberOfFrames = Math.ceil(unroundedFrameCount / ratio / playbackRate);
+     const frameOffset = Math.floor(fixFloatingPoint(trimStartInSeconds * audioData.sampleRate));
+     const unroundedFrameCount = numberOfFrames - trimEndInSeconds * audioData.sampleRate - frameOffset;
+     const frameCount = isLast
+         ? ceilButNotIfFloatingPointIssue(unroundedFrameCount)
+         : Math.round(unroundedFrameCount);
+     const newNumberOfFrames = isLast
+         ? ceilButNotIfFloatingPointIssue(unroundedFrameCount / ratio / playbackRate)
+         : Math.round(unroundedFrameCount / ratio / playbackRate);
      if (newNumberOfFrames === 0) {
          throw new Error('Cannot resample - the given sample rate would result in less than 1 sample');
      }
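
Note: the rounding strategy changed in three ways. The frame offset is snapped before flooring; the frame count subtracts the already-floored frameOffset instead of trimStartInSeconds * sampleRate, whose fractional part could skew the count; and only the last chunk rounds up while middle chunks round to nearest, so consecutive chunks no longer systematically overlap by a frame. What fixFloatingPoint does, with worked values under assumed inputs (ours: 48 kHz, a 1024-frame chunk, 5 ms / 10 ms trims, ratio and playback rate 1):

// Restated for a runnable example; matches the compiled output above.
const fixFloatingPoint = (value: number): number => {
    if (value % 1 < 0.0000001) return Math.floor(value);
    if (value % 1 > 0.9999999) return Math.ceil(value);
    return value;
};

fixFloatingPoint(4409.999999999999); // -> 4410 (fraction ~0.999..., ceil)
fixFloatingPoint(4410.000000000001); // -> 4410 (fraction ~1e-12, floor)
fixFloatingPoint(4410.5);            // -> 4410.5 (left alone)

// Why snap before Math.ceil: a count that is "really" 147 but computed
// as 147.00000000000003 would otherwise be ceiled up to 148.
Math.ceil(fixFloatingPoint(147.00000000000003)); // 147, not 148

// Frame arithmetic with the assumed inputs:
const sampleRate = 48000;
const numberOfFrames = 1024;
const frameOffset = Math.floor(fixFloatingPoint(0.005 * sampleRate)); // 240
const unrounded = numberOfFrames - 0.01 * sampleRate - frameOffset;
// unrounded is ~304 (e.g. 303.99999999999994 in floats);
// middle chunk: Math.round -> 304; last chunk: snap then ceil -> 304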
@@ -26,13 +42,16 @@ export const convertAudioData = ({ audioData, trimStartInSeconds, trimEndInSecon
      });
      const data = new Int16Array(newNumberOfFrames * TARGET_NUMBER_OF_CHANNELS);
      const chunkSize = frameCount / newNumberOfFrames;
+     const timestampOffsetMicroseconds = (frameOffset / audioData.sampleRate) * 1000000;
      if (newNumberOfFrames === frameCount &&
          TARGET_NUMBER_OF_CHANNELS === srcNumberOfChannels &&
          playbackRate === 1) {
          return {
              data: srcChannels,
              numberOfFrames: newNumberOfFrames,
-             timestamp: audioData.timestamp + (frameOffset / audioData.sampleRate) * 1000000,
+             timestamp: audioDataTimestamp * 1000000 +
+                 fixFloatingPoint(timestampOffsetMicroseconds),
+             durationInMicroSeconds: fixFloatingPoint((newNumberOfFrames / TARGET_SAMPLE_RATE) * 1000000),
          };
      }
      resampleAudioData({
@@ -45,7 +64,9 @@ export const convertAudioData = ({ audioData, trimStartInSeconds, trimEndInSecon
      const newAudioData = {
          data,
          numberOfFrames: newNumberOfFrames,
-         timestamp: audioData.timestamp + (frameOffset / audioData.sampleRate) * 1000000,
+         timestamp: audioDataTimestamp * 1000000 +
+             fixFloatingPoint(timestampOffsetMicroseconds),
+         durationInMicroSeconds: fixFloatingPoint((newNumberOfFrames / TARGET_SAMPLE_RATE) * 1000000),
      };
      return newAudioData;
  };
@@ -11,7 +11,7 @@ export const drawPreviewOverlay = ({ context, audioTime, audioContextState, audi
      if (audioIteratorManager) {
          const queuedPeriod = audioIteratorManager
              .getAudioBufferIterator()
-             ?.getQueuedPeriod([]);
+             ?.getQueuedPeriod();
          const numberOfChunksAfterResuming = audioIteratorManager
              ?.getAudioBufferIterator()
              ?.getNumberOfChunksAfterResuming();