@remotion/media 4.0.370 → 4.0.371
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/audio/allow-wait.d.ts +6 -0
- package/dist/audio/allow-wait.js +15 -0
- package/dist/audio/audio-for-preview.js +2 -2
- package/dist/audio/audio-preview-iterator.d.ts +3 -3
- package/dist/audio/audio-preview-iterator.js +13 -13
- package/dist/audio-extraction/extract-audio.js +2 -0
- package/dist/audio-iterator-manager.d.ts +5 -4
- package/dist/audio-iterator-manager.js +33 -21
- package/dist/convert-audiodata/combine-audiodata.d.ts +1 -1
- package/dist/convert-audiodata/combine-audiodata.js +5 -1
- package/dist/convert-audiodata/convert-audiodata.d.ts +5 -1
- package/dist/convert-audiodata/convert-audiodata.js +29 -8
- package/dist/debug-overlay/preview-overlay.js +1 -1
- package/dist/esm/index.mjs +137 -75
- package/dist/media-player.js +1 -0
- package/dist/video/props.d.ts +0 -1
- package/dist/video/video-for-rendering.js +2 -2
- package/package.json +4 -4
- package/dist/audio/audio-iterator.d.ts +0 -11
- package/dist/audio/audio-iterator.js +0 -24
- package/dist/video/media-player.d.ts +0 -98
- package/dist/video/media-player.js +0 -532
- package/dist/video/timeout-utils.d.ts +0 -5
- package/dist/video/timeout-utils.js +0 -24

package/dist/audio/allow-wait.d.ts
@@ -0,0 +1,6 @@
+import type { WrappedAudioBuffer } from 'mediabunny';
+export type AllowWait = {
+    type: 'allow-wait';
+    waitCallback: () => () => void;
+};
+export declare const allowWaitRoutine: (next: Promise<IteratorResult<WrappedAudioBuffer, void>>, waitFn: AllowWait) => Promise<IteratorResult<WrappedAudioBuffer, void>>;

package/dist/audio/allow-wait.js
@@ -0,0 +1,15 @@
+export const allowWaitRoutine = async (next, waitFn) => {
+    const result = await Promise.race([
+        next,
+        new Promise((resolve) => {
+            Promise.resolve().then(() => resolve());
+        }),
+    ]);
+    if (!result) {
+        const unblock = waitFn.waitCallback();
+        const newRes = await next;
+        unblock();
+        return newRes;
+    }
+    return result;
+};
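
The new `allowWaitRoutine` races the iterator's `next` promise against a promise that settles one microtask later: if `next` is still pending by then, the race resolves to `undefined`, the routine acquires a wait handle via `waitCallback()` (further down, in `audio-iterator-manager.js`, that handle is a Remotion `delayPlayback()` handle), awaits the real result, and releases the handle. A standalone sketch of the same probing trick, using a hypothetical `isAlreadySettled` helper that is not part of the package:

```ts
// Sketch only: check whether a promise has already settled, without blocking.
// Mirrors the Promise.race pattern in allowWaitRoutine above, but uses a
// unique sentinel instead of relying on the winning value being truthy.
const isAlreadySettled = async <T>(promise: Promise<T>): Promise<boolean> => {
  const pending = Symbol('pending');
  const winner = await Promise.race([
    promise,
    // Settles one microtask later, so it only wins if `promise` is still pending.
    Promise.resolve().then(() => pending),
  ]);
  return winner !== pending;
};
```

`allowWaitRoutine` itself can get away with the bare `!result` check because an `IteratorResult` object is always truthy.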

package/dist/audio/audio-for-preview.js
@@ -1,5 +1,5 @@
 import { jsx as _jsx } from "react/jsx-runtime";
-import { useContext, useEffect, useMemo, useRef, useState } from 'react';
+import { useContext, useEffect, useLayoutEffect, useMemo, useRef, useState, } from 'react';
 import { Internals, Audio as RemotionAudio, useBufferState, useCurrentFrame, useVideoConfig, } from 'remotion';
 import { getTimeInSeconds } from '../get-time-in-seconds';
 import { MediaPlayer } from '../media-player';
@@ -181,7 +181,7 @@ const AudioForPreviewAssertedShowing = ({ src, playbackRate, logLevel, muted, vo
             audioPlayer.pause();
         }
     }, [isPlayerBuffering, logLevel, playing]);
-
+    useLayoutEffect(() => {
         const audioPlayer = mediaPlayerRef.current;
         if (!audioPlayer || !mediaPlayerReady)
             return;

package/dist/audio/audio-preview-iterator.d.ts
@@ -1,4 +1,5 @@
 import type { AudioBufferSink, WrappedAudioBuffer } from 'mediabunny';
+import { type AllowWait } from './allow-wait';
 export declare const HEALTHY_BUFFER_THRESHOLD_SECONDS = 1;
 export type QueuedNode = {
     node: AudioBufferSourceNode;
@@ -15,16 +16,15 @@ export declare const makeAudioIterator: (audioSink: AudioBufferSink, startFromSe
         buffer: AudioBuffer;
         timestamp: number;
     }[];
-    getQueuedPeriod: (
+    getQueuedPeriod: () => {
         from: number;
         until: number;
     } | null;
-    tryToSatisfySeek: (time: number, allowWait:
+    tryToSatisfySeek: (time: number, allowWait: AllowWait | null, onBufferScheduled: (buffer: WrappedAudioBuffer) => void) => Promise<{
         type: "not-satisfied";
         reason: string;
     } | {
         type: "satisfied";
-        buffers: WrappedAudioBuffer[];
     }>;
     addChunkForAfterResuming: (buffer: AudioBuffer, timestamp: number) => void;
     moveQueuedChunksToPauseQueue: () => void;

package/dist/audio/audio-preview-iterator.js
@@ -1,4 +1,5 @@
 import { roundTo4Digits } from '../helpers/round-to-4-digits';
+import { allowWaitRoutine } from './allow-wait';
 export const HEALTHY_BUFFER_THRESHOLD_SECONDS = 1;
 export const makeAudioIterator = (audioSink, startFromSecond) => {
     let destroyed = false;
@@ -16,7 +17,7 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
    const getNextOrNullIfNotAvailable = async (allowWait) => {
        const next = iterator.next();
        const result = allowWait
-            ? await next
+            ? await allowWaitRoutine(next, allowWait)
            : await Promise.race([
                next,
                new Promise((resolve) => {
@@ -49,7 +50,7 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
            buffer: result.value ?? null,
        };
    };
-    const tryToSatisfySeek = async (time, allowWait) => {
+    const tryToSatisfySeek = async (time, allowWait, onBufferScheduled) => {
        if (lastReturnedBuffer) {
            const bufferTimestamp = roundTo4Digits(lastReturnedBuffer.timestamp);
            const bufferEndTimestamp = roundTo4Digits(lastReturnedBuffer.timestamp + lastReturnedBuffer.duration);
@@ -60,20 +61,21 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
                };
            }
            if (roundTo4Digits(time) <= bufferEndTimestamp) {
+                onBufferScheduled(lastReturnedBuffer);
                return {
                    type: 'satisfied',
-                    buffers: [lastReturnedBuffer],
                };
            }
            // fall through
        }
        if (iteratorEnded) {
+            if (lastReturnedBuffer) {
+                onBufferScheduled(lastReturnedBuffer);
+            }
            return {
                type: 'satisfied',
-                buffers: lastReturnedBuffer ? [lastReturnedBuffer] : [],
            };
        }
-        const toBeReturned = [];
        while (true) {
            const buffer = await getNextOrNullIfNotAvailable(allowWait);
            if (buffer.type === 'need-to-wait-for-it') {
@@ -85,21 +87,23 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
            if (buffer.type === 'got-buffer-or-end') {
                if (buffer.buffer === null) {
                    iteratorEnded = true;
+                    if (lastReturnedBuffer) {
+                        onBufferScheduled(lastReturnedBuffer);
+                    }
                    return {
                        type: 'satisfied',
-                        buffers: lastReturnedBuffer ? [lastReturnedBuffer] : [],
                    };
                }
                const bufferTimestamp = roundTo4Digits(buffer.buffer.timestamp);
                const bufferEndTimestamp = roundTo4Digits(buffer.buffer.timestamp + buffer.buffer.duration);
                const timestamp = roundTo4Digits(time);
                if (bufferTimestamp <= timestamp && bufferEndTimestamp > timestamp) {
+                    onBufferScheduled(buffer.buffer);
                    return {
                        type: 'satisfied',
-                        buffers: [...toBeReturned, buffer.buffer],
                    };
                }
-
+                onBufferScheduled(buffer.buffer);
                continue;
            }
            throw new Error('Unreachable');
@@ -159,13 +163,9 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
            audioChunksForAfterResuming.length = 0;
            return chunks;
        },
-        getQueuedPeriod: (
+        getQueuedPeriod: () => {
            let until = -Infinity;
            let from = Infinity;
-            for (const buffer of pendingBuffers) {
-                until = Math.max(until, buffer.timestamp + buffer.duration);
-                from = Math.min(from, buffer.timestamp);
-            }
            for (const node of queuedAudioNodes) {
                until = Math.max(until, node.timestamp + node.buffer.duration);
                from = Math.min(from, node.timestamp);
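
With buffers now handed off eagerly through `onBufferScheduled`, `getQueuedPeriod` no longer takes a parameter for not-yet-scheduled buffers: the loop over `pendingBuffers` is dropped and the queued period is computed from `queuedAudioNodes` alone.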

package/dist/audio-extraction/extract-audio.js
@@ -84,6 +84,8 @@ const extractAudioInternal = async ({ src, timeInSeconds: unloopedTimeInSeconds,
        trimStartInSeconds,
        trimEndInSeconds,
        playbackRate,
+        audioDataTimestamp: sample.timestamp,
+        isLast: isLastSample,
    });
    audioDataRaw.close();
    if (audioData.numberOfFrames === 0) {
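
These two new options thread the source sample's own timestamp and a last-chunk flag into `convertAudioData`; the `convert-audiodata.js` hunks below show how they are used to compute absolute timestamps and to round the final chunk's frame count.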

package/dist/audio-iterator-manager.d.ts
@@ -1,4 +1,5 @@
 import type { InputAudioTrack, WrappedAudioBuffer } from 'mediabunny';
+import type { useBufferState } from 'remotion';
 import type { Nonce } from './nonce-manager';
 export declare const audioIteratorManager: ({ audioTrack, delayPlaybackHandleIfNotPremounting, sharedAudioContext, }: {
     audioTrack: InputAudioTrack;
@@ -29,28 +30,28 @@ export declare const audioIteratorManager: ({ audioTrack, delayPlaybackHandleIfN
            buffer: AudioBuffer;
            timestamp: number;
        }[];
-        getQueuedPeriod: (
+        getQueuedPeriod: () => {
            from: number;
            until: number;
        } | null;
-        tryToSatisfySeek: (time: number, allowWait:
+        tryToSatisfySeek: (time: number, allowWait: import("./audio/allow-wait").AllowWait | null, onBufferScheduled: (buffer: WrappedAudioBuffer) => void) => Promise<{
            type: "not-satisfied";
            reason: string;
        } | {
            type: "satisfied";
-            buffers: WrappedAudioBuffer[];
        }>;
        addChunkForAfterResuming: (buffer: AudioBuffer, timestamp: number) => void;
        moveQueuedChunksToPauseQueue: () => void;
        getNumberOfChunksAfterResuming: () => number;
    } | null;
    destroy: () => void;
-    seek: ({ newTime, nonce, fps, playbackRate, getIsPlaying, scheduleAudioNode, }: {
+    seek: ({ newTime, nonce, fps, playbackRate, getIsPlaying, scheduleAudioNode, bufferState, }: {
        newTime: number;
        nonce: Nonce;
        fps: number;
        playbackRate: number;
        getIsPlaying: () => boolean;
+        bufferState: ReturnType<typeof useBufferState>;
        scheduleAudioNode: (node: AudioBufferSourceNode, mediaTimestamp: number) => void;
    }) => Promise<void>;
    getAudioIteratorsCreated: () => number;

package/dist/audio-iterator-manager.js
@@ -79,7 +79,7 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
        }
        audioBufferIterator.moveQueuedChunksToPauseQueue();
    };
-    const seek = async ({ newTime, nonce, fps, playbackRate, getIsPlaying, scheduleAudioNode, }) => {
+    const seek = async ({ newTime, nonce, fps, playbackRate, getIsPlaying, scheduleAudioNode, bufferState, }) => {
        if (!audioBufferIterator) {
            await startAudioIterator({
                nonce,
@@ -90,10 +90,18 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
            });
            return;
        }
-        const currentTimeIsAlreadyQueued = isAlreadyQueued(newTime, audioBufferIterator.getQueuedPeriod(
-        const toBeScheduled = [];
+        const currentTimeIsAlreadyQueued = isAlreadyQueued(newTime, audioBufferIterator.getQueuedPeriod());
        if (!currentTimeIsAlreadyQueued) {
-            const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(newTime,
+            const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(newTime, null, (buffer) => {
+                if (!nonce.isStale()) {
+                    onAudioChunk({
+                        getIsPlaying,
+                        buffer,
+                        playbackRate,
+                        scheduleAudioNode,
+                    });
+                }
+            });
            if (nonce.isStale()) {
                return;
            }
@@ -107,19 +115,33 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
            });
            return;
        }
-            toBeScheduled.push(...audioSatisfyResult.buffers);
        }
        const nextTime = newTime +
-            //
-            (1 / fps) * playbackRate
-
-            (1 / fps) * playbackRate;
-        const nextIsAlreadyQueued = isAlreadyQueued(nextTime, audioBufferIterator.getQueuedPeriod(toBeScheduled));
+            // 3 frames ahead to get enough of a buffer
+            (1 / fps) * Math.max(1, playbackRate) * 3;
+        const nextIsAlreadyQueued = isAlreadyQueued(nextTime, audioBufferIterator.getQueuedPeriod());
        if (!nextIsAlreadyQueued) {
            // here we allow waiting for the next buffer to be loaded
            // it's better than to create a new iterator
            // because we already know we are in the right spot
-            const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(nextTime,
+            const audioSatisfyResult = await audioBufferIterator.tryToSatisfySeek(nextTime, {
+                type: 'allow-wait',
+                waitCallback: () => {
+                    const handle = bufferState.delayPlayback();
+                    return () => {
+                        handle.unblock();
+                    };
+                },
+            }, (buffer) => {
+                if (!nonce.isStale()) {
+                    onAudioChunk({
+                        getIsPlaying,
+                        buffer,
+                        playbackRate,
+                        scheduleAudioNode,
+                    });
+                }
+            });
            if (nonce.isStale()) {
                return;
            }
@@ -131,17 +153,7 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
                    getIsPlaying,
                    scheduleAudioNode,
                });
-                return;
            }
-            toBeScheduled.push(...audioSatisfyResult.buffers);
-        }
-        for (const buffer of toBeScheduled) {
-            onAudioChunk({
-                getIsPlaying,
-                buffer,
-                playbackRate,
-                scheduleAudioNode,
-            });
        }
    };
    const resumeScheduledAudioChunks = ({ playbackRate, scheduleAudioNode, }) => {
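
Two behavioral changes land in this `seek` implementation. First, the lookahead now probes three frames ahead (the removed expression appears to have looked a single frame ahead): with `fps = 30` and `playbackRate = 1` that is `(1 / 30) * 1 * 3 = 0.1 s`, and `Math.max(1, playbackRate)` keeps rates below 1 from shrinking the window. Second, decoded buffers are no longer collected into `toBeScheduled` and flushed in a trailing loop; each buffer is handed to the `onBufferScheduled` callback as soon as the iterator yields it, guarded by `nonce.isStale()` so a superseded seek schedules nothing. A simplified sketch of that second pattern, with hypothetical names:

```ts
// Sketch only: batch-then-flush vs. streaming consumption of an async producer.
type Chunk = { timestamp: number };

// Before: nothing is scheduled until the whole seek has resolved.
const consumeBatched = async (
  source: AsyncIterable<Chunk>,
  schedule: (chunk: Chunk) => void,
) => {
  const batch: Chunk[] = [];
  for await (const chunk of source) {
    batch.push(chunk);
  }
  for (const chunk of batch) {
    schedule(chunk);
  }
};

// After: each chunk is scheduled the moment it is decoded, and dropped
// if a newer seek has superseded this one (the nonce check).
const consumeStreaming = async (
  source: AsyncIterable<Chunk>,
  schedule: (chunk: Chunk) => void,
  isStale: () => boolean,
) => {
  for await (const chunk of source) {
    if (!isStale()) {
      schedule(chunk);
    }
  }
};
```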

package/dist/convert-audiodata/combine-audiodata.d.ts
@@ -1,2 +1,2 @@
-import type
+import { type PcmS16AudioData } from './convert-audiodata';
 export declare const combineAudioDataAndClosePrevious: (audioDataArray: PcmS16AudioData[]) => PcmS16AudioData;

package/dist/convert-audiodata/combine-audiodata.js
@@ -1,9 +1,12 @@
+import { fixFloatingPoint } from './convert-audiodata';
 import { TARGET_NUMBER_OF_CHANNELS } from './resample-audiodata';
 export const combineAudioDataAndClosePrevious = (audioDataArray) => {
     let numberOfFrames = 0;
+    let durationInMicroSeconds = 0;
     const { timestamp } = audioDataArray[0];
     for (const audioData of audioDataArray) {
         numberOfFrames += audioData.numberOfFrames;
+        durationInMicroSeconds += audioData.durationInMicroSeconds;
     }
     const arr = new Int16Array(numberOfFrames * TARGET_NUMBER_OF_CHANNELS);
     let offset = 0;
@@ -14,6 +17,7 @@ export const combineAudioDataAndClosePrevious = (audioDataArray) => {
    return {
        data: arr,
        numberOfFrames,
-        timestamp,
+        timestamp: fixFloatingPoint(timestamp),
+        durationInMicroSeconds: fixFloatingPoint(durationInMicroSeconds),
    };
 };
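
Combined chunks now also report their total duration: the per-chunk `durationInMicroSeconds` values are summed, and the result (like the timestamp) is snapped with `fixFloatingPoint`, which is defined in the `convert-audiodata.js` hunks below.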

package/dist/convert-audiodata/convert-audiodata.d.ts
@@ -3,10 +3,14 @@ export type ConvertAudioDataOptions = {
     trimStartInSeconds: number;
     trimEndInSeconds: number;
     playbackRate: number;
+    audioDataTimestamp: number;
+    isLast: boolean;
 };
 export type PcmS16AudioData = {
     data: Int16Array;
     numberOfFrames: number;
     timestamp: number;
+    durationInMicroSeconds: number;
 };
-export declare const
+export declare const fixFloatingPoint: (value: number) => number;
+export declare const convertAudioData: ({ audioData, trimStartInSeconds, trimEndInSeconds, playbackRate, audioDataTimestamp, isLast, }: ConvertAudioDataOptions) => PcmS16AudioData;

package/dist/convert-audiodata/convert-audiodata.js
@@ -1,6 +1,19 @@
 import { resampleAudioData, TARGET_NUMBER_OF_CHANNELS, TARGET_SAMPLE_RATE, } from './resample-audiodata';
 const FORMAT = 's16';
-export const
+export const fixFloatingPoint = (value) => {
+    if (value % 1 < 0.0000001) {
+        return Math.floor(value);
+    }
+    if (value % 1 > 0.9999999) {
+        return Math.ceil(value);
+    }
+    return value;
+};
+const ceilButNotIfFloatingPointIssue = (value) => {
+    const fixed = fixFloatingPoint(value);
+    return Math.ceil(fixed);
+};
+export const convertAudioData = ({ audioData, trimStartInSeconds, trimEndInSeconds, playbackRate, audioDataTimestamp, isLast, }) => {
     const { numberOfChannels: srcNumberOfChannels, sampleRate: currentSampleRate, numberOfFrames, } = audioData;
     const ratio = currentSampleRate / TARGET_SAMPLE_RATE;
     // Always rounding down start timestamps and rounding up end durations
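
`fixFloatingPoint` snaps a value to the nearest integer when it is within 1e-7 of one, and `ceilButNotIfFloatingPointIssue` applies it before ceiling, so a count that is integral up to float error is not rounded one frame too far. Illustrative numbers, not from the package:

```ts
// Assuming fixFloatingPoint as defined in the hunk above.
// (0.1 + 0.2) is the classic IEEE 754 example; scaled to microseconds:
const micros = (0.1 + 0.2) * 1e6;      // 300000.00000000006
Math.ceil(micros);                     // 300001 - one unit too many
Math.ceil(fixFloatingPoint(micros));   // 300000 - snapped to the integer first
```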
@@ -9,11 +22,14 @@ export const convertAudioData = ({ audioData, trimStartInSeconds, trimEndInSecon
    // timestamp and round up the end timestamp
    // This might lead to overlapping, hopefully aligning perfectly!
    // Test case: https://github.com/remotion-dev/remotion/issues/5758
-    const frameOffset = Math.floor(trimStartInSeconds * audioData.sampleRate);
-    const unroundedFrameCount = numberOfFrames -
-
-
-
+    const frameOffset = Math.floor(fixFloatingPoint(trimStartInSeconds * audioData.sampleRate));
+    const unroundedFrameCount = numberOfFrames - trimEndInSeconds * audioData.sampleRate - frameOffset;
+    const frameCount = isLast
+        ? ceilButNotIfFloatingPointIssue(unroundedFrameCount)
+        : Math.round(unroundedFrameCount);
+    const newNumberOfFrames = isLast
+        ? ceilButNotIfFloatingPointIssue(unroundedFrameCount / ratio / playbackRate)
+        : Math.round(unroundedFrameCount / ratio / playbackRate);
    if (newNumberOfFrames === 0) {
        throw new Error('Cannot resample - the given sample rate would result in less than 1 sample');
    }
@@ -26,13 +42,16 @@ export const convertAudioData = ({ audioData, trimStartInSeconds, trimEndInSecon
    });
    const data = new Int16Array(newNumberOfFrames * TARGET_NUMBER_OF_CHANNELS);
    const chunkSize = frameCount / newNumberOfFrames;
+    const timestampOffsetMicroseconds = (frameOffset / audioData.sampleRate) * 1000000;
    if (newNumberOfFrames === frameCount &&
        TARGET_NUMBER_OF_CHANNELS === srcNumberOfChannels &&
        playbackRate === 1) {
        return {
            data: srcChannels,
            numberOfFrames: newNumberOfFrames,
-            timestamp:
+            timestamp: audioDataTimestamp * 1000000 +
+                fixFloatingPoint(timestampOffsetMicroseconds),
+            durationInMicroSeconds: fixFloatingPoint((newNumberOfFrames / TARGET_SAMPLE_RATE) * 1000000),
        };
    }
    resampleAudioData({
@@ -45,7 +64,9 @@ export const convertAudioData = ({ audioData, trimStartInSeconds, trimEndInSecon
    const newAudioData = {
        data,
        numberOfFrames: newNumberOfFrames,
-        timestamp:
+        timestamp: audioDataTimestamp * 1000000 +
+            fixFloatingPoint(timestampOffsetMicroseconds),
+        durationInMicroSeconds: fixFloatingPoint((newNumberOfFrames / TARGET_SAMPLE_RATE) * 1000000),
    };
    return newAudioData;
 };
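
For scale: a sample with `audioDataTimestamp = 1.5` (seconds) and a `frameOffset` of 441 at 44100 Hz gets `timestamp = 1.5 * 1000000 + (441 / 44100) * 1000000 = 1510000` microseconds, i.e. the output timestamp is derived directly from the source sample's position plus the trimmed-off offset.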

package/dist/debug-overlay/preview-overlay.js
@@ -11,7 +11,7 @@ export const drawPreviewOverlay = ({ context, audioTime, audioContextState, audi
    if (audioIteratorManager) {
        const queuedPeriod = audioIteratorManager
            .getAudioBufferIterator()
-            ?.getQueuedPeriod(
+            ?.getQueuedPeriod();
        const numberOfChunksAfterResuming = audioIteratorManager
            ?.getAudioBufferIterator()
            ?.getNumberOfChunksAfterResuming();