@remotion/media 4.0.374 → 4.0.376

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,7 @@
 import { jsx as _jsx } from "react/jsx-runtime";
 import { useContext, useLayoutEffect, useMemo, useState } from 'react';
 import { cancelRender, Html5Audio, Internals, random, useCurrentFrame, useDelayRender, useRemotionEnvironment, } from 'remotion';
+import { useMaxMediaCacheSize } from '../caches';
 import { applyVolume } from '../convert-audiodata/apply-volume';
 import { TARGET_SAMPLE_RATE } from '../convert-audiodata/resample-audiodata';
 import { frameForVolumeProp } from '../looped-frame';
@@ -30,6 +31,7 @@ export const AudioForRendering = ({ volume: volumeProp, playbackRate, src, muted
         sequenceContext?.relativeFrom,
         sequenceContext?.durationInFrames,
     ]);
+    const maxCacheSize = useMaxMediaCacheSize(logLevel ?? window.remotion_logLevel);
     useLayoutEffect(() => {
         const timestamp = frame / fps;
         const durationInSeconds = 1 / fps;
@@ -63,6 +65,7 @@ export const AudioForRendering = ({ volume: volumeProp, playbackRate, src, muted
            trimAfter,
            trimBefore,
            fps,
+            maxCacheSize,
        })
            .then((result) => {
            if (result.type === 'unknown-container-format') {
@@ -163,6 +166,7 @@ export const AudioForRendering = ({ volume: volumeProp, playbackRate, src, muted
        trimAfter,
        trimBefore,
        replaceWithHtml5Audio,
+        maxCacheSize,
    ]);
    if (replaceWithHtml5Audio) {
        return (_jsx(Html5Audio, { src: src, playbackRate: playbackRate, muted: muted, loop: loop, volume: volumeProp, delayRenderRetries: delayRenderRetries, delayRenderTimeoutInMilliseconds: delayRenderTimeoutInMilliseconds, style: style, loopVolumeCurveBehavior: loopVolumeCurveBehavior, audioStreamIndex: audioStreamIndex, useWebAudioApi: fallbackHtml5AudioProps?.useWebAudioApi, onError: fallbackHtml5AudioProps?.onError, toneFrequency: toneFrequency, acceptableTimeShiftInSeconds: fallbackHtml5AudioProps?.acceptableTimeShiftInSeconds, name: name, showInTimeline: showInTimeline }));
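
Taken together, the hunks above move the cache-budget lookup into the component: AudioForRendering resolves maxCacheSize once via the new useMaxMediaCacheSize hook (a React hook, so it has to run at component level) and then threads the plain number down into the extraction call. A minimal sketch of that flow under stated assumptions — the stub implementations and the 500 MB figure are made up, and the real functions take many more parameters:

```ts
// Sketch of the new data flow. Real names: useMaxMediaCacheSize (component
// level) → extractAudio → audioManager.getIterator; everything here is a
// simplified stand-in so the snippet runs on its own.
type ExtractParams = {src: string; timeInSeconds: number; maxCacheSize: number};

// Stub for the manager: it no longer computes a budget, it only enforces one.
const getIterator = async ({src, maxCacheSize}: {src: string; maxCacheSize: number}) => {
  console.log(`iterating ${src} under a ${maxCacheSize}-byte cache budget`);
};

// Stub for extractAudio: forwards the budget it was given, unchanged.
const extractAudio = async ({src, timeInSeconds, maxCacheSize}: ExtractParams) => {
  void timeInSeconds;
  await getIterator({src, maxCacheSize});
};

// In the component, the hook resolves the budget once per render...
const maxCacheSize = 500_000_000; // stand-in for useMaxMediaCacheSize(logLevel)
// ...and it is passed through every layer as a plain number.
void extractAudio({src: 'video.mp4', timeInSeconds: 1.5, maxCacheSize});
```
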
@@ -23,6 +23,8 @@ export declare const makeAudioIterator: (audioSink: AudioBufferSink, startFromSe
     tryToSatisfySeek: (time: number, allowWait: AllowWait | null, onBufferScheduled: (buffer: WrappedAudioBuffer) => void) => Promise<{
         type: "not-satisfied";
         reason: string;
+    } | {
+        type: "ended";
     } | {
         type: "satisfied";
     }>;
@@ -12,8 +12,6 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
         }
         queuedAudioNodes.length = 0;
     };
-    let lastReturnedBuffer = null;
-    let iteratorEnded = false;
     const getNextOrNullIfNotAvailable = async (allowWait) => {
         const next = iterator.next();
         const result = allowWait
@@ -29,53 +27,16 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
                 type: 'need-to-wait-for-it',
                 waitPromise: async () => {
                     const res = await next;
-                    if (res.value) {
-                        lastReturnedBuffer = res.value;
-                    }
-                    else {
-                        iteratorEnded = true;
-                    }
                     return res.value;
                 },
             };
         }
-        if (result.value) {
-            lastReturnedBuffer = result.value;
-        }
-        else {
-            iteratorEnded = true;
-        }
         return {
             type: 'got-buffer-or-end',
             buffer: result.value ?? null,
         };
     };
     const tryToSatisfySeek = async (time, allowWait, onBufferScheduled) => {
-        if (lastReturnedBuffer) {
-            const bufferTimestamp = roundTo4Digits(lastReturnedBuffer.timestamp);
-            const bufferEndTimestamp = roundTo4Digits(lastReturnedBuffer.timestamp + lastReturnedBuffer.duration);
-            if (roundTo4Digits(time) < bufferTimestamp) {
-                return {
-                    type: 'not-satisfied',
-                    reason: `iterator is too far, most recently returned ${bufferTimestamp}-${bufferEndTimestamp}, requested ${time}`,
-                };
-            }
-            if (roundTo4Digits(time) <= bufferEndTimestamp) {
-                onBufferScheduled(lastReturnedBuffer);
-                return {
-                    type: 'satisfied',
-                };
-            }
-            // fall through
-        }
-        if (iteratorEnded) {
-            if (lastReturnedBuffer) {
-                onBufferScheduled(lastReturnedBuffer);
-            }
-            return {
-                type: 'satisfied',
-            };
-        }
         while (true) {
             const buffer = await getNextOrNullIfNotAvailable(allowWait);
             if (buffer.type === 'need-to-wait-for-it') {
@@ -86,17 +47,19 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
            }
            if (buffer.type === 'got-buffer-or-end') {
                if (buffer.buffer === null) {
-                    iteratorEnded = true;
-                    if (lastReturnedBuffer) {
-                        onBufferScheduled(lastReturnedBuffer);
-                    }
                    return {
-                        type: 'satisfied',
+                        type: 'ended',
                    };
                }
                const bufferTimestamp = roundTo4Digits(buffer.buffer.timestamp);
                const bufferEndTimestamp = roundTo4Digits(buffer.buffer.timestamp + buffer.buffer.duration);
                const timestamp = roundTo4Digits(time);
+                if (roundTo4Digits(time) < bufferTimestamp) {
+                    return {
+                        type: 'not-satisfied',
+                        reason: `iterator is too far, most recently returned ${bufferTimestamp}-${bufferEndTimestamp}, requested ${time}`,
+                    };
+                }
                if (bufferTimestamp <= timestamp && bufferEndTimestamp > timestamp) {
                    onBufferScheduled(buffer.buffer);
                    return {
@@ -138,12 +101,6 @@ export const makeAudioIterator = (audioSink, startFromSecond) => {
        },
        getNext: async () => {
            const next = await iterator.next();
-            if (next.value) {
-                lastReturnedBuffer = next.value;
-            }
-            else {
-                iteratorEnded = true;
-            }
            return next;
        },
        isDestroyed: () => {
@@ -3,13 +3,14 @@ import { type LogLevel } from 'remotion';
 import type { RememberActualMatroskaTimestamps } from '../video-extraction/remember-actual-matroska-timestamps';
 import type { AudioSampleIterator } from './audio-iterator';
 export declare const makeAudioManager: () => {
-    getIterator: ({ src, timeInSeconds, audioSampleSink, isMatroska, actualMatroskaTimestamps, logLevel, }: {
+    getIterator: ({ src, timeInSeconds, audioSampleSink, isMatroska, actualMatroskaTimestamps, logLevel, maxCacheSize, }: {
         src: string;
         timeInSeconds: number;
         audioSampleSink: AudioSampleSink;
         isMatroska: boolean;
         actualMatroskaTimestamps: RememberActualMatroskaTimestamps;
         logLevel: LogLevel;
+        maxCacheSize: number;
     }) => Promise<AudioSampleIterator>;
     getCacheStats: () => {
         count: number;
@@ -1,5 +1,5 @@
 import { Internals } from 'remotion';
-import { getMaxVideoCacheSize, getTotalCacheStats } from '../caches';
+import { getTotalCacheStats } from '../caches';
 import { makeAudioIterator } from './audio-iterator';
 export const makeAudioManager = () => {
     const iterators = [];
@@ -47,8 +47,7 @@ export const makeAudioManager = () => {
             seenKeys.add(key);
         }
     };
-    const getIterator = async ({ src, timeInSeconds, audioSampleSink, isMatroska, actualMatroskaTimestamps, logLevel, }) => {
-        const maxCacheSize = getMaxVideoCacheSize(logLevel);
+    const getIterator = async ({ src, timeInSeconds, audioSampleSink, isMatroska, actualMatroskaTimestamps, logLevel, maxCacheSize, }) => {
        while ((await getTotalCacheStats()).totalSize > maxCacheSize) {
            deleteOldestIterator();
        }
@@ -94,7 +93,7 @@ export const makeAudioManager = () => {
     };
     let queue = Promise.resolve(undefined);
     return {
-        getIterator: ({ src, timeInSeconds, audioSampleSink, isMatroska, actualMatroskaTimestamps, logLevel, }) => {
+        getIterator: ({ src, timeInSeconds, audioSampleSink, isMatroska, actualMatroskaTimestamps, logLevel, maxCacheSize, }) => {
            queue = queue.then(() => getIterator({
                src,
                timeInSeconds,
@@ -102,6 +101,7 @@ export const makeAudioManager = () => {
                isMatroska,
                actualMatroskaTimestamps,
                logLevel,
+                maxCacheSize,
            }));
            return queue;
        },
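
With these hunks the audio manager stops calling getMaxVideoCacheSize itself; getIterator now enforces whatever maxCacheSize the caller supplies, evicting the oldest iterator until the combined cache fits the budget. A self-contained sketch of that eviction pattern — the entries, sizes, and helper name are invented for illustration; the real code loops on getTotalCacheStats() and deleteOldestIterator():

```ts
// Illustrative stand-in for the loop in getIterator:
//   while ((await getTotalCacheStats()).totalSize > maxCacheSize) deleteOldestIterator();
type CacheEntry = {key: string; sizeInBytes: number; lastUsed: number};

const evictUntilUnderBudget = (entries: CacheEntry[], maxCacheSize: number): CacheEntry[] => {
  // Oldest first, like deleteOldestIterator() would pick them.
  const remaining = [...entries].sort((a, b) => a.lastUsed - b.lastUsed);
  let totalSize = remaining.reduce((sum, e) => sum + e.sizeInBytes, 0);
  while (totalSize > maxCacheSize && remaining.length > 0) {
    const oldest = remaining.shift()!;
    totalSize -= oldest.sizeInBytes;
  }
  return remaining;
};

// A 100 MB budget with 120 MB cached drops only the oldest entry:
const kept = evictUntilUnderBudget(
  [
    {key: 'a.mp4', sizeInBytes: 70_000_000, lastUsed: 1},
    {key: 'b.mp4', sizeInBytes: 50_000_000, lastUsed: 2},
  ],
  100_000_000,
);
console.log(kept.map((e) => e.key)); // ['b.mp4']
```
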
@@ -12,8 +12,9 @@ type ExtractAudioParams = {
     trimBefore: number | undefined;
     trimAfter: number | undefined;
     fps: number;
+    maxCacheSize: number;
 };
-declare const extractAudioInternal: ({ src, timeInSeconds: unloopedTimeInSeconds, durationInSeconds: durationNotYetApplyingPlaybackRate, logLevel, loop, playbackRate, audioStreamIndex, trimBefore, trimAfter, fps, }: ExtractAudioParams) => Promise<{
+declare const extractAudioInternal: ({ src, timeInSeconds: unloopedTimeInSeconds, durationInSeconds: durationNotYetApplyingPlaybackRate, logLevel, loop, playbackRate, audioStreamIndex, trimBefore, trimAfter, fps, maxCacheSize, }: ExtractAudioParams) => Promise<{
     data: PcmS16AudioData | null;
     durationInSeconds: number | null;
 } | "cannot-decode" | "unknown-container-format">;
@@ -1,9 +1,10 @@
 import { audioManager } from '../caches';
 import { combineAudioDataAndClosePrevious } from '../convert-audiodata/combine-audiodata';
-import { convertAudioData } from '../convert-audiodata/convert-audiodata';
+import { convertAudioData, fixFloatingPoint, } from '../convert-audiodata/convert-audiodata';
+import { TARGET_NUMBER_OF_CHANNELS, TARGET_SAMPLE_RATE, } from '../convert-audiodata/resample-audiodata';
 import { getSink } from '../get-sink';
 import { getTimeInSeconds } from '../get-time-in-seconds';
-const extractAudioInternal = async ({ src, timeInSeconds: unloopedTimeInSeconds, durationInSeconds: durationNotYetApplyingPlaybackRate, logLevel, loop, playbackRate, audioStreamIndex, trimBefore, trimAfter, fps, }) => {
+const extractAudioInternal = async ({ src, timeInSeconds: unloopedTimeInSeconds, durationInSeconds: durationNotYetApplyingPlaybackRate, logLevel, loop, playbackRate, audioStreamIndex, trimBefore, trimAfter, fps, maxCacheSize, }) => {
     const { getAudio, actualMatroskaTimestamps, isMatroska, getDuration } = await getSink(src, logLevel);
     let mediaDurationInSeconds = null;
     if (loop) {
@@ -40,11 +41,11 @@ const extractAudioInternal = async ({ src, timeInSeconds: unloopedTimeInSeconds,
         isMatroska,
         actualMatroskaTimestamps,
         logLevel,
+        maxCacheSize,
     });
     const durationInSeconds = durationNotYetApplyingPlaybackRate * playbackRate;
     const samples = await sampleIterator.getSamples(timeInSeconds, durationInSeconds);
     audioManager.logOpenFrames();
-    const trimStartToleranceInSeconds = 0.002;
     const audioDataArray = [];
     for (let i = 0; i < samples.length; i++) {
         const sample = samples[i];
@@ -64,14 +65,18 @@ const extractAudioInternal = async ({ src, timeInSeconds: unloopedTimeInSeconds,
         // amount of samples to shave from start and end
         let trimStartInSeconds = 0;
         let trimEndInSeconds = 0;
+        let leadingSilence = null;
         if (isFirstSample) {
-            trimStartInSeconds = timeInSeconds - sample.timestamp;
-            if (trimStartInSeconds < 0 &&
-                trimStartInSeconds > -trimStartToleranceInSeconds) {
-                trimStartInSeconds = 0;
-            }
+            trimStartInSeconds = fixFloatingPoint(timeInSeconds - sample.timestamp);
             if (trimStartInSeconds < 0) {
-                throw new Error(`trimStartInSeconds is negative: ${trimStartInSeconds}. ${JSON.stringify({ timeInSeconds, ts: sample.timestamp, d: sample.duration, isFirstSample, isLastSample, durationInSeconds, i, st: samples.map((s) => s.timestamp) })}`);
+                const silenceFrames = Math.ceil(fixFloatingPoint(-trimStartInSeconds * TARGET_SAMPLE_RATE));
+                leadingSilence = {
+                    data: new Int16Array(silenceFrames * TARGET_NUMBER_OF_CHANNELS),
+                    numberOfFrames: silenceFrames,
+                    timestamp: timeInSeconds * 1000000,
+                    durationInMicroSeconds: (silenceFrames / TARGET_SAMPLE_RATE) * 1000000,
+                };
+                trimStartInSeconds = 0;
             }
         }
         if (isLastSample) {
@@ -93,6 +98,9 @@ const extractAudioInternal = async ({ src, timeInSeconds: unloopedTimeInSeconds,
         if (audioData.numberOfFrames === 0) {
             continue;
         }
+        if (leadingSilence) {
+            audioDataArray.push(leadingSilence);
+        }
         audioDataArray.push(audioData);
     }
     if (audioDataArray.length === 0) {
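
The hunks above also change how a negative trim at the start is handled. Previously, if the first decoded sample started after the requested timeInSeconds by more than the 0.002 s tolerance, extraction threw; now the gap is padded with a silent s16 buffer pushed ahead of the real data. A worked example of the arithmetic, assuming TARGET_SAMPLE_RATE = 48000 and TARGET_NUMBER_OF_CHANNELS = 2 — the actual constants live in resample-audiodata and are not shown in this diff:

```ts
// Hypothetical numbers illustrating the new leading-silence branch.
const TARGET_SAMPLE_RATE = 48_000; // assumed value of the imported constant
const TARGET_NUMBER_OF_CHANNELS = 2; // assumed value of the imported constant

const timeInSeconds = 1.0; // where extraction was asked to start
const firstSampleTimestamp = 1.0025; // first decoded sample arrives 2.5 ms late

// In the real code this difference also passes through fixFloatingPoint().
const trimStartInSeconds = timeInSeconds - firstSampleTimestamp; // ≈ -0.0025 (negative)
const silenceFrames = Math.ceil(-trimStartInSeconds * TARGET_SAMPLE_RATE); // → 120 frames

const leadingSilence = {
  // 120 frames × 2 channels of interleaved s16 zeroes = digital silence
  data: new Int16Array(silenceFrames * TARGET_NUMBER_OF_CHANNELS),
  numberOfFrames: silenceFrames,
  timestamp: timeInSeconds * 1_000_000, // microseconds, aligned to the request
  durationInMicroSeconds: (silenceFrames / TARGET_SAMPLE_RATE) * 1_000_000,
};
console.log(leadingSilence.numberOfFrames, leadingSilence.durationInMicroSeconds); // 120 2500
```
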
@@ -37,6 +37,8 @@ export declare const audioIteratorManager: ({ audioTrack, delayPlaybackHandleIfN
     tryToSatisfySeek: (time: number, allowWait: import("./audio/allow-wait").AllowWait | null, onBufferScheduled: (buffer: WrappedAudioBuffer) => void) => Promise<{
         type: "not-satisfied";
         reason: string;
+    } | {
+        type: "ended";
     } | {
         type: "satisfied";
     }>;
@@ -117,6 +117,9 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
         if (nonce.isStale()) {
             return;
         }
+        if (audioSatisfyResult.type === 'ended') {
+            return;
+        }
         if (audioSatisfyResult.type === 'not-satisfied') {
             await startAudioIterator({
                 nonce,
@@ -157,6 +160,9 @@ export const audioIteratorManager = ({ audioTrack, delayPlaybackHandleIfNotPremo
         if (nonce.isStale()) {
             return;
         }
+        if (audioSatisfyResult.type === 'ended') {
+            return;
+        }
         if (audioSatisfyResult.type === 'not-satisfied') {
             await startAudioIterator({
                 nonce,
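
tryToSatisfySeek now has a third outcome, 'ended', and the iterator manager returns early on it instead of treating an exhausted iterator as 'satisfied' and re-scheduling its last buffer (the state the removed lastReturnedBuffer/iteratorEnded bookkeeping used to track). A minimal sketch of consuming the widened union — the result type is copied from the declarations above, while handleSeek and restartIterator are hypothetical stand-ins for the calling code:

```ts
// The result union returned by tryToSatisfySeek as of this version.
type SatisfySeekResult =
  | {type: 'not-satisfied'; reason: string}
  | {type: 'ended'}
  | {type: 'satisfied'};

// Hypothetical caller mirroring the branches added in audioIteratorManager:
// bail out on 'ended', start a fresh iterator on 'not-satisfied', else done.
const handleSeek = async (
  tryToSatisfySeek: () => Promise<SatisfySeekResult>,
  restartIterator: () => Promise<void>,
): Promise<void> => {
  const result = await tryToSatisfySeek();
  if (result.type === 'ended') {
    // No audio remains past the requested time; nothing to schedule.
    return;
  }
  if (result.type === 'not-satisfied') {
    // The iterator is already past the requested time; start over.
    await restartIterator();
    return;
  }
  // 'satisfied': the matching buffer was already handed to onBufferScheduled.
};
```
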
package/dist/caches.d.ts CHANGED
@@ -1,12 +1,13 @@
 import { type LogLevel } from 'remotion';
 export declare const SAFE_BACK_WINDOW_IN_SECONDS = 1;
 export declare const keyframeManager: {
-    requestKeyframeBank: ({ packetSink, timestamp, videoSampleSink, src, logLevel, }: {
+    requestKeyframeBank: ({ packetSink, timestamp, videoSampleSink, src, logLevel, maxCacheSize, }: {
         packetSink: import("mediabunny").EncodedPacketSink;
         timestamp: number;
         videoSampleSink: import("mediabunny").VideoSampleSink;
         src: string;
         logLevel: LogLevel;
+        maxCacheSize: number;
     }) => Promise<import("./video-extraction/keyframe-bank").KeyframeBank | "has-alpha" | null>;
     getCacheStats: () => Promise<{
         count: number;
@@ -15,13 +16,14 @@ export declare const keyframeManager: {
     clearAll: (logLevel: LogLevel) => Promise<void>;
 };
 export declare const audioManager: {
-    getIterator: ({ src, timeInSeconds, audioSampleSink, isMatroska, actualMatroskaTimestamps, logLevel, }: {
+    getIterator: ({ src, timeInSeconds, audioSampleSink, isMatroska, actualMatroskaTimestamps, logLevel, maxCacheSize, }: {
         src: string;
         timeInSeconds: number;
         audioSampleSink: import("mediabunny").AudioSampleSink;
         isMatroska: boolean;
         actualMatroskaTimestamps: import("./video-extraction/remember-actual-matroska-timestamps").RememberActualMatroskaTimestamps;
         logLevel: LogLevel;
+        maxCacheSize: number;
     }) => Promise<import("./audio-extraction/audio-iterator").AudioSampleIterator>;
     getCacheStats: () => {
         count: number;
@@ -52,3 +54,4 @@ export declare const getTotalCacheStats: () => Promise<{
     totalSize: number;
 }>;
 export declare const getMaxVideoCacheSize: (logLevel: LogLevel) => number;
+export declare const useMaxMediaCacheSize: (logLevel: LogLevel) => number;

package/dist/caches.js CHANGED
@@ -1,3 +1,4 @@
+import React from 'react';
 import { cancelRender, Internals } from 'remotion';
 import { makeAudioManager } from './audio-extraction/audio-manager';
 import { makeKeyframeManager } from './video-extraction/keyframe-manager';
@@ -51,3 +52,10 @@ export const getMaxVideoCacheSize = (logLevel) => {
     cachedMaxCacheSize = getUncachedMaxCacheSize(logLevel);
     return cachedMaxCacheSize;
 };
+export const useMaxMediaCacheSize = (logLevel) => {
+    const context = React.useContext(Internals.MaxMediaCacheSizeContext);
+    if (context === null) {
+        return getMaxVideoCacheSize(logLevel);
+    }
+    return context;
+};
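
The new useMaxMediaCacheSize hook prefers a budget supplied through Internals.MaxMediaCacheSizeContext and only falls back to the heuristic getMaxVideoCacheSize(logLevel) when no provider is mounted (useContext returns null). A small sketch of that resolution order — resolveMaxMediaCacheSize and the numbers are stand-ins, since the context provider and the heuristic's output are outside this diff:

```ts
// Stand-in for the hook's decision: contextValue is what
// React.useContext(Internals.MaxMediaCacheSizeContext) returned, heuristicBytes
// is what getMaxVideoCacheSize(logLevel) would have computed.
const resolveMaxMediaCacheSize = (
  contextValue: number | null,
  heuristicBytes: number,
): number => {
  if (contextValue === null) {
    return heuristicBytes; // no provider mounted → fall back to the heuristic
  }
  return contextValue; // an explicitly configured budget always wins
};

console.log(resolveMaxMediaCacheSize(null, 500_000_000)); // 500000000 (fallback)
console.log(resolveMaxMediaCacheSize(200_000_000, 500_000_000)); // 200000000 (context wins)
```
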
@@ -1,11 +1,12 @@
 import { resampleAudioData, TARGET_NUMBER_OF_CHANNELS, TARGET_SAMPLE_RATE, } from './resample-audiodata';
 const FORMAT = 's16';
 export const fixFloatingPoint = (value) => {
-    if (value % 1 < 0.0000001) {
-        return Math.floor(value);
+    const decimal = Math.abs(value % 1);
+    if (decimal < 0.0000001) {
+        return value < 0 ? Math.ceil(value) : Math.floor(value);
     }
-    if (value % 1 > 0.9999999) {
-        return Math.ceil(value);
+    if (decimal > 0.9999999) {
+        return value < 0 ? Math.floor(value) : Math.ceil(value);
     }
     return value;
 };
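
The fixFloatingPoint rewrite is what makes the negative trimStartInSeconds values above safe to feed into it. In JavaScript, value % 1 is negative for negative inputs, so in the old version the `< 0.0000001` branch matched nearly every negative number and Math.floor pushed the result a whole unit too low; the new version compares the absolute fractional part and only snaps near-integers, in the correct direction for either sign. A quick comparison of both versions, with bodies copied from the hunk above:

```ts
// Old behavior (4.0.374): value % 1 is negative for negative inputs, so the
// first branch fires and Math.floor over-rounds downward.
const fixFloatingPointOld = (value: number): number => {
  if (value % 1 < 0.0000001) {
    return Math.floor(value);
  }
  if (value % 1 > 0.9999999) {
    return Math.ceil(value);
  }
  return value;
};

// New behavior (4.0.376): compare the absolute fractional part and snap
// near-integers toward the nearest integer, respecting the sign.
const fixFloatingPointNew = (value: number): number => {
  const decimal = Math.abs(value % 1);
  if (decimal < 0.0000001) {
    return value < 0 ? Math.ceil(value) : Math.floor(value);
  }
  if (decimal > 0.9999999) {
    return value < 0 ? Math.floor(value) : Math.ceil(value);
  }
  return value;
};

console.log(fixFloatingPointOld(-2.00000001)); // -3  (wrong: floored a near-integer)
console.log(fixFloatingPointNew(-2.00000001)); // -2  (snapped to the nearest integer)
console.log(fixFloatingPointOld(-0.25)); // -1  (wrong: -0.25 % 1 = -0.25 < 1e-7)
console.log(fixFloatingPointNew(-0.25)); // -0.25 (left untouched)
console.log(fixFloatingPointNew(2.9999999995)); // 3
```
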