@remotion/renderer 4.0.356 → 4.0.357

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
@@ -0,0 +1,11 @@
+ import type { LogLevel } from '../log-level';
+ import type { CancelSignal } from '../make-cancel-signal';
+ export declare const applyToneFrequencyUsingFfmpeg: ({ input, output, toneFrequency, indent, logLevel, binariesDirectory, cancelSignal, }: {
+ input: string;
+ output: string;
+ toneFrequency: number;
+ indent: boolean;
+ logLevel: LogLevel;
+ binariesDirectory: string | null;
+ cancelSignal: CancelSignal | undefined;
+ }) => Promise<void>;
@@ -0,0 +1,34 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.applyToneFrequencyUsingFfmpeg = void 0;
+ const call_ffmpeg_1 = require("../call-ffmpeg");
+ const logger_1 = require("../logger");
+ const sample_rate_1 = require("../sample-rate");
+ const applyToneFrequencyUsingFfmpeg = async ({ input, output, toneFrequency, indent, logLevel, binariesDirectory, cancelSignal, }) => {
+ const filter = `asetrate=${sample_rate_1.DEFAULT_SAMPLE_RATE}*${toneFrequency},aresample=${sample_rate_1.DEFAULT_SAMPLE_RATE},atempo=1/${toneFrequency}`;
+ const args = [
+ '-hide_banner',
+ '-i',
+ input,
+ ['-ac', '2'],
+ '-filter:a',
+ filter,
+ ['-c:a', 'pcm_s16le'],
+ ['-ar', String(sample_rate_1.DEFAULT_SAMPLE_RATE)],
+ '-y',
+ output,
+ ].flat(2);
+ logger_1.Log.verbose({ indent, logLevel }, 'Changing tone frequency using FFmpeg:', JSON.stringify(args.join(' ')), 'Filter:', filter);
+ const startTimestamp = Date.now();
+ const task = (0, call_ffmpeg_1.callFf)({
+ bin: 'ffmpeg',
+ args,
+ indent,
+ logLevel,
+ binariesDirectory,
+ cancelSignal,
+ });
+ await task;
+ logger_1.Log.verbose({ indent, logLevel }, 'Changed tone frequency using FFmpeg', `${Date.now() - startTimestamp}ms`);
+ };
+ exports.applyToneFrequencyUsingFfmpeg = applyToneFrequencyUsingFfmpeg;
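To see why this filter chain shifts pitch without changing duration, here is a minimal sketch of the arithmetic — not package API; the helper name is invented, and `DEFAULT_SAMPLE_RATE` mirrors the constant from `src/sample-rate.ts`:

```ts
// Sketch of what `asetrate=R*f,aresample=R,atempo=1/f` does, for factor f.
const DEFAULT_SAMPLE_RATE = 48000; // R, as in src/sample-rate.ts

const describeToneFrequency = (toneFrequency: number) => ({
  // asetrate: reinterpret the samples at R*f Hz → pitch and speed scale by f
  reinterpretedRate: DEFAULT_SAMPLE_RATE * toneFrequency,
  // aresample: convert back to R Hz; the pitch/speed change stays baked in
  pitchFactor: toneFrequency,
  // atempo=1/f: undo only the speed change, so duration is preserved
  atempoFactor: 1 / toneFrequency,
  netDurationFactor: toneFrequency * (1 / toneFrequency), // = 1
});

// toneFrequency = 2 → samples reinterpreted at 96000 Hz (one octave up),
// then atempo=0.5 stretches the sped-up audio back to its original length.
console.log(describeToneFrequency(2));
```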
@@ -0,0 +1,34 @@
+ /**
+ * Time-scale modification (tempo change) with approximate pitch preservation
+ * for interleaved Int16 PCM with multiple channels, using a SOLA/WSOLA-like method.
+ *
+ * @param input Interleaved Int16 PCM samples (e.g., LR LR LR ...)
+ * @param channels Number of channels (e.g., 2 for stereo)
+ * @param f Tempo factor: >1.0 = faster (shorter), <1.0 = slower (longer)
+ * @param opts Optional tuning parameters
+ * @returns Interleaved Int16 PCM with length ≈ round(input.length * f)
+ */
+ export declare function atempoInt16Interleaved(input: Int16Array, channels: number, f: number, opts?: {
+ sampleRate?: number;
+ frameMs?: number;
+ overlapRatio?: number;
+ searchMs?: number;
+ window?: 'hann' | 'hamming';
+ clamp?: boolean;
+ }): Int16Array;
+ /**
+ * Reads a WAV file, applies WSOLA tempo modification, and writes it back.
+ * Ignores the first 44 bytes (WAV header) and treats the rest as interleaved Int16 PCM.
+ *
+ * @param filePath Path to the WAV file to process
+ * @param tempoFactor Tempo factor: >1 = faster/shorter, <1 = slower/longer
+ */
+ export declare function processWavFileWithWSOLA(filePath: string, tempoFactor: number): Promise<void>;
+ export declare const NUMBER_OF_CHANNELS = 2;
+ export declare const applyToneFrequency: (numberOfFrames: number, audioData: Int16Array, toneFrequency: number) => Int16Array;
+ export declare const resampleAudioData: ({ sourceChannels, destination, targetFrames, chunkSize, }: {
+ sourceChannels: Int16Array;
+ destination: Int16Array;
+ targetFrames: number;
+ chunkSize: number;
+ }) => void;
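A minimal usage sketch of the declared function, assuming a 48 kHz interleaved stereo buffer; the relative import path and the sine test signal are illustrative:

```ts
import {atempoInt16Interleaved} from './change-tempo'; // path assumed

// One second of a 440 Hz tone, stereo, interleaved LR LR … at 48 kHz.
const channels = 2;
const frames = 48000;
const input = new Int16Array(frames * channels);
for (let i = 0; i < frames; i++) {
  const s = Math.round(16384 * Math.sin((2 * Math.PI * 440 * i) / 48000));
  input[i * channels] = s; // left
  input[i * channels + 1] = s; // right
}

// Per the declared contract, output.length ≈ round(input.length * f).
const output = atempoInt16Interleaved(input, channels, 1.25, {
  sampleRate: 48000,
  window: 'hann',
});
console.log(output.length / channels); // ≈ 60000 frames
```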
@@ -0,0 +1,287 @@
+ "use strict";
+ var __importDefault = (this && this.__importDefault) || function (mod) {
+ return (mod && mod.__esModule) ? mod : { "default": mod };
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.resampleAudioData = exports.applyToneFrequency = exports.NUMBER_OF_CHANNELS = void 0;
+ exports.atempoInt16Interleaved = atempoInt16Interleaved;
+ exports.processWavFileWithWSOLA = processWavFileWithWSOLA;
+ const promises_1 = __importDefault(require("fs/promises"));
+ const sample_rate_1 = require("../sample-rate");
+ function clamp16(x) {
+ const y = Math.round(x);
+ return y < -32768 ? -32768 : y > 32767 ? 32767 : y;
+ }
+ /**
+ * Time-scale modification (tempo change) with approximate pitch preservation
+ * for interleaved Int16 PCM with multiple channels, using a SOLA/WSOLA-like method.
+ *
+ * @param input Interleaved Int16 PCM samples (e.g., LR LR LR ...)
+ * @param channels Number of channels (e.g., 2 for stereo)
+ * @param f Tempo factor: >1.0 = faster (shorter), <1.0 = slower (longer)
+ * @param opts Optional tuning parameters
+ * @returns Interleaved Int16 PCM with length ≈ round(input.length * f)
+ */
+ function atempoInt16Interleaved(input, channels, f, opts) {
+ var _a, _b, _c, _d, _e;
+ if (!Number.isFinite(f) || f <= 0) {
+ throw new Error('f must be a positive finite number');
+ }
+ if (!Number.isInteger(channels) || channels <= 0) {
+ throw new Error('channels must be a positive integer');
+ }
+ const n = input.length;
+ if (n === 0)
+ return new Int16Array(0);
+ if (n % channels !== 0) {
+ throw new Error('input length must be a multiple of channels (interleaved PCM)');
+ }
+ // Parameters
+ const sampleRate = (_a = opts === null || opts === void 0 ? void 0 : opts.sampleRate) !== null && _a !== void 0 ? _a : 48000;
+ const frameMs = (_b = opts === null || opts === void 0 ? void 0 : opts.frameMs) !== null && _b !== void 0 ? _b : 30;
+ const overlapRatio = Math.max(0.1, Math.min(0.95, (_c = opts === null || opts === void 0 ? void 0 : opts.overlapRatio) !== null && _c !== void 0 ? _c : 0.55));
+ const searchMs = (_d = opts === null || opts === void 0 ? void 0 : opts.searchMs) !== null && _d !== void 0 ? _d : 8;
+ const winKind = (_e = opts === null || opts === void 0 ? void 0 : opts.window) !== null && _e !== void 0 ? _e : 'hann';
+ // Work in samples per channel
+ const samplesPerChannel = (n / channels) | 0;
+ // Frame sizing and hops (per channel)
+ const frameSize = Math.max(128, Math.floor((sampleRate * frameMs) / 1000));
+ const overlap = Math.floor(frameSize * overlapRatio);
+ const anaHop = Math.max(1, frameSize - overlap);
+ const synHop = Math.max(1, Math.round(anaHop * f));
+ const searchRadius = Math.max(0, Math.floor((sampleRate * searchMs) / 1000));
+ // Window
+ const win = new Float32Array(frameSize);
+ for (let i = 0; i < frameSize; i++) {
+ const x = (Math.PI * 2 * i) / (frameSize - 1);
+ win[i] =
+ winKind === 'hann' ? 0.5 * (1 - Math.cos(x)) : 0.54 - 0.46 * Math.cos(x);
+ }
+ // Output buffers as float accumulators per channel
+ const estFrames = Math.ceil((samplesPerChannel - frameSize) / anaHop) + 1;
+ const estLen = Math.max(0, frameSize + synHop * (estFrames - 1));
+ const outLenAlloc = estLen + frameSize + searchRadius + 16;
+ const out = Array.from({ length: channels }, () => new Float32Array(outLenAlloc));
+ const outWeight = new Float32Array(outLenAlloc);
+ // Helper: read one channel’s frame from interleaved PCM
+ function readChannelFrame(chan, start, dst) {
+ // start is per-channel sample index
+ let srcIndex = start * channels + chan;
+ for (let i = 0; i < frameSize; i++) {
+ const pos = start + i;
+ let v = 0;
+ if (pos >= 0 && pos < samplesPerChannel) {
+ v = input[srcIndex];
+ }
+ dst[i] = v;
+ srcIndex += channels;
+ }
+ }
+ // Build a mono guide frame (mid/mono mix) to drive alignment
+ const guideFrame = new Float32Array(frameSize);
+ function readGuideFrame(start) {
+ for (let i = 0; i < frameSize; i++) {
+ const pos = start + i;
+ if (pos >= 0 && pos < samplesPerChannel) {
+ let sum = 0;
+ const base = (pos * channels) | 0;
+ for (let c = 0; c < channels; c++) {
+ sum += input[base + c];
+ }
+ guideFrame[i] = sum / channels;
+ }
+ else {
+ guideFrame[i] = 0;
+ }
+ }
+ }
+ // Cross-correlation on overlap region using guide to find best local alignment
+ function bestAlignment(outPosition, baseShift) {
+ let bestShift = baseShift;
+ let bestScore = -Infinity;
+ for (let shift = -searchRadius; shift <= searchRadius; shift++) {
+ const pos = outPosition + shift - overlap;
+ let score = 0;
+ let normA = 0;
+ let normB = 0;
+ for (let i = 0; i < overlap; i++) {
+ const outIdx = pos + i;
+ const outVal = outIdx >= 0 && outIdx < outLenAlloc ? out[0][outIdx] : 0; // use channel 0 accumulator as proxy
+ const frmVal = guideFrame[i];
+ score += outVal * frmVal;
+ normA += outVal * outVal;
+ normB += frmVal * frmVal;
+ }
+ const denom = Math.sqrt((normA || 1e-9) * (normB || 1e-9));
+ const corr = score / denom;
+ if (corr > bestScore) {
+ bestScore = corr;
+ bestShift = shift;
+ }
+ }
+ return bestShift;
+ }
+ // Temp buffers per channel
+ const chanFrames = Array.from({ length: channels }, () => new Float32Array(frameSize));
+ let inPos = 0; // per-channel sample index
+ let outPos = 0; // per-channel sample index in accumulators
+ // First frame: place directly
+ readGuideFrame(0);
+ for (let c = 0; c < channels; c++) {
+ readChannelFrame(c, 0, chanFrames[c]);
+ for (let i = 0; i < frameSize; i++) {
+ const w = win[i];
+ const idx = i; // write starting at 0
+ out[c][idx] += chanFrames[c][i] * w;
+ if (c === 0)
+ outWeight[idx] += w;
+ }
+ }
+ inPos += anaHop;
+ outPos += synHop;
+ // Process remaining frames
+ while (inPos < samplesPerChannel - 1) {
+ readGuideFrame(inPos);
+ // Find best alignment using guide
+ const shift = bestAlignment(outPos, 0);
+ const writeStart = outPos + shift - overlap;
+ // Windowed overlap-add for each channel using same alignment
+ for (let c = 0; c < channels; c++) {
+ readChannelFrame(c, inPos, chanFrames[c]);
+ for (let i = 0; i < frameSize; i++) {
+ const idx = writeStart + i;
+ if (idx >= 0 && idx < outLenAlloc) {
+ const w = win[i];
+ out[c][idx] += chanFrames[c][i] * w;
+ if (c === 0)
+ outWeight[idx] += w;
+ }
+ }
+ }
+ inPos += anaHop;
+ outPos += synHop;
+ if (outPos + frameSize + searchRadius + 8 >= outLenAlloc)
+ break;
+ }
+ // Normalize by accumulated window weights
+ for (let i = 0; i < outLenAlloc; i++) {
+ const w = outWeight[i];
+ if (w > 1e-9) {
+ const inv = 1 / w;
+ for (let c = 0; c < channels; c++) {
+ out[c][i] *= inv;
+ }
+ }
+ }
+ // Target per-channel length and interleave
+ const targetPerChan = Math.max(1, Math.round(samplesPerChannel * f));
+ const targetTotal = targetPerChan * channels;
+ const result = new Int16Array(targetTotal);
+ // Clamp/convert and interleave
+ for (let i = 0; i < targetPerChan; i++) {
+ for (let c = 0; c < channels; c++) {
+ const v = i < out[c].length ? out[c][i] : 0;
+ const y = clamp16(v);
+ result[i * channels + c] = y;
+ }
+ }
+ return result;
+ }
+ /**
+ * Reads a WAV file, applies WSOLA tempo modification, and writes it back.
+ * Ignores the first 44 bytes (WAV header) and treats the rest as interleaved Int16 PCM.
+ *
+ * @param filePath Path to the WAV file to process
+ * @param tempoFactor Tempo factor: >1 = faster/shorter, <1 = slower/longer
+ */
+ async function processWavFileWithWSOLA(filePath, tempoFactor) {
+ // Read the file
+ const fileBuffer = await promises_1.default.readFile(filePath);
+ // Skip first 44 bytes (WAV header) and create Int16Array
+ const audioData = new Int16Array(fileBuffer.buffer, 44);
+ // Apply WSOLA with 2 channels (stereo)
+ const processedAudio = (0, exports.applyToneFrequency)(audioData.length / 2, audioData, tempoFactor);
+ // Create new buffer with original header + processed audio
+ const newBuffer = new Uint8Array(44 + processedAudio.length * 2);
+ // Copy original header (first 44 bytes)
+ newBuffer.set(fileBuffer.subarray(0, 44), 0);
+ // Copy processed audio data
+ const processedBytes = new Uint8Array(processedAudio.buffer);
+ newBuffer.set(processedBytes, 44);
+ // Write the processed file back
+ await promises_1.default.writeFile(filePath, newBuffer);
+ }
+ exports.NUMBER_OF_CHANNELS = 2;
+ const applyToneFrequency = (numberOfFrames, audioData, toneFrequency) => {
+ // In FFmpeg, we apply toneFrequency as follows:
217
+ // `asetrate=${DEFAULT_SAMPLE_RATE}*${toneFrequency},aresample=${DEFAULT_SAMPLE_RATE},atempo=1/${toneFrequency}`
218
+ // So there are 2 steps:
219
+ // 1. Change the assumed sample rate
220
+ // 2. Resample to 48Khz
221
+ // 3. Apply playback rate
222
+ const step1SampleRate = sample_rate_1.DEFAULT_SAMPLE_RATE * toneFrequency;
223
+ const newNumberOfFrames = Math.round(numberOfFrames * (sample_rate_1.DEFAULT_SAMPLE_RATE / step1SampleRate));
224
+ const step2Data = new Int16Array(newNumberOfFrames * exports.NUMBER_OF_CHANNELS);
225
+ const chunkSize = numberOfFrames / newNumberOfFrames;
226
+ (0, exports.resampleAudioData)({
227
+ sourceChannels: audioData,
228
+ destination: step2Data,
229
+ targetFrames: newNumberOfFrames,
230
+ chunkSize,
231
+ });
232
+ const step3Data = atempoInt16Interleaved(step2Data, exports.NUMBER_OF_CHANNELS, toneFrequency, {
233
+ sampleRate: 48000,
234
+ });
235
+ return step3Data;
236
+ };
237
+ exports.applyToneFrequency = applyToneFrequency;
238
+ const fixFloatingPoint = (value) => {
239
+ if (value % 1 < 0.0000001) {
240
+ return Math.floor(value);
241
+ }
242
+ if (value % 1 > 0.9999999) {
243
+ return Math.ceil(value);
244
+ }
245
+ return value;
246
+ };
247
+ const resampleAudioData = ({ sourceChannels, destination, targetFrames, chunkSize, }) => {
248
+ const getSourceValues = (startUnfixed, endUnfixed, channelIndex) => {
249
+ const start = fixFloatingPoint(startUnfixed);
250
+ const end = fixFloatingPoint(endUnfixed);
251
+ const startFloor = Math.floor(start);
252
+ const startCeil = Math.ceil(start);
253
+ const startFraction = start - startFloor;
254
+ const endFraction = end - Math.floor(end);
255
+ const endFloor = Math.floor(end);
256
+ let weightedSum = 0;
257
+ let totalWeight = 0;
258
+ // Handle first fractional sample
259
+ if (startFraction > 0) {
260
+ const firstSample = sourceChannels[startFloor * exports.NUMBER_OF_CHANNELS + channelIndex];
261
+ weightedSum += firstSample * (1 - startFraction);
262
+ totalWeight += 1 - startFraction;
263
+ }
264
+ // Handle full samples
265
+ for (let k = startCeil; k < endFloor; k++) {
266
+ const num = sourceChannels[k * exports.NUMBER_OF_CHANNELS + channelIndex];
267
+ weightedSum += num;
268
+ totalWeight += 1;
269
+ }
270
+ // Handle last fractional sample
271
+ if (endFraction > 0) {
272
+ const lastSample = sourceChannels[endFloor * exports.NUMBER_OF_CHANNELS + channelIndex];
273
+ weightedSum += lastSample * endFraction;
274
+ totalWeight += endFraction;
275
+ }
276
+ const average = weightedSum / totalWeight;
277
+ return average;
278
+ };
279
+ for (let newFrameIndex = 0; newFrameIndex < targetFrames; newFrameIndex++) {
280
+ const start = newFrameIndex * chunkSize;
281
+ const end = start + chunkSize;
282
+ for (let i = 0; i < exports.NUMBER_OF_CHANNELS; i++) {
283
+ destination[newFrameIndex * exports.NUMBER_OF_CHANNELS + i] = getSourceValues(start, end, i);
284
+ }
285
+ }
286
+ };
287
+ exports.resampleAudioData = resampleAudioData;
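The length scaling in the overlap-add loop falls out of the two hop sizes computed above; a worked sketch with the function's defaults (48 kHz, 30 ms frames, 0.55 overlap) and f = 1.25:

```ts
// Frame/hop sizing exactly as in atempoInt16Interleaved's defaults.
const sampleRate = 48000;
const frameMs = 30;
const overlapRatio = 0.55;
const f = 1.25;

const frameSize = Math.max(128, Math.floor((sampleRate * frameMs) / 1000)); // 1440
const overlap = Math.floor(frameSize * overlapRatio); // 792
const anaHop = Math.max(1, frameSize - overlap); // 648 input samples per frame
const synHop = Math.max(1, Math.round(anaHop * f)); // 810 output samples per frame

// Each iteration reads anaHop new input samples but advances the output by
// synHop, so the overlap-added result is scaled by synHop / anaHop ≈ f.
console.log({frameSize, overlap, anaHop, synHop, ratio: synHop / anaHop}); // ratio = 1.25
```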
@@ -0,0 +1,2 @@
+ export declare const NUMBER_OF_CHANNELS = 2;
+ export declare const applyToneFrequency: (numberOfFrames: number, audioData: Int16Array, toneFrequency: number) => Int16Array;
@@ -0,0 +1,28 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.applyToneFrequency = exports.NUMBER_OF_CHANNELS = void 0;
+ const sample_rate_1 = require("../sample-rate");
+ const change_tempo_1 = require("./change-tempo");
+ const resample_audiodata_1 = require("./resample-audiodata");
+ exports.NUMBER_OF_CHANNELS = 2;
+ const applyToneFrequency = (numberOfFrames, audioData, toneFrequency) => {
+ // In FFmpeg, we apply toneFrequency as follows:
10
+ // `asetrate=${DEFAULT_SAMPLE_RATE}*${toneFrequency},aresample=${DEFAULT_SAMPLE_RATE},atempo=1/${toneFrequency}`
11
+ // So there are 2 steps:
12
+ // 1. Change the assumed sample rate
13
+ // 2. Resample to 48Khz
14
+ // 3. Apply playback rate
15
+ const step1SampleRate = sample_rate_1.DEFAULT_SAMPLE_RATE * toneFrequency;
16
+ const newNumberOfFrames = Math.round(numberOfFrames * (sample_rate_1.DEFAULT_SAMPLE_RATE / step1SampleRate));
17
+ const step2Data = new Int16Array(newNumberOfFrames * exports.NUMBER_OF_CHANNELS);
18
+ const chunkSize = numberOfFrames / newNumberOfFrames;
19
+ (0, resample_audiodata_1.resampleAudioData)({
20
+ sourceChannels: audioData,
21
+ destination: step2Data,
22
+ targetFrames: newNumberOfFrames,
23
+ chunkSize,
24
+ });
25
+ const step3Data = (0, change_tempo_1.wsolaInt16Interleaved)(step2Data, exports.NUMBER_OF_CHANNELS, toneFrequency);
26
+ return step3Data;
27
+ };
28
+ exports.applyToneFrequency = applyToneFrequency;
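To make the three commented steps concrete, here is the arithmetic for one second of 48 kHz stereo at toneFrequency = 1.5 — a sketch of the intermediate values the code above computes, not additional package API:

```ts
const DEFAULT_SAMPLE_RATE = 48000;
const toneFrequency = 1.5;
const numberOfFrames = 48000; // one second of input

// Step 1: treat the data as if it was recorded at 48000 * 1.5 = 72000 Hz.
const step1SampleRate = DEFAULT_SAMPLE_RATE * toneFrequency;

// Step 2: resampling back to 48 kHz shrinks the buffer to 32000 frames
// (pitch raised, duration shortened); each destination frame averages
// chunkSize = 1.5 source frames.
const newNumberOfFrames = Math.round(
  numberOfFrames * (DEFAULT_SAMPLE_RATE / step1SampleRate),
); // 32000
const chunkSize = numberOfFrames / newNumberOfFrames; // 1.5

// Step 3: time-stretching by toneFrequency brings the length back to
// ~48000 frames, so only the pitch shift remains — matching the FFmpeg
// chain asetrate → aresample → atempo=1/toneFrequency.
const finalFrames = Math.round(newNumberOfFrames * toneFrequency); // 48000
console.log({step1SampleRate, newNumberOfFrames, chunkSize, finalFrames});
```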
@@ -1,4 +1,6 @@
  import type { InlineAudioAsset } from 'remotion/no-react';
+ import type { LogLevel } from '../log-level';
+ import type { CancelSignal } from '../make-cancel-signal';
  export declare const makeInlineAudioMixing: (dir: string) => {
  cleanup: () => void;
  addAsset: ({ asset, fps, totalNumberOfFrames, firstFrame, trimLeftOffset, trimRightOffset, }: {
@@ -10,5 +12,11 @@ export declare const makeInlineAudioMixing: (dir: string) => {
  trimRightOffset: number;
  }) => void;
  getListOfAssets: () => string[];
+ finish: ({ binariesDirectory, indent, logLevel, cancelSignal, }: {
+ indent: boolean;
+ logLevel: LogLevel;
+ binariesDirectory: string | null;
+ cancelSignal: CancelSignal | undefined;
+ }) => Promise<void>;
  };
  export type InlineAudioMixing = ReturnType<typeof makeInlineAudioMixing>;
@@ -41,6 +41,7 @@ const node_fs_1 = __importStar(require("node:fs"));
  const node_path_1 = __importDefault(require("node:path"));
  const delete_directory_1 = require("../delete-directory");
  const sample_rate_1 = require("../sample-rate");
+ const apply_tone_frequency_1 = require("./apply-tone-frequency");
  const download_map_1 = require("./download-map");
  const numberTo32BiIntLittleEndian = (num) => {
  return new Uint8Array([
@@ -55,11 +56,13 @@ const numberTo16BitLittleEndian = (num) => {
  };
  const BIT_DEPTH = 16;
  const BYTES_PER_SAMPLE = BIT_DEPTH / 8;
+ const NUMBER_OF_CHANNELS = 2;
  const makeInlineAudioMixing = (dir) => {
  const folderToAdd = (0, download_map_1.makeAndReturn)(dir, 'remotion-inline-audio-mixing');
  // asset id -> file descriptor
  const openFiles = {};
  const writtenHeaders = {};
+ const toneFrequencies = {};
  const cleanup = () => {
  for (const fd of Object.values(openFiles)) {
  try {
@@ -85,11 +88,10 @@ const makeInlineAudioMixing = (dir) => {
  }
  writtenHeaders[filePath] = true;
  const expectedDataSize = Math.round((totalNumberOfFrames / fps - trimLeftOffset + trimRightOffset) *
- asset.numberOfChannels *
+ NUMBER_OF_CHANNELS *
  sample_rate_1.DEFAULT_SAMPLE_RATE *
  BYTES_PER_SAMPLE);
  const expectedSize = 40 + expectedDataSize;
- const { numberOfChannels } = asset;
  const fd = openFiles[filePath];
  (0, node_fs_1.writeSync)(fd, new Uint8Array([0x52, 0x49, 0x46, 0x46]), 0, 4, 0); // "RIFF"
  (0, node_fs_1.writeSync)(fd, new Uint8Array(numberTo32BiIntLittleEndian(expectedSize)), 0, 4, 4); // Remaining size
@@ -97,14 +99,32 @@ const makeInlineAudioMixing = (dir) => {
  (0, node_fs_1.writeSync)(fd, new Uint8Array([0x66, 0x6d, 0x74, 0x20]), 0, 4, 12); // "fmt "
  (0, node_fs_1.writeSync)(fd, new Uint8Array([BIT_DEPTH, 0x00, 0x00, 0x00]), 0, 4, 16); // fmt chunk size = 16
  (0, node_fs_1.writeSync)(fd, new Uint8Array([0x01, 0x00]), 0, 2, 20); // Audio format (PCM) = 1, set 3 if float32 would be true
- (0, node_fs_1.writeSync)(fd, new Uint8Array([numberOfChannels, 0x00]), 0, 2, 22); // Number of channels
+ (0, node_fs_1.writeSync)(fd, new Uint8Array([NUMBER_OF_CHANNELS, 0x00]), 0, 2, 22); // Number of channels
  (0, node_fs_1.writeSync)(fd, new Uint8Array(numberTo32BiIntLittleEndian(sample_rate_1.DEFAULT_SAMPLE_RATE)), 0, 4, 24); // Sample rate
- (0, node_fs_1.writeSync)(fd, new Uint8Array(numberTo32BiIntLittleEndian(sample_rate_1.DEFAULT_SAMPLE_RATE * numberOfChannels * BYTES_PER_SAMPLE)), 0, 4, 28); // Byte rate
- (0, node_fs_1.writeSync)(fd, new Uint8Array(numberTo16BitLittleEndian(numberOfChannels * BYTES_PER_SAMPLE)), 0, 2, 32); // Block align
+ (0, node_fs_1.writeSync)(fd, new Uint8Array(numberTo32BiIntLittleEndian(sample_rate_1.DEFAULT_SAMPLE_RATE * NUMBER_OF_CHANNELS * BYTES_PER_SAMPLE)), 0, 4, 28); // Byte rate
+ (0, node_fs_1.writeSync)(fd, new Uint8Array(numberTo16BitLittleEndian(NUMBER_OF_CHANNELS * BYTES_PER_SAMPLE)), 0, 2, 32); // Block align
  (0, node_fs_1.writeSync)(fd, numberTo16BitLittleEndian(BIT_DEPTH), 0, 2, 34); // Bits per sample
  (0, node_fs_1.writeSync)(fd, new Uint8Array([0x64, 0x61, 0x74, 0x61]), 0, 4, 36); // "data"
  (0, node_fs_1.writeSync)(fd, new Uint8Array(numberTo32BiIntLittleEndian(expectedDataSize)), 0, 4, 40); // Remaining size
  };
+ const finish = async ({ binariesDirectory, indent, logLevel, cancelSignal, }) => {
+ for (const fd of Object.keys(openFiles)) {
+ const frequency = toneFrequencies[fd];
+ if (frequency !== 1) {
+ const tmpFile = fd.replace(/.wav$/, '-tmp.wav');
+ await (0, apply_tone_frequency_1.applyToneFrequencyUsingFfmpeg)({
+ input: fd,
+ output: tmpFile,
+ toneFrequency: frequency,
+ indent,
+ logLevel,
+ binariesDirectory,
+ cancelSignal,
+ });
+ node_fs_1.default.renameSync(tmpFile, fd);
+ }
+ }
+ };
  const addAsset = ({ asset, fps, totalNumberOfFrames, firstFrame, trimLeftOffset, trimRightOffset, }) => {
  ensureAsset({
  asset,
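The writeSync calls in the hunk above populate the canonical 44-byte PCM WAV header field by field. For cross-reference, here is the same layout built in one pass with a DataView — a standalone sketch; the helper name is invented, and the RIFF size comment states the canonical value rather than this file's `40 + expectedDataSize`:

```ts
// Offsets match the writeSync calls above: RIFF/WAVE/"fmt "/data at 0/8/12/36.
const makeWavHeader = (dataSize: number, sampleRate: number): Uint8Array => {
  const channels = 2; // NUMBER_OF_CHANNELS
  const bytesPerSample = 2; // 16-bit PCM
  const view = new DataView(new ArrayBuffer(44));
  const ascii = (offset: number, s: string) =>
    [...s].forEach((c, i) => view.setUint8(offset + i, c.charCodeAt(0)));

  ascii(0, 'RIFF');
  view.setUint32(4, 36 + dataSize, true); // RIFF size (canonically 36 + data)
  ascii(8, 'WAVE');
  ascii(12, 'fmt ');
  view.setUint32(16, 16, true); // fmt chunk size
  view.setUint16(20, 1, true); // audio format: PCM
  view.setUint16(22, channels, true);
  view.setUint32(24, sampleRate, true);
  view.setUint32(28, sampleRate * channels * bytesPerSample, true); // byte rate
  view.setUint16(32, channels * bytesPerSample, true); // block align
  view.setUint16(34, 16, true); // bits per sample
  ascii(36, 'data');
  view.setUint32(40, dataSize, true);
  return new Uint8Array(view.buffer);
};
```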
@@ -114,7 +134,12 @@ const makeInlineAudioMixing = (dir) => {
  trimRightOffset,
  });
  const filePath = getFilePath(asset);
+ if (toneFrequencies[filePath] !== undefined &&
+ toneFrequencies[filePath] !== asset.toneFrequency) {
+ throw new Error(`toneFrequency must be the same across the entire audio, got ${asset.toneFrequency}, but before it was ${toneFrequencies[filePath]}`);
+ }
  const fileDescriptor = openFiles[filePath];
+ toneFrequencies[filePath] = asset.toneFrequency;
  let arr = new Int16Array(asset.audio);
  const isFirst = asset.frame === firstFrame;
  const isLast = asset.frame === totalNumberOfFrames + firstFrame - 1;
@@ -129,14 +154,18 @@ const makeInlineAudioMixing = (dir) => {
  throw new Error(`samplesToShaveFromStart should be approximately an integer, is ${samplesToShaveFromStart}`);
  }
  if (isFirst) {
- arr = arr.slice(Math.round(samplesToShaveFromStart) * asset.numberOfChannels);
+ arr = arr.slice(Math.round(samplesToShaveFromStart) * NUMBER_OF_CHANNELS);
  }
  if (isLast) {
- arr = arr.slice(0, arr.length + Math.round(samplesToShaveFromEnd) * asset.numberOfChannels);
+ arr = arr.slice(0, arr.length + Math.round(samplesToShaveFromEnd) * NUMBER_OF_CHANNELS);
  }
  const positionInSeconds = (asset.frame - firstFrame) / fps - (isFirst ? 0 : trimLeftOffset);
- const position = Math.round(positionInSeconds * sample_rate_1.DEFAULT_SAMPLE_RATE) *
- asset.numberOfChannels *
+ // Always rounding down to ensure there are no gaps when the samples don't align
+ // In @remotion/media, we also round down the sample start timestamp and round up the end timestamp
+ // This might lead to overlapping, hopefully aligning perfectly!
+ // Test case: https://github.com/remotion-dev/remotion/issues/5758
+ const position = Math.floor(positionInSeconds * sample_rate_1.DEFAULT_SAMPLE_RATE) *
+ NUMBER_OF_CHANNELS *
  BYTES_PER_SAMPLE;
  (0, node_fs_1.writeSync)(
  // fs
@@ -154,6 +183,7 @@ const makeInlineAudioMixing = (dir) => {
  cleanup,
  addAsset,
  getListOfAssets,
+ finish,
  };
  };
  exports.makeInlineAudioMixing = makeInlineAudioMixing;
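To see what the Math.round → Math.floor change above does to the write offset, here is the byte-position arithmetic for a frame rate whose boundaries don't fall on whole samples — a worked sketch; 29.97 fps is chosen only to make the misalignment visible:

```ts
const DEFAULT_SAMPLE_RATE = 48000;
const NUMBER_OF_CHANNELS = 2;
const BYTES_PER_SAMPLE = 2;

// At 29.97 fps, frame boundaries land between samples:
const fps = 29.97;
const frame = 1; // relative to firstFrame; trim offsets omitted here
const positionInSeconds = frame / fps; // 0.0333667…
const exactSamples = positionInSeconds * DEFAULT_SAMPLE_RATE; // 1601.6016…

// Math.round would start this chunk at sample 1602; Math.floor starts at
// 1601, biasing the start downward so consecutive chunks can overlap by a
// fraction of a sample instead of leaving one-sample gaps.
const sampleIndex = Math.floor(exactSamples); // 1601 (round would give 1602)
const byteOffset = sampleIndex * NUMBER_OF_CHANNELS * BYTES_PER_SAMPLE; // 6404
console.log({exactSamples, sampleIndex, byteOffset}); // written at 44 + byteOffset
```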
@@ -0,0 +1,6 @@
+ export declare const resampleAudioData: ({ sourceChannels, destination, targetFrames, chunkSize, }: {
+ sourceChannels: Int16Array;
+ destination: Int16Array;
+ targetFrames: number;
+ chunkSize: number;
+ }) => void;
@@ -0,0 +1,54 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.resampleAudioData = void 0;
+ const change_tempo_1 = require("./change-tempo");
+ const fixFloatingPoint = (value) => {
+ if (value % 1 < 0.0000001) {
+ return Math.floor(value);
+ }
+ if (value % 1 > 0.9999999) {
+ return Math.ceil(value);
+ }
+ return value;
+ };
+ const resampleAudioData = ({ sourceChannels, destination, targetFrames, chunkSize, }) => {
+ const getSourceValues = (startUnfixed, endUnfixed, channelIndex) => {
+ const start = fixFloatingPoint(startUnfixed);
+ const end = fixFloatingPoint(endUnfixed);
+ const startFloor = Math.floor(start);
+ const startCeil = Math.ceil(start);
+ const startFraction = start - startFloor;
+ const endFraction = end - Math.floor(end);
+ const endFloor = Math.floor(end);
+ let weightedSum = 0;
+ let totalWeight = 0;
+ // Handle first fractional sample
+ if (startFraction > 0) {
+ const firstSample = sourceChannels[startFloor * change_tempo_1.NUMBER_OF_CHANNELS + channelIndex];
+ weightedSum += firstSample * (1 - startFraction);
+ totalWeight += 1 - startFraction;
+ }
+ // Handle full samples
+ for (let k = startCeil; k < endFloor; k++) {
+ const num = sourceChannels[k * change_tempo_1.NUMBER_OF_CHANNELS + channelIndex];
+ weightedSum += num;
+ totalWeight += 1;
+ }
+ // Handle last fractional sample
+ if (endFraction > 0) {
+ const lastSample = sourceChannels[endFloor * change_tempo_1.NUMBER_OF_CHANNELS + channelIndex];
+ weightedSum += lastSample * endFraction;
+ totalWeight += endFraction;
+ }
+ const average = weightedSum / totalWeight;
+ return average;
+ };
+ for (let newFrameIndex = 0; newFrameIndex < targetFrames; newFrameIndex++) {
+ const start = newFrameIndex * chunkSize;
+ const end = start + chunkSize;
+ for (let i = 0; i < change_tempo_1.NUMBER_OF_CHANNELS; i++) {
+ destination[newFrameIndex * change_tempo_1.NUMBER_OF_CHANNELS + i] = getSourceValues(start, end, i);
+ }
+ }
+ };
+ exports.resampleAudioData = resampleAudioData;
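A small usage sketch of the resampler above, downsampling 6 stereo frames to 4 so that each output frame averages chunkSize = 1.5 source frames; the import path is assumed and the sample values are illustrative:

```ts
import {resampleAudioData} from './resample-audiodata'; // path assumed

// 6 interleaved stereo frames; left and right are equal for readability.
const source = new Int16Array([
  100, 100, 200, 200, 300, 300, 400, 400, 500, 500, 600, 600,
]);
const targetFrames = 4;
const destination = new Int16Array(targetFrames * 2);

// Frame 0 covers source frames [0, 1.5): all of 100 plus half of 200,
// weighted-averaged to (100 + 0.5 * 200) / 1.5 ≈ 133, and so on.
resampleAudioData({
  sourceChannels: source,
  destination,
  targetFrames,
  chunkSize: 6 / targetFrames, // 1.5
});
console.log(destination); // [133, 133, 266, 266, 433, 433, 566, 566]
```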
@@ -64,6 +64,12 @@ const createAudio = async ({ assets, onDownload, fps, logLevel, onProgress, down
  updateProgress();
  return result;
  }));
+ await downloadMap.inlineAudioMixing.finish({
+ indent,
+ logLevel,
+ binariesDirectory,
+ cancelSignal,
+ });
  const inlinedAudio = downloadMap.inlineAudioMixing.getListOfAssets();
  const preprocessed = [
  ...audioTracks.filter(truthy_1.truthy),
@@ -15101,6 +15101,43 @@ import path14 from "node:path";
  // src/sample-rate.ts
  var DEFAULT_SAMPLE_RATE = 48000;

+ // src/assets/apply-tone-frequency.ts
+ var applyToneFrequencyUsingFfmpeg = async ({
+ input,
+ output,
+ toneFrequency,
+ indent,
+ logLevel,
+ binariesDirectory,
+ cancelSignal
+ }) => {
+ const filter = `asetrate=${DEFAULT_SAMPLE_RATE}*${toneFrequency},aresample=${DEFAULT_SAMPLE_RATE},atempo=1/${toneFrequency}`;
+ const args = [
+ "-hide_banner",
+ "-i",
+ input,
+ ["-ac", "2"],
+ "-filter:a",
+ filter,
+ ["-c:a", "pcm_s16le"],
+ ["-ar", String(DEFAULT_SAMPLE_RATE)],
+ "-y",
+ output
+ ].flat(2);
+ Log.verbose({ indent, logLevel }, "Changing tone frequency using FFmpeg:", JSON.stringify(args.join(" ")), "Filter:", filter);
+ const startTimestamp = Date.now();
+ const task = callFf({
+ bin: "ffmpeg",
+ args,
+ indent,
+ logLevel,
+ binariesDirectory,
+ cancelSignal
+ });
+ await task;
+ Log.verbose({ indent, logLevel }, "Changed tone frequency using FFmpeg", `${Date.now() - startTimestamp}ms`);
+ };
+
  // src/assets/inline-audio-mixing.ts
  var numberTo32BiIntLittleEndian = (num) => {
  return new Uint8Array([
@@ -15115,10 +15152,12 @@ var numberTo16BitLittleEndian = (num) => {
  };
  var BIT_DEPTH = 16;
  var BYTES_PER_SAMPLE = BIT_DEPTH / 8;
+ var NUMBER_OF_CHANNELS = 2;
  var makeInlineAudioMixing = (dir) => {
  const folderToAdd = makeAndReturn(dir, "remotion-inline-audio-mixing");
  const openFiles = {};
  const writtenHeaders = {};
+ const toneFrequencies = {};
  const cleanup = () => {
  for (const fd of Object.values(openFiles)) {
  try {
@@ -15148,9 +15187,8 @@ var makeInlineAudioMixing = (dir) => {
  return;
  }
  writtenHeaders[filePath] = true;
- const expectedDataSize = Math.round((totalNumberOfFrames / fps - trimLeftOffset + trimRightOffset) * asset.numberOfChannels * DEFAULT_SAMPLE_RATE * BYTES_PER_SAMPLE);
+ const expectedDataSize = Math.round((totalNumberOfFrames / fps - trimLeftOffset + trimRightOffset) * NUMBER_OF_CHANNELS * DEFAULT_SAMPLE_RATE * BYTES_PER_SAMPLE);
  const expectedSize = 40 + expectedDataSize;
- const { numberOfChannels } = asset;
  const fd = openFiles[filePath];
  writeSync(fd, new Uint8Array([82, 73, 70, 70]), 0, 4, 0);
  writeSync(fd, new Uint8Array(numberTo32BiIntLittleEndian(expectedSize)), 0, 4, 4);
@@ -15158,14 +15196,37 @@ var makeInlineAudioMixing = (dir) => {
  writeSync(fd, new Uint8Array([102, 109, 116, 32]), 0, 4, 12);
  writeSync(fd, new Uint8Array([BIT_DEPTH, 0, 0, 0]), 0, 4, 16);
  writeSync(fd, new Uint8Array([1, 0]), 0, 2, 20);
- writeSync(fd, new Uint8Array([numberOfChannels, 0]), 0, 2, 22);
+ writeSync(fd, new Uint8Array([NUMBER_OF_CHANNELS, 0]), 0, 2, 22);
  writeSync(fd, new Uint8Array(numberTo32BiIntLittleEndian(DEFAULT_SAMPLE_RATE)), 0, 4, 24);
- writeSync(fd, new Uint8Array(numberTo32BiIntLittleEndian(DEFAULT_SAMPLE_RATE * numberOfChannels * BYTES_PER_SAMPLE)), 0, 4, 28);
- writeSync(fd, new Uint8Array(numberTo16BitLittleEndian(numberOfChannels * BYTES_PER_SAMPLE)), 0, 2, 32);
+ writeSync(fd, new Uint8Array(numberTo32BiIntLittleEndian(DEFAULT_SAMPLE_RATE * NUMBER_OF_CHANNELS * BYTES_PER_SAMPLE)), 0, 4, 28);
+ writeSync(fd, new Uint8Array(numberTo16BitLittleEndian(NUMBER_OF_CHANNELS * BYTES_PER_SAMPLE)), 0, 2, 32);
  writeSync(fd, numberTo16BitLittleEndian(BIT_DEPTH), 0, 2, 34);
  writeSync(fd, new Uint8Array([100, 97, 116, 97]), 0, 4, 36);
  writeSync(fd, new Uint8Array(numberTo32BiIntLittleEndian(expectedDataSize)), 0, 4, 40);
  };
+ const finish = async ({
+ binariesDirectory,
+ indent,
+ logLevel,
+ cancelSignal
+ }) => {
+ for (const fd of Object.keys(openFiles)) {
+ const frequency = toneFrequencies[fd];
+ if (frequency !== 1) {
+ const tmpFile = fd.replace(/.wav$/, "-tmp.wav");
+ await applyToneFrequencyUsingFfmpeg({
+ input: fd,
+ output: tmpFile,
+ toneFrequency: frequency,
+ indent,
+ logLevel,
+ binariesDirectory,
+ cancelSignal
+ });
+ fs12.renameSync(tmpFile, fd);
+ }
+ }
+ };
  const addAsset = ({
  asset,
  fps,
@@ -15182,7 +15243,11 @@ var makeInlineAudioMixing = (dir) => {
  trimRightOffset
  });
  const filePath = getFilePath(asset);
+ if (toneFrequencies[filePath] !== undefined && toneFrequencies[filePath] !== asset.toneFrequency) {
+ throw new Error(`toneFrequency must be the same across the entire audio, got ${asset.toneFrequency}, but before it was ${toneFrequencies[filePath]}`);
+ }
  const fileDescriptor = openFiles[filePath];
+ toneFrequencies[filePath] = asset.toneFrequency;
  let arr = new Int16Array(asset.audio);
  const isFirst = asset.frame === firstFrame;
  const isLast = asset.frame === totalNumberOfFrames + firstFrame - 1;
@@ -15195,19 +15260,20 @@ var makeInlineAudioMixing = (dir) => {
  throw new Error(`samplesToShaveFromStart should be approximately an integer, is ${samplesToShaveFromStart}`);
  }
  if (isFirst) {
- arr = arr.slice(Math.round(samplesToShaveFromStart) * asset.numberOfChannels);
+ arr = arr.slice(Math.round(samplesToShaveFromStart) * NUMBER_OF_CHANNELS);
  }
  if (isLast) {
- arr = arr.slice(0, arr.length + Math.round(samplesToShaveFromEnd) * asset.numberOfChannels);
+ arr = arr.slice(0, arr.length + Math.round(samplesToShaveFromEnd) * NUMBER_OF_CHANNELS);
  }
  const positionInSeconds = (asset.frame - firstFrame) / fps - (isFirst ? 0 : trimLeftOffset);
- const position = Math.round(positionInSeconds * DEFAULT_SAMPLE_RATE) * asset.numberOfChannels * BYTES_PER_SAMPLE;
+ const position = Math.floor(positionInSeconds * DEFAULT_SAMPLE_RATE) * NUMBER_OF_CHANNELS * BYTES_PER_SAMPLE;
  writeSync(fileDescriptor, arr, 0, arr.byteLength, 44 + position);
  };
  return {
  cleanup,
  addAsset,
- getListOfAssets
+ getListOfAssets,
+ finish
  };
  };

@@ -20726,6 +20792,12 @@ var createAudio = async ({
  updateProgress();
  return result;
  }));
+ await downloadMap.inlineAudioMixing.finish({
+ indent,
+ logLevel,
+ binariesDirectory,
+ cancelSignal
+ });
  const inlinedAudio = downloadMap.inlineAudioMixing.getListOfAssets();
  const preprocessed = [
  ...audioTracks.filter(truthy),
@@ -21300,7 +21372,7 @@ var validateOutputFilename = ({

  // src/render-media.ts
  var SLOWEST_FRAME_COUNT = 10;
- var MAX_RECENT_FRAME_TIMINGS = 50;
+ var MAX_RECENT_FRAME_TIMINGS = 150;
  var internalRenderMediaRaw = ({
  proResProfile,
  x264Preset,
@@ -21597,10 +21669,14 @@ var internalRenderMediaRaw = ({
  });
  }).then(({ server, cleanupServer }) => {
  cleanupServerFn = cleanupServer;
+ let timeOfLastFrame = Date.now();
  const renderFramesProc = internalRenderFrames({
  composition,
- onFrameUpdate: (frame, frameIndex, timeToRenderInMilliseconds) => {
+ onFrameUpdate: (frame, frameIndex) => {
  renderedFrames = frame;
+ const now = Date.now();
+ const timeToRenderInMilliseconds = now - timeOfLastFrame;
+ timeOfLastFrame = now;
  recentFrameTimings.push(timeToRenderInMilliseconds);
  if (recentFrameTimings.length > MAX_RECENT_FRAME_TIMINGS) {
  recentFrameTimings.shift();
@@ -52,7 +52,7 @@ const validate_scale_1 = require("./validate-scale");
  const validate_videobitrate_1 = require("./validate-videobitrate");
  const wrap_with_error_handling_1 = require("./wrap-with-error-handling");
  const SLOWEST_FRAME_COUNT = 10;
- const MAX_RECENT_FRAME_TIMINGS = 50;
+ const MAX_RECENT_FRAME_TIMINGS = 150;
  const internalRenderMediaRaw = ({ proResProfile, x264Preset, crf, composition: compositionWithPossibleUnevenDimensions, serializedInputPropsWithCustomSchema, pixelFormat: userPixelFormat, codec, envVariables, frameRange, puppeteerInstance, outputLocation, onProgress, overwrite, onDownload, onBrowserLog, onStart, timeoutInMilliseconds, chromiumOptions, scale, browserExecutable, port, cancelSignal, muted, enforceAudioTrack, ffmpegOverride, audioBitrate, videoBitrate, encodingMaxRate, encodingBufferSize, audioCodec, concurrency, disallowParallelEncoding, everyNthFrame, imageFormat: provisionalImageFormat, indent, jpegQuality, numberOfGifLoops, onCtrlCExit, preferLossless, serveUrl, server: reusedServer, logLevel, serializedResolvedPropsWithCustomSchema, offthreadVideoCacheSizeInBytes, colorSpace, repro, binariesDirectory, separateAudioTo, forSeamlessAacConcatenation, compositionStart, onBrowserDownload, onArtifact, metadata, hardwareAcceleration, chromeMode, offthreadVideoThreads, mediaCacheSizeInBytes, onLog, }) => {
  var _a, _b;
  const pixelFormat = (_a = userPixelFormat !== null && userPixelFormat !== void 0 ? userPixelFormat : compositionWithPossibleUnevenDimensions.defaultPixelFormat) !== null && _a !== void 0 ? _a : pixel_format_1.DEFAULT_PIXEL_FORMAT;
@@ -310,10 +310,14 @@ const internalRenderMediaRaw = ({ proResProfile, x264Preset, crf, composition: c
  })
  .then(({ server, cleanupServer }) => {
  cleanupServerFn = cleanupServer;
+ let timeOfLastFrame = Date.now();
  const renderFramesProc = (0, render_frames_1.internalRenderFrames)({
  composition,
- onFrameUpdate: (frame, frameIndex, timeToRenderInMilliseconds) => {
+ onFrameUpdate: (frame, frameIndex) => {
  renderedFrames = frame;
+ const now = Date.now();
+ const timeToRenderInMilliseconds = now - timeOfLastFrame;
+ timeOfLastFrame = now;
  // Track recent frame timings (at most 50)
  recentFrameTimings.push(timeToRenderInMilliseconds);
  if (recentFrameTimings.length > MAX_RECENT_FRAME_TIMINGS) {
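Both copies of this change move the timing measurement into the callback: instead of trusting a reported per-frame duration, the renderer now measures the wall-clock gap between successive onFrameUpdate calls and keeps a larger rolling window. A standalone sketch of the pattern — variable names mirror the diff, and the throughput helper is illustrative:

```ts
const MAX_RECENT_FRAME_TIMINGS = 150;
const recentFrameTimings: number[] = [];
let timeOfLastFrame = Date.now();

// Invoked once per rendered frame: record the elapsed wall-clock time
// since the previous frame and cap the window at 150 entries.
const onFrameUpdate = () => {
  const now = Date.now();
  recentFrameTimings.push(now - timeOfLastFrame);
  timeOfLastFrame = now;
  if (recentFrameTimings.length > MAX_RECENT_FRAME_TIMINGS) {
    recentFrameTimings.shift();
  }
};

// Illustrative consumer: the window average estimates ms per frame.
const averageFrameTimeMs = () =>
  recentFrameTimings.reduce((a, b) => a + b, 0) /
  Math.max(1, recentFrameTimings.length);
```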
@@ -2751,6 +2751,23 @@ var verboseTag = (str) => {
  return isColorSupported() ? chalk.bgBlack(` ${str} `) : `[${str}]`;
  };
  var Log = {
+ formatLogs: (logLevel, options, args) => {
+ return [
+ options.indent ? INDENT_TOKEN : null,
+ options.tag ? verboseTag(options.tag) : null
+ ].filter(truthy).concat(args.map((a) => {
+ if (logLevel === "warn") {
+ return chalk.yellow(a);
+ }
+ if (logLevel === "error") {
+ return chalk.red(a);
+ }
+ if (logLevel === "verbose" || logLevel === "trace") {
+ return chalk.gray(a);
+ }
+ return a;
+ }));
+ },
  trace: (options, ...args) => {
  writeInRepro("trace", ...args);
  if (isEqualOrBelowLogLevel(options.logLevel, "trace")) {
@@ -2758,10 +2775,7 @@ var Log = {
  return process.stdout.write(`
  `);
  }
- return console.log(...[
- options.indent ? INDENT_TOKEN : null,
- options.tag ? verboseTag(options.tag) : null
- ].filter(truthy).concat(args.map((a) => chalk.gray(a))));
+ return console.log(...Log.formatLogs("trace", options, args));
  }
  },
  verbose: (options, ...args) => {
@@ -2771,10 +2785,7 @@ var Log = {
  return process.stdout.write(`
  `);
  }
- return console.log(...[
- options.indent ? INDENT_TOKEN : null,
- options.tag ? verboseTag(options.tag) : null
- ].filter(truthy).concat(args.map((a) => chalk.gray(a))));
+ return console.log(...Log.formatLogs("verbose", options, args));
  }
  },
  info: (options, ...args) => {
@@ -2784,7 +2795,7 @@ var Log = {
  return process.stdout.write(`
  `);
  }
- return console.log(...[options.indent ? INDENT_TOKEN : null].filter(truthy).concat(args ?? []));
+ return console.log(...Log.formatLogs("info", options, args));
  }
  },
  warn: (options, ...args) => {
@@ -2794,7 +2805,7 @@ var Log = {
  return process.stdout.write(`
  `);
  }
- return console.warn(...[options.indent ? chalk.yellow(INDENT_TOKEN) : null].filter(truthy).concat(args.map((a) => chalk.yellow(a))));
+ return console.warn(...Log.formatLogs("warn", options, args));
  }
  },
  error: (options, ...args) => {
@@ -2804,10 +2815,7 @@ var Log = {
  return process.stdout.write(`
  `);
  }
- return console.error(...[
- options.indent ? INDENT_TOKEN : null,
- options.tag ? verboseTag(options.tag) : null
- ].filter(truthy).concat(args.map((a) => chalk.red(a))));
+ return console.error(...Log.formatLogs("error", options, args));
  }
  }
  };
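The refactor above consolidates five near-identical prefix-and-colorize blocks into one Log.formatLogs helper, with the level choosing the color (warn = yellow, error = red, verbose/trace = gray, info = plain). A self-contained sketch of the same pattern, using raw ANSI codes in place of chalk and a simplified tag prefix:

```ts
// Simplified stand-in for the shared formatter; not the package's exact API.
type Level = 'trace' | 'verbose' | 'info' | 'warn' | 'error';

const colorFor = (level: Level) => (s: string): string => {
  if (level === 'warn') return `\x1b[33m${s}\x1b[0m`; // yellow
  if (level === 'error') return `\x1b[31m${s}\x1b[0m`; // red
  if (level === 'verbose' || level === 'trace') return `\x1b[90m${s}\x1b[0m`; // gray
  return s; // info stays uncolored
};

const formatLogs = (level: Level, tag: string | null, args: string[]) =>
  [tag ? `[${tag}]` : null]
    .filter((x): x is string => x !== null)
    .concat(args.map(colorFor(level)));

// Every level now shares the prefix logic; only the color differs:
console.warn(...formatLogs('warn', 'ffmpeg', ['bitrate is very low']));
console.error(...formatLogs('error', null, ['render failed']));
```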
package/package.json CHANGED
@@ -3,7 +3,7 @@
  "url": "https://github.com/remotion-dev/remotion/tree/main/packages/renderer"
  },
  "name": "@remotion/renderer",
- "version": "4.0.356",
+ "version": "4.0.357",
  "description": "Render Remotion videos using Node.js or Bun",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
@@ -18,8 +18,8 @@
  "extract-zip": "2.0.1",
  "source-map": "^0.8.0-beta.0",
  "ws": "8.17.1",
- "remotion": "4.0.356",
- "@remotion/streaming": "4.0.356"
+ "remotion": "4.0.357",
+ "@remotion/streaming": "4.0.357"
  },
  "peerDependencies": {
  "react": ">=16.8.0",
@@ -33,17 +33,17 @@
  "react-dom": "19.0.0",
  "@types/ws": "8.5.10",
  "eslint": "9.19.0",
- "@remotion/example-videos": "4.0.356",
- "@remotion/eslint-config-internal": "4.0.356"
+ "@remotion/example-videos": "4.0.357",
+ "@remotion/eslint-config-internal": "4.0.357"
  },
  "optionalDependencies": {
- "@remotion/compositor-linux-arm64-gnu": "4.0.356",
- "@remotion/compositor-darwin-x64": "4.0.356",
- "@remotion/compositor-linux-x64-gnu": "4.0.356",
- "@remotion/compositor-linux-arm64-musl": "4.0.356",
- "@remotion/compositor-darwin-arm64": "4.0.356",
- "@remotion/compositor-win32-x64-msvc": "4.0.356",
- "@remotion/compositor-linux-x64-musl": "4.0.356"
+ "@remotion/compositor-darwin-arm64": "4.0.357",
+ "@remotion/compositor-darwin-x64": "4.0.357",
+ "@remotion/compositor-linux-arm64-gnu": "4.0.357",
+ "@remotion/compositor-linux-arm64-musl": "4.0.357",
+ "@remotion/compositor-linux-x64-gnu": "4.0.357",
+ "@remotion/compositor-linux-x64-musl": "4.0.357",
+ "@remotion/compositor-win32-x64-msvc": "4.0.357"
  },
  "keywords": [
  "remotion",