node-av 4.0.0 → 5.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +23 -0
- package/binding.gyp +19 -11
- package/dist/api/bitstream-filter.d.ts +13 -12
- package/dist/api/bitstream-filter.js +33 -29
- package/dist/api/bitstream-filter.js.map +1 -1
- package/dist/api/decoder.d.ts +211 -96
- package/dist/api/decoder.js +396 -375
- package/dist/api/decoder.js.map +1 -1
- package/dist/api/demuxer.d.ts +10 -10
- package/dist/api/demuxer.js +7 -10
- package/dist/api/demuxer.js.map +1 -1
- package/dist/api/encoder.d.ts +155 -122
- package/dist/api/encoder.js +368 -541
- package/dist/api/encoder.js.map +1 -1
- package/dist/api/filter-complex.d.ts +769 -0
- package/dist/api/filter-complex.js +1596 -0
- package/dist/api/filter-complex.js.map +1 -0
- package/dist/api/filter-presets.d.ts +68 -0
- package/dist/api/filter-presets.js +96 -0
- package/dist/api/filter-presets.js.map +1 -1
- package/dist/api/filter.d.ts +183 -113
- package/dist/api/filter.js +347 -365
- package/dist/api/filter.js.map +1 -1
- package/dist/api/fmp4-stream.d.ts +18 -2
- package/dist/api/fmp4-stream.js +45 -4
- package/dist/api/fmp4-stream.js.map +1 -1
- package/dist/api/hardware.d.ts +47 -0
- package/dist/api/hardware.js +45 -0
- package/dist/api/hardware.js.map +1 -1
- package/dist/api/index.d.ts +2 -0
- package/dist/api/index.js +3 -0
- package/dist/api/index.js.map +1 -1
- package/dist/api/io-stream.d.ts +3 -3
- package/dist/api/io-stream.js.map +1 -1
- package/dist/api/muxer.d.ts +10 -10
- package/dist/api/muxer.js +6 -6
- package/dist/api/muxer.js.map +1 -1
- package/dist/api/pipeline.d.ts +2 -2
- package/dist/api/pipeline.js +22 -22
- package/dist/api/pipeline.js.map +1 -1
- package/dist/api/rtp-stream.d.ts +5 -2
- package/dist/api/rtp-stream.js +33 -4
- package/dist/api/rtp-stream.js.map +1 -1
- package/dist/api/types.d.ts +63 -7
- package/dist/api/utilities/audio-sample.d.ts +10 -0
- package/dist/api/utilities/audio-sample.js +10 -0
- package/dist/api/utilities/audio-sample.js.map +1 -1
- package/dist/api/utilities/channel-layout.d.ts +1 -0
- package/dist/api/utilities/channel-layout.js +1 -0
- package/dist/api/utilities/channel-layout.js.map +1 -1
- package/dist/api/utilities/image.d.ts +38 -0
- package/dist/api/utilities/image.js +38 -0
- package/dist/api/utilities/image.js.map +1 -1
- package/dist/api/utilities/index.d.ts +1 -0
- package/dist/api/utilities/index.js +2 -0
- package/dist/api/utilities/index.js.map +1 -1
- package/dist/api/utilities/media-type.d.ts +1 -0
- package/dist/api/utilities/media-type.js +1 -0
- package/dist/api/utilities/media-type.js.map +1 -1
- package/dist/api/utilities/pixel-format.d.ts +3 -0
- package/dist/api/utilities/pixel-format.js +3 -0
- package/dist/api/utilities/pixel-format.js.map +1 -1
- package/dist/api/utilities/sample-format.d.ts +5 -0
- package/dist/api/utilities/sample-format.js +5 -0
- package/dist/api/utilities/sample-format.js.map +1 -1
- package/dist/api/utilities/scheduler.d.ts +21 -52
- package/dist/api/utilities/scheduler.js +20 -58
- package/dist/api/utilities/scheduler.js.map +1 -1
- package/dist/api/utilities/streaming.d.ts +32 -1
- package/dist/api/utilities/streaming.js +32 -1
- package/dist/api/utilities/streaming.js.map +1 -1
- package/dist/api/utilities/timestamp.d.ts +14 -0
- package/dist/api/utilities/timestamp.js +14 -0
- package/dist/api/utilities/timestamp.js.map +1 -1
- package/dist/api/utilities/whisper-model.d.ts +310 -0
- package/dist/api/utilities/whisper-model.js +528 -0
- package/dist/api/utilities/whisper-model.js.map +1 -0
- package/dist/api/whisper.d.ts +324 -0
- package/dist/api/whisper.js +362 -0
- package/dist/api/whisper.js.map +1 -0
- package/dist/constants/constants.d.ts +3 -1
- package/dist/constants/constants.js +1 -0
- package/dist/constants/constants.js.map +1 -1
- package/dist/ffmpeg/index.d.ts +3 -3
- package/dist/ffmpeg/index.js +3 -3
- package/dist/ffmpeg/utils.d.ts +27 -0
- package/dist/ffmpeg/utils.js +28 -16
- package/dist/ffmpeg/utils.js.map +1 -1
- package/dist/lib/binding.d.ts +4 -4
- package/dist/lib/binding.js.map +1 -1
- package/dist/lib/codec-parameters.d.ts +47 -1
- package/dist/lib/codec-parameters.js +55 -0
- package/dist/lib/codec-parameters.js.map +1 -1
- package/dist/lib/fifo.d.ts +416 -0
- package/dist/lib/fifo.js +453 -0
- package/dist/lib/fifo.js.map +1 -0
- package/dist/lib/frame.d.ts +96 -1
- package/dist/lib/frame.js +139 -1
- package/dist/lib/frame.js.map +1 -1
- package/dist/lib/index.d.ts +1 -0
- package/dist/lib/index.js +2 -0
- package/dist/lib/index.js.map +1 -1
- package/dist/lib/native-types.d.ts +29 -2
- package/dist/lib/rational.d.ts +18 -0
- package/dist/lib/rational.js +19 -0
- package/dist/lib/rational.js.map +1 -1
- package/dist/lib/types.d.ts +23 -1
- package/install/check.js +2 -2
- package/package.json +31 -21
|
@@ -0,0 +1,1596 @@
|
|
|
1
|
+
import { AV_BUFFERSRC_FLAG_PUSH, AVERROR_EAGAIN, AVERROR_EOF, AVFILTER_FLAG_HWDEVICE, AVMEDIA_TYPE_VIDEO, EOF } from '../constants/constants.js';
|
|
2
|
+
import { FFmpegError } from '../lib/error.js';
|
|
3
|
+
import { FilterGraph } from '../lib/filter-graph.js';
|
|
4
|
+
import { FilterInOut } from '../lib/filter-inout.js';
|
|
5
|
+
import { Filter } from '../lib/filter.js';
|
|
6
|
+
import { Frame } from '../lib/frame.js';
|
|
7
|
+
import { Rational } from '../lib/rational.js';
|
|
8
|
+
import { avGetSampleFmtName, avInvQ, avRescaleQ } from '../lib/utilities.js';
|
|
9
|
+
/**
|
|
10
|
+
* High-level filter_complex API for multi-input/output filtering.
|
|
11
|
+
*
|
|
12
|
+
* Provides simplified interface for complex FFmpeg filter graphs with multiple inputs and outputs.
|
|
13
|
+
* Supports both high-level generator API and low-level manual control.
|
|
14
|
+
*
|
|
15
|
+
* @example
|
|
16
|
+
* ```typescript
|
|
17
|
+
* // High-level API: Simple overlay with frames() generator
|
|
18
|
+
* using complex = FilterComplexAPI.create('[0:v][1:v]overlay=x=100:y=50[out]', {
|
|
19
|
+
* inputs: [{ label: '0:v' }, { label: '1:v' }],
|
|
20
|
+
* outputs: [{ label: 'out' }]
|
|
21
|
+
* });
|
|
22
|
+
*
|
|
23
|
+
* // Process multiple input streams automatically
|
|
24
|
+
* for await (using frame of complex.frames('out', {
|
|
25
|
+
* '0:v': decoder1.frames(packets1),
|
|
26
|
+
* '1:v': decoder2.frames(packets2)
|
|
27
|
+
* })) {
|
|
28
|
+
* await encoder.encode(frame);
|
|
29
|
+
* }
|
|
30
|
+
* ```
|
|
31
|
+
*
|
|
32
|
+
* @see {@link FilterAPI} For simple single-input/output filtering
|
|
33
|
+
* @see {@link FilterGraph} For low-level filter graph API
|
|
34
|
+
* @see {@link frames} For high-level stream processing
|
|
35
|
+
* @see {@link process} For low-level manual frame sending
|
|
36
|
+
* @see {@link receive} For low-level manual frame receiving
|
|
37
|
+
*/
|
|
38
|
+
export class FilterComplexAPI {
|
|
39
|
+
graph;
|
|
40
|
+
description;
|
|
41
|
+
options;
|
|
42
|
+
// Input/Output state
|
|
43
|
+
inputs = new Map();
|
|
44
|
+
outputs = new Map();
|
|
45
|
+
// Initialization state
|
|
46
|
+
initialized = false;
|
|
47
|
+
isClosed = false;
|
|
48
|
+
initializePromise = null;
|
|
49
|
+
// Reusable frame for receive operations
|
|
50
|
+
frame = new Frame();
|
|
51
|
+
/**
|
|
52
|
+
* @param graph - Filter graph instance
|
|
53
|
+
*
|
|
54
|
+
* @param description - Filter description string
|
|
55
|
+
*
|
|
56
|
+
* @param options - Filter complex options
|
|
57
|
+
*
|
|
58
|
+
* @internal
|
|
59
|
+
*/
|
|
60
|
+
constructor(graph, description, options) {
|
|
61
|
+
this.graph = graph;
|
|
62
|
+
this.description = description;
|
|
63
|
+
this.options = options;
|
|
64
|
+
}
|
|
65
|
+
/**
|
|
66
|
+
* Create a complex filter with specified configuration.
|
|
67
|
+
*
|
|
68
|
+
* Direct mapping to avfilter_graph_segment_parse() and avfilter_graph_config().
|
|
69
|
+
*
|
|
70
|
+
* @param description - Filter description string (e.g., '[0:v][1:v]overlay[out]')
|
|
71
|
+
*
|
|
72
|
+
* @param options - Filter complex configuration including inputs and outputs
|
|
73
|
+
*
|
|
74
|
+
* @returns Filter complex instance ready to process frames
|
|
75
|
+
*
|
|
76
|
+
* @throws {Error} If configuration is invalid (duplicate labels, no inputs/outputs)
|
|
77
|
+
*
|
|
78
|
+
* @example
|
|
79
|
+
* ```typescript
|
|
80
|
+
* // Simple overlay example
|
|
81
|
+
* using complex = FilterComplexAPI.create(
|
|
82
|
+
* '[0:v][1:v]overlay=x=100:y=50[out]',
|
|
83
|
+
* {
|
|
84
|
+
* inputs: [
|
|
85
|
+
* { label: '0:v' }, // Base video
|
|
86
|
+
* { label: '1:v' } // Overlay video
|
|
87
|
+
* ],
|
|
88
|
+
* outputs: [{ label: 'out' }]
|
|
89
|
+
* }
|
|
90
|
+
* );
|
|
91
|
+
*
|
|
92
|
+
* // Send frames manually
|
|
93
|
+
* await complex.process('0:v', baseFrame);
|
|
94
|
+
* await complex.process('1:v', overlayFrame);
|
|
95
|
+
* using outFrame = await complex.receive('out');
|
|
96
|
+
* ```
|
|
97
|
+
*
|
|
98
|
+
* @see {@link process} For sending frames to inputs
|
|
99
|
+
* @see {@link receive} For getting frames from outputs
|
|
100
|
+
*/
|
|
101
|
+
static create(description, options) {
|
|
102
|
+
// Validate inputs and outputs
|
|
103
|
+
if (!options.inputs || options.inputs.length === 0) {
|
|
104
|
+
throw new Error('At least one input is required');
|
|
105
|
+
}
|
|
106
|
+
if (!options.outputs || options.outputs.length === 0) {
|
|
107
|
+
throw new Error('At least one output is required');
|
|
108
|
+
}
|
|
109
|
+
// Check for duplicate input labels
|
|
110
|
+
const inputLabels = new Set();
|
|
111
|
+
for (const input of options.inputs) {
|
|
112
|
+
if (inputLabels.has(input.label)) {
|
|
113
|
+
throw new Error(`Duplicate input label: ${input.label}`);
|
|
114
|
+
}
|
|
115
|
+
inputLabels.add(input.label);
|
|
116
|
+
}
|
|
117
|
+
// Check for duplicate output labels
|
|
118
|
+
const outputLabels = new Set();
|
|
119
|
+
for (const output of options.outputs) {
|
|
120
|
+
if (outputLabels.has(output.label)) {
|
|
121
|
+
throw new Error(`Duplicate output label: ${output.label}`);
|
|
122
|
+
}
|
|
123
|
+
outputLabels.add(output.label);
|
|
124
|
+
}
|
|
125
|
+
// Create graph
|
|
126
|
+
const graph = new FilterGraph();
|
|
127
|
+
graph.alloc();
|
|
128
|
+
// Configure threading
|
|
129
|
+
if (options.threads !== undefined) {
|
|
130
|
+
graph.nbThreads = options.threads;
|
|
131
|
+
}
|
|
132
|
+
const instance = new FilterComplexAPI(graph, description, options);
|
|
133
|
+
// Initialize input states
|
|
134
|
+
for (const input of options.inputs) {
|
|
135
|
+
instance.inputs.set(input.label, {
|
|
136
|
+
label: input.label,
|
|
137
|
+
buffersrc: null,
|
|
138
|
+
queuedFrames: [],
|
|
139
|
+
});
|
|
140
|
+
}
|
|
141
|
+
// Initialize output states
|
|
142
|
+
for (const output of options.outputs) {
|
|
143
|
+
instance.outputs.set(output.label, {
|
|
144
|
+
label: output.label,
|
|
145
|
+
buffersink: null,
|
|
146
|
+
});
|
|
147
|
+
}
|
|
148
|
+
return instance;
|
|
149
|
+
}
|
|
150
|
+
/**
|
|
151
|
+
* Check if filter complex is open.
|
|
152
|
+
*
|
|
153
|
+
* @returns true if not closed
|
|
154
|
+
*
|
|
155
|
+
* @example
|
|
156
|
+
* ```typescript
|
|
157
|
+
* if (complex.isOpen) {
|
|
158
|
+
* // Can still consume frames
|
|
159
|
+
* }
|
|
160
|
+
* ```
|
|
161
|
+
*/
|
|
162
|
+
get isOpen() {
|
|
163
|
+
return !this.isClosed;
|
|
164
|
+
}
|
|
165
|
+
/**
|
|
166
|
+
* Check if filter complex has been initialized.
|
|
167
|
+
*
|
|
168
|
+
* Returns true after first frame set has been processed from all inputs.
|
|
169
|
+
*
|
|
170
|
+
* @returns true if filter graph has been configured
|
|
171
|
+
*
|
|
172
|
+
* @example
|
|
173
|
+
* ```typescript
|
|
174
|
+
* if (!complex.isInitialized) {
|
|
175
|
+
* console.log('Filter will initialize on first frame set');
|
|
176
|
+
* }
|
|
177
|
+
* ```
|
|
178
|
+
*/
|
|
179
|
+
get isInitialized() {
|
|
180
|
+
return this.initialized;
|
|
181
|
+
}
|
|
182
|
+
/**
|
|
183
|
+
* Get output frame rate.
|
|
184
|
+
*
|
|
185
|
+
* Returns frame rate from the first output's buffersink.
|
|
186
|
+
* Returns null if not initialized or frame rate is not set.
|
|
187
|
+
*
|
|
188
|
+
* @returns Frame rate as rational number or null
|
|
189
|
+
*
|
|
190
|
+
* @example
|
|
191
|
+
* ```typescript
|
|
192
|
+
* const frameRate = complex.frameRate;
|
|
193
|
+
* if (frameRate) {
|
|
194
|
+
* console.log(`Output: ${frameRate.num}/${frameRate.den} fps`);
|
|
195
|
+
* }
|
|
196
|
+
* ```
|
|
197
|
+
*
|
|
198
|
+
* @see {@link FilterAPI.frameRate} For single-output filter frame rate
|
|
199
|
+
*/
|
|
200
|
+
get frameRate() {
|
|
201
|
+
if (!this.initialized || this.outputs.size === 0) {
|
|
202
|
+
return null;
|
|
203
|
+
}
|
|
204
|
+
// Get frame rate from first output
|
|
205
|
+
const firstOutput = this.outputs.values().next().value;
|
|
206
|
+
if (!firstOutput?.buffersink) {
|
|
207
|
+
return null;
|
|
208
|
+
}
|
|
209
|
+
const fr = firstOutput.buffersink.buffersinkGetFrameRate();
|
|
210
|
+
// Return null if frame rate is not set (0/0 or 0/1)
|
|
211
|
+
if (fr.num <= 0 || fr.den <= 0) {
|
|
212
|
+
return null;
|
|
213
|
+
}
|
|
214
|
+
return fr;
|
|
215
|
+
}
|
|
216
|
+
/**
|
|
217
|
+
* Get output time base.
|
|
218
|
+
*
|
|
219
|
+
* Returns time base from the first output's buffersink.
|
|
220
|
+
* Returns null if not initialized.
|
|
221
|
+
*
|
|
222
|
+
* @returns Time base as rational number or null
|
|
223
|
+
*
|
|
224
|
+
* @example
|
|
225
|
+
* ```typescript
|
|
226
|
+
* const timeBase = complex.timeBase;
|
|
227
|
+
* if (timeBase) {
|
|
228
|
+
* console.log(`Output timeBase: ${timeBase.num}/${timeBase.den}`);
|
|
229
|
+
* }
|
|
230
|
+
* ```
|
|
231
|
+
*
|
|
232
|
+
* @see {@link FilterAPI.timeBase} For single-output filter time base
|
|
233
|
+
*/
|
|
234
|
+
get timeBase() {
|
|
235
|
+
if (!this.initialized || this.outputs.size === 0) {
|
|
236
|
+
return null;
|
|
237
|
+
}
|
|
238
|
+
// Get time base from first output
|
|
239
|
+
const firstOutput = this.outputs.values().next().value;
|
|
240
|
+
if (!firstOutput?.buffersink) {
|
|
241
|
+
return null;
|
|
242
|
+
}
|
|
243
|
+
return firstOutput.buffersink.buffersinkGetTimeBase();
|
|
244
|
+
}
|
|
245
|
+
/**
|
|
246
|
+
* Process frame by sending to specified input.
|
|
247
|
+
*
|
|
248
|
+
* Sends a frame to the buffersrc of the specified input label.
|
|
249
|
+
* Automatically rescales timestamps to the input's calculated timeBase (CFR/VFR).
|
|
250
|
+
* Pass null to signal end-of-stream for that input.
|
|
251
|
+
*
|
|
252
|
+
* Direct mapping to av_buffersrc_add_frame().
|
|
253
|
+
*
|
|
254
|
+
* @param inLabel - Input label to send frame to
|
|
255
|
+
*
|
|
256
|
+
* @param frame - Frame to process
|
|
257
|
+
*
|
|
258
|
+
* @throws {Error} If input label not found or filter closed
|
|
259
|
+
*
|
|
260
|
+
* @throws {FFmpegError} If processing fails
|
|
261
|
+
*
|
|
262
|
+
* @example
|
|
263
|
+
* ```typescript
|
|
264
|
+
* // Process frames one at a time
|
|
265
|
+
* await complex.process('0:v', frame1);
|
|
266
|
+
* await complex.process('1:v', frame2);
|
|
267
|
+
* const outFrame = await complex.receive('out');
|
|
268
|
+
* ```
|
|
269
|
+
*
|
|
270
|
+
* @see {@link receive} For receiving output frames
|
|
271
|
+
* @see {@link flush} For flushing inputs
|
|
272
|
+
* @see {@link processSync} For synchronous version
|
|
273
|
+
*/
|
|
274
|
+
async process(inLabel, frame) {
|
|
275
|
+
if (this.isClosed) {
|
|
276
|
+
throw new Error('FilterComplexAPI is already closed');
|
|
277
|
+
}
|
|
278
|
+
// Get input state
|
|
279
|
+
const inputState = this.inputs.get(inLabel);
|
|
280
|
+
if (!inputState) {
|
|
281
|
+
throw new Error(`Input '${inLabel}' not found`);
|
|
282
|
+
}
|
|
283
|
+
// If not initialized, queue frame and try to initialize
|
|
284
|
+
if (!this.initialized) {
|
|
285
|
+
const cloned = frame.clone();
|
|
286
|
+
if (!cloned) {
|
|
287
|
+
throw new Error('Failed to clone frame for queuing');
|
|
288
|
+
}
|
|
289
|
+
inputState.queuedFrames.push(cloned);
|
|
290
|
+
// Check if all inputs have at least one frame
|
|
291
|
+
if (this.hasAllInputFormats()) {
|
|
292
|
+
// All inputs ready → initialize graph and process queued frames
|
|
293
|
+
this.initializePromise ??= this.initializeFromQueuedFrames();
|
|
294
|
+
await this.initializePromise;
|
|
295
|
+
}
|
|
296
|
+
return;
|
|
297
|
+
}
|
|
298
|
+
// Already initialized → send frame directly
|
|
299
|
+
if (!inputState.buffersrc || !inputState.calculatedTimeBase) {
|
|
300
|
+
throw new Error(`Input '${inLabel}' buffersrc not initialized`);
|
|
301
|
+
}
|
|
302
|
+
// Rescale timestamps using helper
|
|
303
|
+
this.rescaleFrameTimestamps(frame, inputState.calculatedTimeBase);
|
|
304
|
+
// Send frame to buffersrc
|
|
305
|
+
const ret = await inputState.buffersrc.buffersrcAddFrame(frame, AV_BUFFERSRC_FLAG_PUSH);
|
|
306
|
+
FFmpegError.throwIfError(ret, `Failed to send frame to input ${inLabel}`);
|
|
307
|
+
}
|
|
308
|
+
/**
|
|
309
|
+
* Process frame by sending to specified input synchronously.
|
|
310
|
+
* Synchronous version of process.
|
|
311
|
+
*
|
|
312
|
+
* Sends a frame to the buffersrc of the specified input label.
|
|
313
|
+
* Automatically rescales timestamps to the input's calculated timeBase (CFR/VFR).
|
|
314
|
+
* Pass null to signal end-of-stream for that input.
|
|
315
|
+
*
|
|
316
|
+
* Direct mapping to av_buffersrc_add_frame().
|
|
317
|
+
*
|
|
318
|
+
* @param inLabel - Input label to send frame to
|
|
319
|
+
*
|
|
320
|
+
* @param frame - Frame to process
|
|
321
|
+
*
|
|
322
|
+
* @throws {Error} If input label not found or filter closed
|
|
323
|
+
*
|
|
324
|
+
* @throws {FFmpegError} If processing fails
|
|
325
|
+
*
|
|
326
|
+
* @example
|
|
327
|
+
* ```typescript
|
|
328
|
+
* // Process frames one at a time
|
|
329
|
+
* complex.processSync('0:v', frame1);
|
|
330
|
+
* complex.processSync('1:v', frame2);
|
|
331
|
+
* const outFrame = complex.receiveSync('out');
|
|
332
|
+
* ```
|
|
333
|
+
*
|
|
334
|
+
* @see {@link receiveSync} For receiving output frames
|
|
335
|
+
* @see {@link flushSync} For flushing inputs
|
|
336
|
+
* @see {@link process} For async version
|
|
337
|
+
*/
|
|
338
|
+
processSync(inLabel, frame) {
|
|
339
|
+
if (this.isClosed) {
|
|
340
|
+
throw new Error('FilterComplexAPI is already closed');
|
|
341
|
+
}
|
|
342
|
+
// Get input state
|
|
343
|
+
const inputState = this.inputs.get(inLabel);
|
|
344
|
+
if (!inputState) {
|
|
345
|
+
throw new Error(`Input '${inLabel}' not found`);
|
|
346
|
+
}
|
|
347
|
+
// If not initialized, queue frame and try to initialize
|
|
348
|
+
if (!this.initialized) {
|
|
349
|
+
const cloned = frame.clone();
|
|
350
|
+
if (!cloned) {
|
|
351
|
+
throw new Error('Failed to clone frame for queuing');
|
|
352
|
+
}
|
|
353
|
+
inputState.queuedFrames.push(cloned);
|
|
354
|
+
// Check if all inputs have at least one frame
|
|
355
|
+
if (this.hasAllInputFormats()) {
|
|
356
|
+
// All inputs ready → initialize graph and process queued frames synchronously
|
|
357
|
+
this.initializeFromQueuedFramesSync();
|
|
358
|
+
}
|
|
359
|
+
return;
|
|
360
|
+
}
|
|
361
|
+
// Already initialized → send frame directly
|
|
362
|
+
if (!inputState.buffersrc || !inputState.calculatedTimeBase) {
|
|
363
|
+
throw new Error(`Input '${inLabel}' buffersrc not initialized`);
|
|
364
|
+
}
|
|
365
|
+
// Rescale timestamps using helper
|
|
366
|
+
this.rescaleFrameTimestamps(frame, inputState.calculatedTimeBase);
|
|
367
|
+
// Send frame to buffersrc
|
|
368
|
+
const ret = inputState.buffersrc.buffersrcAddFrameSync(frame, AV_BUFFERSRC_FLAG_PUSH);
|
|
369
|
+
FFmpegError.throwIfError(ret, `Failed to send frame to input ${inLabel}`);
|
|
370
|
+
}
|
|
371
|
+
/**
|
|
372
|
+
* Process frame streams from multiple inputs and yield frames from specified output.
|
|
373
|
+
*
|
|
374
|
+
* High-level async generator for multi-input filtering.
|
|
375
|
+
* Filter is only flushed when EOF (null) is explicitly sent to any input.
|
|
376
|
+
*
|
|
377
|
+
* **EOF Handling:**
|
|
378
|
+
* - Filter is only flushed when EOF (null) is explicitly sent to ANY input
|
|
379
|
+
* - Generator yields null after flushing when null is received
|
|
380
|
+
* - No automatic flushing - filter stays open until EOF or close()
|
|
381
|
+
* - Iterator completion without null does not trigger flush
|
|
382
|
+
*
|
|
383
|
+
* @param outLabel - Output label to receive frames from
|
|
384
|
+
*
|
|
385
|
+
* @param inputs - Record mapping input labels to frame sources (AsyncIterable, single Frame, or null)
|
|
386
|
+
*
|
|
387
|
+
* @yields {Frame | null} Filtered frames from output, followed by null when flushed
|
|
388
|
+
*
|
|
389
|
+
* @throws {Error} If input label not found
|
|
390
|
+
*
|
|
391
|
+
* @throws {FFmpegError} If processing fails
|
|
392
|
+
*
|
|
393
|
+
* @example
|
|
394
|
+
* ```typescript
|
|
395
|
+
* // Stream processing: 2 inputs, 1 output
|
|
396
|
+
* using complex = FilterComplexAPI.create('[0:v][1:v]overlay[out]', {
|
|
397
|
+
* inputs: [{ label: '0:v' }, { label: '1:v' }],
|
|
398
|
+
* outputs: [{ label: 'out' }]
|
|
399
|
+
* });
|
|
400
|
+
*
|
|
401
|
+
* for await (using frame of complex.frames('out', {
|
|
402
|
+
* '0:v': decoder1.frames(packets1),
|
|
403
|
+
* '1:v': decoder2.frames(packets2)
|
|
404
|
+
* })) {
|
|
405
|
+
* await encoder.encode(frame);
|
|
406
|
+
* }
|
|
407
|
+
* ```
|
|
408
|
+
*
|
|
409
|
+
* @example
|
|
410
|
+
* ```typescript
|
|
411
|
+
* // Single frames - no automatic flush
|
|
412
|
+
* for await (using frame of complex.frames('out', {
|
|
413
|
+
* '0:v': frame1,
|
|
414
|
+
* '1:v': frame2
|
|
415
|
+
* })) {
|
|
416
|
+
* await encoder.encode(frame);
|
|
417
|
+
* }
|
|
418
|
+
* // Filter remains open, buffered frames not flushed
|
|
419
|
+
* ```
|
|
420
|
+
*
|
|
421
|
+
* @example
|
|
422
|
+
* ```typescript
|
|
423
|
+
* // Explicit flush with null
|
|
424
|
+
* for await (using frame of complex.frames('out', {
|
|
425
|
+
* '0:v': null,
|
|
426
|
+
* '1:v': null
|
|
427
|
+
* })) {
|
|
428
|
+
* await encoder.encode(frame);
|
|
429
|
+
* }
|
|
430
|
+
* ```
|
|
431
|
+
*
|
|
432
|
+
* @example
|
|
433
|
+
* ```typescript
|
|
434
|
+
* // Mixed: stream + single frame
|
|
435
|
+
* for await (using frame of complex.frames('out', {
|
|
436
|
+
* '0:v': decoder.frames(packets), // Stream
|
|
437
|
+
* '1:v': watermarkFrame // Single frame (used for all)
|
|
438
|
+
* })) {
|
|
439
|
+
* await encoder.encode(frame);
|
|
440
|
+
* }
|
|
441
|
+
* ```
|
|
442
|
+
*
|
|
443
|
+
* @see {@link process} For manual frame sending
|
|
444
|
+
* @see {@link receive} For manual frame receiving
|
|
445
|
+
* @see {@link framesSync} For sync version
|
|
446
|
+
*/
|
|
447
|
+
async *frames(outLabel, inputs) {
|
|
448
|
+
// Validate output label
|
|
449
|
+
if (!this.outputs.has(outLabel)) {
|
|
450
|
+
throw new Error(`Output '${outLabel}' not found`);
|
|
451
|
+
}
|
|
452
|
+
// Validate all input labels exist
|
|
453
|
+
for (const label of Object.keys(inputs)) {
|
|
454
|
+
if (!this.inputs.has(label)) {
|
|
455
|
+
throw new Error(`Input '${label}' not found in filter complex configuration`);
|
|
456
|
+
}
|
|
457
|
+
}
|
|
458
|
+
// Helper to process a single frame and yield output
|
|
459
|
+
const processFrame = async function* (label, frame) {
|
|
460
|
+
await this.process(label, frame);
|
|
461
|
+
// Try to receive output frames
|
|
462
|
+
while (true) {
|
|
463
|
+
const outFrame = await this.receive(outLabel);
|
|
464
|
+
if (!outFrame || outFrame === EOF) {
|
|
465
|
+
break;
|
|
466
|
+
}
|
|
467
|
+
yield outFrame;
|
|
468
|
+
}
|
|
469
|
+
}.bind(this);
|
|
470
|
+
// Helper to finalize (flush all inputs and yield remaining frames)
|
|
471
|
+
const finalize = async function* () {
|
|
472
|
+
for await (const frame of this.flushFrames(outLabel)) {
|
|
473
|
+
yield frame;
|
|
474
|
+
}
|
|
475
|
+
yield null;
|
|
476
|
+
}.bind(this);
|
|
477
|
+
// Separate inputs by type
|
|
478
|
+
const iterableInputs = new Map();
|
|
479
|
+
const singleFrameInputs = [];
|
|
480
|
+
const flushInputs = new Set(); // Track which inputs to flush
|
|
481
|
+
for (const [label, source] of Object.entries(inputs)) {
|
|
482
|
+
if (source === null) {
|
|
483
|
+
// null - flush this input
|
|
484
|
+
flushInputs.add(label);
|
|
485
|
+
}
|
|
486
|
+
else if (source instanceof Frame) {
|
|
487
|
+
// Single frame
|
|
488
|
+
singleFrameInputs.push({ label, frame: source });
|
|
489
|
+
}
|
|
490
|
+
else {
|
|
491
|
+
// AsyncIterable
|
|
492
|
+
iterableInputs.set(label, source[Symbol.asyncIterator]());
|
|
493
|
+
}
|
|
494
|
+
}
|
|
495
|
+
// If only single frames/nulls and no iterables
|
|
496
|
+
if (iterableInputs.size === 0) {
|
|
497
|
+
// Process single frames
|
|
498
|
+
for (const { label, frame } of singleFrameInputs) {
|
|
499
|
+
yield* processFrame(label, frame);
|
|
500
|
+
}
|
|
501
|
+
// Flush inputs that were null
|
|
502
|
+
for (const label of flushInputs) {
|
|
503
|
+
await this.flush(label);
|
|
504
|
+
}
|
|
505
|
+
// Only finalize if we flushed any inputs
|
|
506
|
+
if (flushInputs.size > 0) {
|
|
507
|
+
yield* finalize();
|
|
508
|
+
}
|
|
509
|
+
return;
|
|
510
|
+
}
|
|
511
|
+
// Process single frames first
|
|
512
|
+
for (const { label, frame } of singleFrameInputs) {
|
|
513
|
+
yield* processFrame(label, frame);
|
|
514
|
+
}
|
|
515
|
+
// Track which inputs have finished
|
|
516
|
+
const finishedInputs = new Set();
|
|
517
|
+
let shouldFinalize = flushInputs.size > 0; // True if any single input was null
|
|
518
|
+
// Process frames from iterable inputs in parallel
|
|
519
|
+
while (finishedInputs.size < iterableInputs.size) {
|
|
520
|
+
// Read one frame from each active input
|
|
521
|
+
const readPromises = [];
|
|
522
|
+
for (const [label, iterator] of iterableInputs) {
|
|
523
|
+
if (!finishedInputs.has(label)) {
|
|
524
|
+
readPromises.push(iterator.next().then((result) => ({
|
|
525
|
+
label,
|
|
526
|
+
result,
|
|
527
|
+
})));
|
|
528
|
+
}
|
|
529
|
+
}
|
|
530
|
+
// Wait for all reads to complete
|
|
531
|
+
const results = await Promise.all(readPromises);
|
|
532
|
+
// Process each result
|
|
533
|
+
for (const { label, result } of results) {
|
|
534
|
+
if (result.done) {
|
|
535
|
+
// Iterator finished without explicit null - no automatic flush
|
|
536
|
+
finishedInputs.add(label);
|
|
537
|
+
continue;
|
|
538
|
+
}
|
|
539
|
+
const frame = result.value;
|
|
540
|
+
if (frame === null) {
|
|
541
|
+
// Explicit null from input stream - flush this input
|
|
542
|
+
await this.flush(label);
|
|
543
|
+
shouldFinalize = true;
|
|
544
|
+
finishedInputs.add(label);
|
|
545
|
+
}
|
|
546
|
+
else {
|
|
547
|
+
// Send frame to input
|
|
548
|
+
yield* processFrame(label, frame);
|
|
549
|
+
}
|
|
550
|
+
}
|
|
551
|
+
// If we got null from stream, finalize and return
|
|
552
|
+
if (shouldFinalize) {
|
|
553
|
+
yield* finalize();
|
|
554
|
+
return;
|
|
555
|
+
}
|
|
556
|
+
}
|
|
557
|
+
// Iterators finished without explicit null - no automatic flush
|
|
558
|
+
}
|
|
559
|
+
/**
|
|
560
|
+
* Process frame streams from multiple inputs and yield frames from specified output synchronously.
|
|
561
|
+
* Synchronous version of frames.
|
|
562
|
+
*
|
|
563
|
+
* High-level sync generator for multi-input filtering.
|
|
564
|
+
* Filter is only flushed when EOF (null) is explicitly sent to any input.
|
|
565
|
+
*
|
|
566
|
+
* **EOF Handling:**
|
|
567
|
+
* - Filter is only flushed when EOF (null) is explicitly sent to ANY input
|
|
568
|
+
* - Generator yields null after flushing when null is received
|
|
569
|
+
* - No automatic flushing - filter stays open until EOF or close()
|
|
570
|
+
* - Iterator completion without null does not trigger flush
|
|
571
|
+
*
|
|
572
|
+
* @param outLabel - Output label to receive frames from
|
|
573
|
+
*
|
|
574
|
+
* @param inputs - Record mapping input labels to frame sources (Iterable, single Frame, or null)
|
|
575
|
+
*
|
|
576
|
+
* @yields {Frame | null} Filtered frames from output, followed by null when flushed
|
|
577
|
+
*
|
|
578
|
+
* @throws {Error} If input label not found or filter not initialized
|
|
579
|
+
*
|
|
580
|
+
* @throws {FFmpegError} If processing fails
|
|
581
|
+
*
|
|
582
|
+
* @example
|
|
583
|
+
* ```typescript
|
|
584
|
+
* // Stream processing: 2 inputs, 1 output
|
|
585
|
+
* using complex = FilterComplexAPI.create('[0:v][1:v]overlay[out]', {
|
|
586
|
+
* inputs: [{ label: '0:v' }, { label: '1:v' }],
|
|
587
|
+
* outputs: [{ label: 'out' }]
|
|
588
|
+
* });
|
|
589
|
+
*
|
|
590
|
+
* // Note: Sync version requires async initialization first
|
|
591
|
+
* await complex.process('0:v', firstFrame1);
|
|
592
|
+
* await complex.process('1:v', firstFrame2);
|
|
593
|
+
*
|
|
594
|
+
* for (using frame of complex.framesSync('out', {
|
|
595
|
+
* '0:v': decoder1.framesSync(packets1),
|
|
596
|
+
* '1:v': decoder2.framesSync(packets2)
|
|
597
|
+
* })) {
|
|
598
|
+
* encoder.encodeSync(frame);
|
|
599
|
+
* }
|
|
600
|
+
* ```
|
|
601
|
+
*
|
|
602
|
+
* @example
|
|
603
|
+
* ```typescript
|
|
604
|
+
* // Single frames
|
|
605
|
+
* for (using frame of complex.framesSync('out', {
|
|
606
|
+
* '0:v': frame1,
|
|
607
|
+
* '1:v': frame2
|
|
608
|
+
* })) {
|
|
609
|
+
* encoder.encodeSync(frame);
|
|
610
|
+
* }
|
|
611
|
+
* ```
|
|
612
|
+
*
|
|
613
|
+
* @example
|
|
614
|
+
* ```typescript
|
|
615
|
+
* // Explicit flush
|
|
616
|
+
* for (using frame of complex.framesSync('out', {
|
|
617
|
+
* '0:v': null,
|
|
618
|
+
* '1:v': null
|
|
619
|
+
* })) {
|
|
620
|
+
* encoder.encodeSync(frame);
|
|
621
|
+
* }
|
|
622
|
+
* ```
|
|
623
|
+
*
|
|
624
|
+
* @see {@link processSync} For manual frame sending
|
|
625
|
+
* @see {@link receiveSync} For manual frame receiving
|
|
626
|
+
* @see {@link frames} For async version with lazy initialization
|
|
627
|
+
*/
|
|
628
|
+
*framesSync(outLabel, inputs) {
|
|
629
|
+
// Validate output label
|
|
630
|
+
if (!this.outputs.has(outLabel)) {
|
|
631
|
+
throw new Error(`Output '${outLabel}' not found`);
|
|
632
|
+
}
|
|
633
|
+
// Validate all input labels exist
|
|
634
|
+
for (const label of Object.keys(inputs)) {
|
|
635
|
+
if (!this.inputs.has(label)) {
|
|
636
|
+
throw new Error(`Input '${label}' not found in filter complex configuration`);
|
|
637
|
+
}
|
|
638
|
+
}
|
|
639
|
+
// Sync version requires filter to be initialized already
|
|
640
|
+
if (!this.initialized) {
|
|
641
|
+
throw new Error('FilterComplexAPI not initialized. Use async frames() method for lazy initialization.');
|
|
642
|
+
}
|
|
643
|
+
// Helper to process a single frame and yield output
|
|
644
|
+
const processFrame = function* (label, frame) {
|
|
645
|
+
this.processSync(label, frame);
|
|
646
|
+
// Try to receive output frames
|
|
647
|
+
while (true) {
|
|
648
|
+
const outFrame = this.receiveSync(outLabel);
|
|
649
|
+
if (!outFrame || outFrame === EOF) {
|
|
650
|
+
break;
|
|
651
|
+
}
|
|
652
|
+
yield outFrame;
|
|
653
|
+
}
|
|
654
|
+
}.bind(this);
|
|
655
|
+
// Helper to finalize (flush all inputs and yield remaining frames)
|
|
656
|
+
const finalize = function* () {
|
|
657
|
+
for (const frame of this.flushFramesSync(outLabel)) {
|
|
658
|
+
yield frame;
|
|
659
|
+
}
|
|
660
|
+
yield null;
|
|
661
|
+
}.bind(this);
|
|
662
|
+
// Separate inputs by type
|
|
663
|
+
const iterableInputs = new Map();
|
|
664
|
+
const singleFrameInputs = [];
|
|
665
|
+
const flushInputs = new Set(); // Track which inputs to flush
|
|
666
|
+
for (const [label, source] of Object.entries(inputs)) {
|
|
667
|
+
if (source === null) {
|
|
668
|
+
// null - flush this input
|
|
669
|
+
flushInputs.add(label);
|
|
670
|
+
}
|
|
671
|
+
else if (source instanceof Frame) {
|
|
672
|
+
// Single frame
|
|
673
|
+
singleFrameInputs.push({ label, frame: source });
|
|
674
|
+
}
|
|
675
|
+
else {
|
|
676
|
+
// Iterable
|
|
677
|
+
iterableInputs.set(label, source[Symbol.iterator]());
|
|
678
|
+
}
|
|
679
|
+
}
|
|
680
|
+
// If only single frames/nulls and no iterables
|
|
681
|
+
if (iterableInputs.size === 0) {
|
|
682
|
+
// Process single frames
|
|
683
|
+
for (const { label, frame } of singleFrameInputs) {
|
|
684
|
+
yield* processFrame(label, frame);
|
|
685
|
+
}
|
|
686
|
+
// Flush inputs that were null
|
|
687
|
+
for (const label of flushInputs) {
|
|
688
|
+
this.flushSync(label);
|
|
689
|
+
}
|
|
690
|
+
// Only finalize if we flushed any inputs
|
|
691
|
+
if (flushInputs.size > 0) {
|
|
692
|
+
yield* finalize();
|
|
693
|
+
}
|
|
694
|
+
return;
|
|
695
|
+
}
|
|
696
|
+
// Process single frames first
|
|
697
|
+
for (const { label, frame } of singleFrameInputs) {
|
|
698
|
+
yield* processFrame(label, frame);
|
|
699
|
+
}
|
|
700
|
+
// Track which inputs have finished
|
|
701
|
+
const finishedInputs = new Set();
|
|
702
|
+
let shouldFinalize = flushInputs.size > 0; // True if any single input was null
|
|
703
|
+
// Process frames from iterable inputs in round-robin fashion
|
|
704
|
+
while (finishedInputs.size < iterableInputs.size) {
|
|
705
|
+
// Read one frame from each active input
|
|
706
|
+
for (const [label, iterator] of iterableInputs) {
|
|
707
|
+
if (finishedInputs.has(label)) {
|
|
708
|
+
continue;
|
|
709
|
+
}
|
|
710
|
+
const result = iterator.next();
|
|
711
|
+
if (result.done) {
|
|
712
|
+
// Iterator finished without explicit null - no automatic flush
|
|
713
|
+
finishedInputs.add(label);
|
|
714
|
+
continue;
|
|
715
|
+
}
|
|
716
|
+
const frame = result.value;
|
|
717
|
+
if (frame === null) {
|
|
718
|
+
// Explicit null from input stream - flush this input
|
|
719
|
+
this.flushSync(label);
|
|
720
|
+
shouldFinalize = true;
|
|
721
|
+
finishedInputs.add(label);
|
|
722
|
+
}
|
|
723
|
+
else {
|
|
724
|
+
// Send frame to input
|
|
725
|
+
yield* processFrame(label, frame);
|
|
726
|
+
}
|
|
727
|
+
}
|
|
728
|
+
// If we got null from stream, finalize and return
|
|
729
|
+
if (shouldFinalize) {
|
|
730
|
+
yield* finalize();
|
|
731
|
+
return;
|
|
732
|
+
}
|
|
733
|
+
}
|
|
734
|
+
// Iterators finished without explicit null - no automatic flush
|
|
735
|
+
}
|
|
736
|
+
/**
|
|
737
|
+
* Flush input(s) and signal end-of-stream.
|
|
738
|
+
*
|
|
739
|
+
* Sends null frame to buffersrc filter(s) to flush buffered data.
|
|
740
|
+
* Must call receive() on outputs to get flushed frames.
|
|
741
|
+
* Does nothing if filter is closed or was never initialized.
|
|
742
|
+
*
|
|
743
|
+
* Direct mapping to av_buffersrc_add_frame(NULL).
|
|
744
|
+
*
|
|
745
|
+
* @param inLabel - Input label to flush. If not specified, flushes all inputs.
|
|
746
|
+
*
|
|
747
|
+
* @throws {Error} If input label not found
|
|
748
|
+
*
|
|
749
|
+
* @throws {FFmpegError} If flush fails
|
|
750
|
+
*
|
|
751
|
+
* @example
|
|
752
|
+
* ```typescript
|
|
753
|
+
* // Flush specific input
|
|
754
|
+
* await complex.flush('0:v');
|
|
755
|
+
*
|
|
756
|
+
* // Flush all inputs
|
|
757
|
+
* await complex.flush();
|
|
758
|
+
*
|
|
759
|
+
* // Get remaining frames from output
|
|
760
|
+
* let frame;
|
|
761
|
+
* while ((frame = await complex.receive('out')) !== null) {
|
|
762
|
+
* frame.free();
|
|
763
|
+
* }
|
|
764
|
+
* ```
|
|
765
|
+
*
|
|
766
|
+
* @see {@link flushFrames} For async iteration
|
|
767
|
+
* @see {@link receive} For getting flushed frames
|
|
768
|
+
* @see {@link flushSync} For synchronous version
|
|
769
|
+
*/
|
|
770
|
+
async flush(inLabel) {
|
|
771
|
+
if (this.isClosed || !this.initialized) {
|
|
772
|
+
return;
|
|
773
|
+
}
|
|
774
|
+
if (inLabel) {
|
|
775
|
+
// Flush specific input
|
|
776
|
+
const inputState = this.inputs.get(inLabel);
|
|
777
|
+
if (!inputState) {
|
|
778
|
+
throw new Error(`Input '${inLabel}' not found`);
|
|
779
|
+
}
|
|
780
|
+
if (inputState.buffersrc) {
|
|
781
|
+
const ret = await inputState.buffersrc.buffersrcAddFrame(null, AV_BUFFERSRC_FLAG_PUSH);
|
|
782
|
+
if (ret < 0 && ret !== AVERROR_EOF) {
|
|
783
|
+
FFmpegError.throwIfError(ret, `Failed to flush input ${inLabel}`);
|
|
784
|
+
}
|
|
785
|
+
}
|
|
786
|
+
}
|
|
787
|
+
else {
|
|
788
|
+
// Flush all inputs
|
|
789
|
+
for (const inputState of this.inputs.values()) {
|
|
790
|
+
if (!inputState.buffersrc)
|
|
791
|
+
continue;
|
|
792
|
+
const ret = await inputState.buffersrc.buffersrcAddFrame(null, AV_BUFFERSRC_FLAG_PUSH);
|
|
793
|
+
if (ret < 0 && ret !== AVERROR_EOF) {
|
|
794
|
+
FFmpegError.throwIfError(ret, `Failed to flush input ${inputState.label}`);
|
|
795
|
+
}
|
|
796
|
+
}
|
|
797
|
+
}
|
|
798
|
+
}
|
|
799
|
+
/**
|
|
800
|
+
* Flush input(s) and signal end-of-stream synchronously.
|
|
801
|
+
* Synchronous version of flush.
|
|
802
|
+
*
|
|
803
|
+
* Sends null frame to buffersrc filter(s) to flush buffered data.
|
|
804
|
+
* Must call receiveSync() on outputs to get flushed frames.
|
|
805
|
+
* Does nothing if filter is closed or was never initialized.
|
|
806
|
+
*
|
|
807
|
+
* Direct mapping to av_buffersrc_add_frame(NULL).
|
|
808
|
+
*
|
|
809
|
+
* @param inLabel - Input label to flush. If not specified, flushes all inputs.
|
|
810
|
+
*
|
|
811
|
+
* @throws {Error} If input label not found
|
|
812
|
+
*
|
|
813
|
+
* @throws {FFmpegError} If flush fails
|
|
814
|
+
*
|
|
815
|
+
* @example
|
|
816
|
+
* ```typescript
|
|
817
|
+
* // Flush specific input
|
|
818
|
+
* complex.flushSync('0:v');
|
|
819
|
+
*
|
|
820
|
+
* // Flush all inputs
|
|
821
|
+
* complex.flushSync();
|
|
822
|
+
*
|
|
823
|
+
* // Get remaining frames from output
|
|
824
|
+
* let frame;
|
|
825
|
+
* while ((frame = complex.receiveSync('out')) !== null) {
|
|
826
|
+
* frame.free();
|
|
827
|
+
* }
|
|
828
|
+
* ```
|
|
829
|
+
*
|
|
830
|
+
* @see {@link flushFramesSync} For sync iteration
|
|
831
|
+
* @see {@link receiveSync} For getting flushed frames
|
|
832
|
+
* @see {@link flush} For async version
|
|
833
|
+
*/
|
|
834
|
+
flushSync(inLabel) {
|
|
835
|
+
if (this.isClosed || !this.initialized) {
|
|
836
|
+
return;
|
|
837
|
+
}
|
|
838
|
+
if (inLabel) {
|
|
839
|
+
// Flush specific input
|
|
840
|
+
const inputState = this.inputs.get(inLabel);
|
|
841
|
+
if (!inputState) {
|
|
842
|
+
throw new Error(`Input '${inLabel}' not found`);
|
|
843
|
+
}
|
|
844
|
+
if (inputState.buffersrc) {
|
|
845
|
+
const ret = inputState.buffersrc.buffersrcAddFrameSync(null, AV_BUFFERSRC_FLAG_PUSH);
|
|
846
|
+
if (ret < 0 && ret !== AVERROR_EOF) {
|
|
847
|
+
FFmpegError.throwIfError(ret, `Failed to flush input ${inLabel}`);
|
|
848
|
+
}
|
|
849
|
+
}
|
|
850
|
+
}
|
|
851
|
+
else {
|
|
852
|
+
// Flush all inputs
|
|
853
|
+
for (const inputState of this.inputs.values()) {
|
|
854
|
+
if (!inputState.buffersrc)
|
|
855
|
+
continue;
|
|
856
|
+
const ret = inputState.buffersrc.buffersrcAddFrameSync(null, AV_BUFFERSRC_FLAG_PUSH);
|
|
857
|
+
if (ret < 0 && ret !== AVERROR_EOF) {
|
|
858
|
+
FFmpegError.throwIfError(ret, `Failed to flush input ${inputState.label}`);
|
|
859
|
+
}
|
|
860
|
+
}
|
|
861
|
+
}
|
|
862
|
+
}
|
|
863
|
+
/**
|
|
864
|
+
* Flush all inputs and yield remaining frames from specified output.
|
|
865
|
+
*
|
|
866
|
+
* Convenience method that:
|
|
867
|
+
* 1. Calls flush() to send EOF to all inputs
|
|
868
|
+
* 2. Yields all remaining frames from the specified output
|
|
869
|
+
* 3. Continues until EOF is reached
|
|
870
|
+
*
|
|
871
|
+
* Automatically frees yielded frames after use (using declaration).
|
|
872
|
+
*
|
|
873
|
+
* @param outLabel - Output label to receive flushed frames from
|
|
874
|
+
*
|
|
875
|
+
* @yields {Frame} Remaining frames from filter after flush
|
|
876
|
+
*
|
|
877
|
+
* @throws {Error} If output label not found or filter not initialized
|
|
878
|
+
*
|
|
879
|
+
* @throws {FFmpegError} If flushing or receiving fails
|
|
880
|
+
*
|
|
881
|
+
* @example
|
|
882
|
+
* ```typescript
|
|
883
|
+
* // Process all frames, then flush
|
|
884
|
+
* for await (using frame of inputFrames) {
|
|
885
|
+
* await complex.process('0:v', frame);
|
|
886
|
+
* }
|
|
887
|
+
*
|
|
888
|
+
* // Get all remaining frames
|
|
889
|
+
* for await (using frame of complex.flushFrames('out')) {
|
|
890
|
+
* await encoder.encode(frame);
|
|
891
|
+
* }
|
|
892
|
+
* ```
|
|
893
|
+
*
|
|
894
|
+
* @see {@link flush} For flushing without iteration
|
|
895
|
+
* @see {@link receive} For manual frame retrieval
|
|
896
|
+
* @see {@link flushFramesSync} For synchronous version
|
|
897
|
+
*/
|
|
898
|
+
async *flushFrames(outLabel) {
|
|
899
|
+
// Flush all inputs
|
|
900
|
+
await this.flush();
|
|
901
|
+
// Yield all remaining frames from output
|
|
902
|
+
while (true) {
|
|
903
|
+
const frame = await this.receive(outLabel);
|
|
904
|
+
if (!frame || frame === EOF) {
|
|
905
|
+
break;
|
|
906
|
+
}
|
|
907
|
+
yield frame;
|
|
908
|
+
}
|
|
909
|
+
}
|
|
910
|
+
/**
|
|
911
|
+
* Flush all inputs and yield remaining frames from specified output synchronously.
|
|
912
|
+
* Synchronous version of flushFrames.
|
|
913
|
+
*
|
|
914
|
+
* Convenience method that:
|
|
915
|
+
* 1. Calls flushSync() to send EOF to all inputs
|
|
916
|
+
* 2. Yields all remaining frames from the specified output
|
|
917
|
+
* 3. Continues until EOF is reached
|
|
918
|
+
*
|
|
919
|
+
* Automatically frees yielded frames after use (using declaration).
|
|
920
|
+
*
|
|
921
|
+
* @param outLabel - Output label to receive flushed frames from
|
|
922
|
+
*
|
|
923
|
+
* @yields {Frame} Remaining frames from filter after flush
|
|
924
|
+
*
|
|
925
|
+
* @throws {Error} If output label not found or filter not initialized
|
|
926
|
+
*
|
|
927
|
+
* @throws {FFmpegError} If flushing or receiving fails
|
|
928
|
+
*
|
|
929
|
+
* @example
|
|
930
|
+
* ```typescript
|
|
931
|
+
* // Process all frames, then flush
|
|
932
|
+
* for (using frame of inputFrames) {
|
|
933
|
+
* complex.processSync('0:v', frame);
|
|
934
|
+
* }
|
|
935
|
+
*
|
|
936
|
+
* // Get all remaining frames
|
|
937
|
+
* for (using frame of complex.flushFramesSync('out')) {
|
|
938
|
+
* encoder.encodeSync(frame);
|
|
939
|
+
* }
|
|
940
|
+
* ```
|
|
941
|
+
*
|
|
942
|
+
* @see {@link flushSync} For flushing without iteration
|
|
943
|
+
* @see {@link receiveSync} For manual frame retrieval
|
|
944
|
+
* @see {@link flushFrames} For async version
|
|
945
|
+
*/
|
|
946
|
+
*flushFramesSync(outLabel) {
|
|
947
|
+
// Flush all inputs
|
|
948
|
+
this.flushSync();
|
|
949
|
+
// Yield all remaining frames from output
|
|
950
|
+
while (true) {
|
|
951
|
+
const frame = this.receiveSync(outLabel);
|
|
952
|
+
if (!frame || frame === EOF) {
|
|
953
|
+
break;
|
|
954
|
+
}
|
|
955
|
+
yield frame;
|
|
956
|
+
}
|
|
957
|
+
}
|
|
958
|
+
/**
|
|
959
|
+
* Receive filtered frame from specified output.
|
|
960
|
+
*
|
|
961
|
+
* Pulls a single frame from the buffersink of the specified output.
|
|
962
|
+
* Automatically post-processes frame (sets timeBase, calculates duration).
|
|
963
|
+
* Returns cloned frame - caller must free it.
|
|
964
|
+
*
|
|
965
|
+
* Return values:
|
|
966
|
+
* - Frame: Successfully received frame (caller must free)
|
|
967
|
+
* - null: Need more input (AVERROR_EAGAIN) - call process() to send more frames
|
|
968
|
+
* - EOF: End of stream reached
|
|
969
|
+
*
|
|
970
|
+
* Direct mapping to av_buffersink_get_frame().
|
|
971
|
+
*
|
|
972
|
+
* @param outLabel - Output label to receive from
|
|
973
|
+
*
|
|
974
|
+
* @returns Frame on success, null if need more input, EOF if finished
|
|
975
|
+
*
|
|
976
|
+
* @throws {Error} If output label not found or filter not initialized
|
|
977
|
+
*
|
|
978
|
+
* @throws {FFmpegError} If receive fails with unexpected error
|
|
979
|
+
*
|
|
980
|
+
* @example
|
|
981
|
+
* ```typescript
|
|
982
|
+
* // Process frames one at a time
|
|
983
|
+
* await complex.process('0:v', frame1);
|
|
984
|
+
* const outFrame = await complex.receive('out');
|
|
985
|
+
* if (outFrame && outFrame !== EOF) {
|
|
986
|
+
* // Use frame
|
|
987
|
+
* outFrame.free();
|
|
988
|
+
* }
|
|
989
|
+
* ```
|
|
990
|
+
*
|
|
991
|
+
* @see {@link process} For sending input frames
|
|
992
|
+
* @see {@link flush} For flushing after all input
|
|
993
|
+
* @see {@link receiveSync} For synchronous version
|
|
994
|
+
*/
|
|
995
|
+
async receive(outLabel) {
|
|
996
|
+
if (this.isClosed || !this.initialized) {
|
|
997
|
+
return null;
|
|
998
|
+
}
|
|
999
|
+
// Get output state
|
|
1000
|
+
const outputState = this.outputs.get(outLabel);
|
|
1001
|
+
if (!outputState?.buffersink) {
|
|
1002
|
+
throw new Error(`Output '${outLabel}' not found or not initialized`);
|
|
1003
|
+
}
|
|
1004
|
+
// Allocate frame for receiving
|
|
1005
|
+
this.frame.alloc();
|
|
1006
|
+
const ret = await outputState.buffersink.buffersinkGetFrame(this.frame);
|
|
1007
|
+
if (ret >= 0) {
|
|
1008
|
+
// Success - post-process and clone for user
|
|
1009
|
+
this.postProcessOutputFrame(this.frame, outputState.buffersink);
|
|
1010
|
+
const cloned = this.frame.clone();
|
|
1011
|
+
if (!cloned) {
|
|
1012
|
+
throw new Error('Failed to clone output frame');
|
|
1013
|
+
}
|
|
1014
|
+
return cloned;
|
|
1015
|
+
}
|
|
1016
|
+
else if (ret === AVERROR_EAGAIN) {
|
|
1017
|
+
// Need more input
|
|
1018
|
+
return null;
|
|
1019
|
+
}
|
|
1020
|
+
else if (ret === AVERROR_EOF) {
|
|
1021
|
+
// End of stream
|
|
1022
|
+
return EOF;
|
|
1023
|
+
}
|
|
1024
|
+
else {
|
|
1025
|
+
// Unexpected error
|
|
1026
|
+
FFmpegError.throwIfError(ret, `Failed to receive frame from ${outLabel}`);
|
|
1027
|
+
return null;
|
|
1028
|
+
}
|
|
1029
|
+
}
|
|
1030
|
+
/**
|
|
1031
|
+
* Receive filtered frame from specified output synchronously.
|
|
1032
|
+
* Synchronous version of receive.
|
|
1033
|
+
*
|
|
1034
|
+
* Pulls a single frame from the buffersink of the specified output.
|
|
1035
|
+
* Automatically post-processes frame (sets timeBase, calculates duration).
|
|
1036
|
+
* Returns cloned frame - caller must free it.
|
|
1037
|
+
*
|
|
1038
|
+
* Return values:
|
|
1039
|
+
* - Frame: Successfully received frame (caller must free)
|
|
1040
|
+
* - null: Need more input (AVERROR_EAGAIN) - call processSync() to send more frames
|
|
1041
|
+
* - EOF: End of stream reached
|
|
1042
|
+
*
|
|
1043
|
+
* Direct mapping to av_buffersink_get_frame().
|
|
1044
|
+
*
|
|
1045
|
+
* @param outLabel - Output label to receive from
|
|
1046
|
+
*
|
|
1047
|
+
* @returns Frame on success, null if need more input, EOF if finished
|
|
1048
|
+
*
|
|
1049
|
+
* @throws {Error} If output label not found or filter not initialized
|
|
1050
|
+
*
|
|
1051
|
+
* @throws {FFmpegError} If receive fails with unexpected error
|
|
1052
|
+
*
|
|
1053
|
+
* @example
|
|
1054
|
+
* ```typescript
|
|
1055
|
+
* // Process frames one at a time
|
|
1056
|
+
* complex.processSync('0:v', frame1);
|
|
1057
|
+
* const outFrame = complex.receiveSync('out');
|
|
1058
|
+
* if (outFrame && outFrame !== EOF) {
|
|
1059
|
+
* // Use frame
|
|
1060
|
+
* outFrame.free();
|
|
1061
|
+
* }
|
|
1062
|
+
* ```
|
|
1063
|
+
*
|
|
1064
|
+
* @see {@link processSync} For sending input frames
|
|
1065
|
+
* @see {@link flushSync} For flushing after all input
|
|
1066
|
+
* @see {@link receive} For async version
|
|
1067
|
+
*/
|
|
1068
|
+
receiveSync(outLabel) {
|
|
1069
|
+
if (this.isClosed || !this.initialized) {
|
|
1070
|
+
return null;
|
|
1071
|
+
}
|
|
1072
|
+
// Get output state
|
|
1073
|
+
const outputState = this.outputs.get(outLabel);
|
|
1074
|
+
if (!outputState?.buffersink) {
|
|
1075
|
+
throw new Error(`Output '${outLabel}' not found or not initialized`);
|
|
1076
|
+
}
|
|
1077
|
+
// Allocate frame for receiving
|
|
1078
|
+
this.frame.alloc();
|
|
1079
|
+
const ret = outputState.buffersink.buffersinkGetFrameSync(this.frame);
|
|
1080
|
+
if (ret >= 0) {
|
|
1081
|
+
// Success - post-process and clone for user
|
|
1082
|
+
this.postProcessOutputFrame(this.frame, outputState.buffersink);
|
|
1083
|
+
const cloned = this.frame.clone();
|
|
1084
|
+
if (!cloned) {
|
|
1085
|
+
throw new Error('Failed to clone output frame');
|
|
1086
|
+
}
|
|
1087
|
+
return cloned;
|
|
1088
|
+
}
|
|
1089
|
+
else if (ret === AVERROR_EAGAIN) {
|
|
1090
|
+
// Need more input
|
|
1091
|
+
return null;
|
|
1092
|
+
}
|
|
1093
|
+
else if (ret === AVERROR_EOF) {
|
|
1094
|
+
// End of stream
|
|
1095
|
+
return EOF;
|
|
1096
|
+
}
|
|
1097
|
+
else {
|
|
1098
|
+
// Unexpected error
|
|
1099
|
+
FFmpegError.throwIfError(ret, `Failed to receive frame from ${outLabel}`);
|
|
1100
|
+
return null;
|
|
1101
|
+
}
|
|
1102
|
+
}
|
|
1103
|
+
/**
|
|
1104
|
+
* Check if all inputs have received at least one frame (have format information).
|
|
1105
|
+
*
|
|
1106
|
+
* @returns true if all inputs have format info (first frame received)
|
|
1107
|
+
*
|
|
1108
|
+
* @internal
|
|
1109
|
+
*/
|
|
1110
|
+
hasAllInputFormats() {
|
|
1111
|
+
for (const inputState of this.inputs.values()) {
|
|
1112
|
+
// Input has format if it has at least one queued frame OR is already initialized
|
|
1113
|
+
if (inputState.queuedFrames.length === 0 && !inputState.buffersrc) {
|
|
1114
|
+
return false;
|
|
1115
|
+
}
|
|
1116
|
+
}
|
|
1117
|
+
return true;
|
|
1118
|
+
}
|
|
1119
|
+
/**
|
|
1120
|
+
* Initialize filter graph from queued frames.
|
|
1121
|
+
*
|
|
1122
|
+
* Implements FFmpeg's configure_filtergraph() logic:
|
|
1123
|
+
* 1. Create buffersrc filters from first queued frame of each input
|
|
1124
|
+
* 2. Parse filter description
|
|
1125
|
+
* 3. Create buffersink filters
|
|
1126
|
+
* 4. Configure graph with avfilter_graph_config()
|
|
1127
|
+
* 5. Send all queued frames to buffersrc
|
|
1128
|
+
*
|
|
1129
|
+
* @throws {Error} If initialization fails
|
|
1130
|
+
*
|
|
1131
|
+
* @throws {FFmpegError} If configuration fails
|
|
1132
|
+
*
|
|
1133
|
+
* @internal
|
|
1134
|
+
*/
|
|
1135
|
+
async initializeFromQueuedFrames() {
|
|
1136
|
+
if (this.isClosed) {
|
|
1137
|
+
throw new Error('FilterComplexAPI is already closed');
|
|
1138
|
+
}
|
|
1139
|
+
// Step 1: Create buffersrc filters from first queued frame
|
|
1140
|
+
for (const [label, inputState] of this.inputs) {
|
|
1141
|
+
if (inputState.queuedFrames.length === 0) {
|
|
1142
|
+
throw new Error(`Input '${label}' has no queued frames for initialization`);
|
|
1143
|
+
}
|
|
1144
|
+
const firstFrame = inputState.queuedFrames[0];
|
|
1145
|
+
// Calculate timeBase from first frame (CFR/VFR mode)
|
|
1146
|
+
inputState.calculatedTimeBase = this.calculateTimeBase(firstFrame);
|
|
1147
|
+
// Track initial frame properties for change detection
|
|
1148
|
+
inputState.lastFrameProps = {
|
|
1149
|
+
format: firstFrame.format,
|
|
1150
|
+
width: firstFrame.width,
|
|
1151
|
+
height: firstFrame.height,
|
|
1152
|
+
sampleRate: firstFrame.sampleRate,
|
|
1153
|
+
channels: firstFrame.channelLayout?.nbChannels ?? 0,
|
|
1154
|
+
};
|
|
1155
|
+
// Create buffersrc filter
|
|
1156
|
+
const buffersrc = this.createBufferSource(label, firstFrame, inputState.calculatedTimeBase);
|
|
1157
|
+
inputState.buffersrc = buffersrc;
|
|
1158
|
+
}
|
|
1159
|
+
// Step 2: Set graph options before parsing
|
|
1160
|
+
if (this.options.scaleSwsOpts) {
|
|
1161
|
+
this.graph.scaleSwsOpts = this.options.scaleSwsOpts;
|
|
1162
|
+
}
|
|
1163
|
+
if (this.options.audioResampleOpts) {
|
|
1164
|
+
this.graph.aresampleSwrOpts = this.options.audioResampleOpts;
|
|
1165
|
+
}
|
|
1166
|
+
// Step 3: Parse filter description and create buffersink filters
|
|
1167
|
+
this.parseFilterDescription();
|
|
1168
|
+
// Step 4: Configure the graph
|
|
1169
|
+
const ret = await this.graph.config();
|
|
1170
|
+
FFmpegError.throwIfError(ret, 'Failed to configure filter complex graph');
|
|
1171
|
+
// Step 5: Send all queued frames to buffersrc
|
|
1172
|
+
for (const [label, inputState] of this.inputs) {
|
|
1173
|
+
if (!inputState.buffersrc || !inputState.calculatedTimeBase) {
|
|
1174
|
+
continue;
|
|
1175
|
+
}
|
|
1176
|
+
// Process all queued frames for this input
|
|
1177
|
+
for (const frame of inputState.queuedFrames) {
|
|
1178
|
+
// Rescale timestamps using helper
|
|
1179
|
+
this.rescaleFrameTimestamps(frame, inputState.calculatedTimeBase);
|
|
1180
|
+
// Send to buffersrc
|
|
1181
|
+
const ret = await inputState.buffersrc.buffersrcAddFrame(frame, AV_BUFFERSRC_FLAG_PUSH);
|
|
1182
|
+
FFmpegError.throwIfError(ret, `Failed to send queued frame to ${label}`);
|
|
1183
|
+
// Free the frame
|
|
1184
|
+
frame.free();
|
|
1185
|
+
}
|
|
1186
|
+
// Clear the queue after processing
|
|
1187
|
+
inputState.queuedFrames = [];
|
|
1188
|
+
}
|
|
1189
|
+
this.initialized = true;
|
|
1190
|
+
}
|
|
1191
|
+
/**
|
|
1192
|
+
* Initialize filter graph from queued frames synchronously.
|
|
1193
|
+
* Synchronous version of initializeFromQueuedFrames.
|
|
1194
|
+
*
|
|
1195
|
+
* @throws {Error} If closed or inputs have no queued frames
|
|
1196
|
+
*
|
|
1197
|
+
* @throws {FFmpegError} If graph configuration or frame processing fails
|
|
1198
|
+
*
|
|
1199
|
+
* @internal
|
|
1200
|
+
*/
|
|
1201
|
+
initializeFromQueuedFramesSync() {
|
|
1202
|
+
if (this.isClosed) {
|
|
1203
|
+
throw new Error('FilterComplexAPI is already closed');
|
|
1204
|
+
}
|
|
1205
|
+
// Step 1: Create buffersrc filters from first queued frame
|
|
1206
|
+
for (const [label, inputState] of this.inputs) {
|
|
1207
|
+
if (inputState.queuedFrames.length === 0) {
|
|
1208
|
+
throw new Error(`Input '${label}' has no queued frames for initialization`);
|
|
1209
|
+
}
|
|
1210
|
+
const firstFrame = inputState.queuedFrames[0];
|
|
1211
|
+
// Calculate timeBase from first frame (CFR/VFR mode)
|
|
1212
|
+
inputState.calculatedTimeBase = this.calculateTimeBase(firstFrame);
|
|
1213
|
+
// Track initial frame properties for change detection
|
|
1214
|
+
inputState.lastFrameProps = {
|
|
1215
|
+
format: firstFrame.format,
|
|
1216
|
+
width: firstFrame.width,
|
|
1217
|
+
height: firstFrame.height,
|
|
1218
|
+
sampleRate: firstFrame.sampleRate,
|
|
1219
|
+
channels: firstFrame.channelLayout?.nbChannels ?? 0,
|
|
1220
|
+
};
|
|
1221
|
+
// Create buffersrc filter
|
|
1222
|
+
const buffersrc = this.createBufferSource(label, firstFrame, inputState.calculatedTimeBase);
|
|
1223
|
+
inputState.buffersrc = buffersrc;
|
|
1224
|
+
}
|
|
1225
|
+
// Step 2: Set graph options before parsing
|
|
1226
|
+
if (this.options.scaleSwsOpts) {
|
|
1227
|
+
this.graph.scaleSwsOpts = this.options.scaleSwsOpts;
|
|
1228
|
+
}
|
|
1229
|
+
if (this.options.audioResampleOpts) {
|
|
1230
|
+
this.graph.aresampleSwrOpts = this.options.audioResampleOpts;
|
|
1231
|
+
}
|
|
1232
|
+
// Step 3: Parse filter description and create buffersink filters
|
|
1233
|
+
this.parseFilterDescription();
|
|
1234
|
+
// Step 4: Configure the graph
|
|
1235
|
+
const ret = this.graph.configSync();
|
|
1236
|
+
FFmpegError.throwIfError(ret, 'Failed to configure filter complex graph');
|
|
1237
|
+
// Step 5: Send all queued frames to buffersrc
|
|
1238
|
+
for (const [label, inputState] of this.inputs) {
|
|
1239
|
+
if (!inputState.buffersrc || !inputState.calculatedTimeBase) {
|
|
1240
|
+
continue;
|
|
1241
|
+
}
|
|
1242
|
+
// Process all queued frames for this input
|
|
1243
|
+
for (const frame of inputState.queuedFrames) {
|
|
1244
|
+
// Rescale timestamps using helper
|
|
1245
|
+
this.rescaleFrameTimestamps(frame, inputState.calculatedTimeBase);
|
|
1246
|
+
// Send to buffersrc
|
|
1247
|
+
const ret = inputState.buffersrc.buffersrcAddFrameSync(frame, AV_BUFFERSRC_FLAG_PUSH);
|
|
1248
|
+
FFmpegError.throwIfError(ret, `Failed to send queued frame to ${label}`);
|
|
1249
|
+
// Free the frame
|
|
1250
|
+
frame.free();
|
|
1251
|
+
}
|
|
1252
|
+
// Clear the queue after processing
|
|
1253
|
+
inputState.queuedFrames = [];
|
|
1254
|
+
}
|
|
1255
|
+
this.initialized = true;
|
|
1256
|
+
}
+    /**
+     * Calculate timeBase from frame based on media type and CFR option.
+     *
+     * Implements FFmpeg's ifilter_parameters_from_frame logic:
+     * - Audio: Always { 1, sample_rate }
+     * - Video CFR: 1/framerate (inverse of framerate)
+     * - Video VFR: Use frame.timeBase
+     *
+     * @param frame - Input frame
+     *
+     * @returns Calculated timeBase
+     *
+     * @internal
+     */
+    calculateTimeBase(frame) {
+        if (frame.isAudio()) {
+            // Audio: Always { 1, sample_rate }
+            return { num: 1, den: frame.sampleRate };
+        }
+        else {
+            // Video: Check CFR flag
+            if (this.options.cfr) {
+                // CFR mode: timeBase = 1/framerate = inverse(framerate)
+                // Note: framerate is guaranteed to be set (validated in create())
+                return avInvQ(this.options.framerate);
+            }
+            else {
+                // VFR mode: Use frame's timeBase
+                return frame.timeBase;
+            }
+        }
+    }
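
As a worked example of the three rules above (the helper below is a local stand-in with the same shape, not the node-av avInvQ export):

```typescript
// Local stand-ins; num/den mirror the rational shape used above.
type Q = { num: number; den: number };
const invQ = (q: Q): Q => ({ num: q.den, den: q.num }); // same idea as avInvQ

// Audio at 48 kHz: timeBase is always {1, sampleRate}.
const audioTb: Q = { num: 1, den: 48000 };         // -> 1/48000

// Video, CFR at 30000/1001 fps: timeBase is the inverted framerate.
const cfrTb: Q = invQ({ num: 30000, den: 1001 });  // -> 1001/30000

// Video, VFR: whatever timeBase the incoming frame already carries
// (e.g. 1/90000 for MPEG-TS sources) is used unchanged.
const vfrTb: Q = { num: 1, den: 90000 };
```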
+    /**
+     * Rescale frame timestamps to calculated timeBase.
+     *
+     * Helper to avoid code duplication when rescaling timestamps.
+     * Modifies the frame in-place.
+     *
+     * @param frame - Frame to rescale
+     *
+     * @param calculatedTimeBase - Target timeBase
+     *
+     * @internal
+     */
+    rescaleFrameTimestamps(frame, calculatedTimeBase) {
+        const originalTimeBase = frame.timeBase;
+        frame.pts = avRescaleQ(frame.pts, originalTimeBase, calculatedTimeBase);
+        frame.duration = avRescaleQ(frame.duration, originalTimeBase, calculatedTimeBase);
+        frame.timeBase = new Rational(calculatedTimeBase.num, calculatedTimeBase.den);
+    }
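
The rescale has the shape of FFmpeg's av_rescale_q: a value expressed in one rational time base is converted to another as a * bq / cq. Ignoring rounding mode, a small numeric check with local helpers (not the node-av avRescaleQ export, which also handles bigint values):

```typescript
type Q = { num: number; den: number };

// a * bq / cq, the core of av_rescale_q (rounding mode ignored for clarity).
const rescaleQ = (a: number, bq: Q, cq: Q): number =>
  Math.round((a * bq.num * cq.den) / (bq.den * cq.num));

// A pts of 3000 in a 1/1000 (millisecond) time base, moved into a
// 1001/30000 CFR time base (NTSC 29.97 fps), lands on tick ~90:
rescaleQ(3000, { num: 1, den: 1000 }, { num: 1001, den: 30000 }); // ≈ 90
```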
+    /**
+     * Create buffer source for an input.
+     *
+     * @param label - Input label
+     *
+     * @param frame - First frame from this input
+     *
+     * @param timeBase - Calculated timeBase for this input (from calculateTimeBase)
+     *
+     * @returns BufferSrc filter context
+     *
+     * @throws {Error} If creation fails
+     *
+     * @internal
+     */
+    createBufferSource(label, frame, timeBase) {
+        const filterName = frame.isVideo() ? 'buffer' : 'abuffer';
+        const bufferFilter = Filter.getByName(filterName);
+        if (!bufferFilter) {
+            throw new Error(`${filterName} filter not found`);
+        }
+        let buffersrcCtx;
+        if (frame.isVideo()) {
+            // Video: allocate + set parameters
+            buffersrcCtx = this.graph.allocFilter(bufferFilter, `in_${label}`);
+            if (!buffersrcCtx) {
+                throw new Error(`Failed to allocate buffer source for ${label}`);
+            }
+            const ret = buffersrcCtx.buffersrcParametersSet({
+                width: frame.width,
+                height: frame.height,
+                format: frame.format,
+                timeBase: timeBase,
+                frameRate: this.options.framerate,
+                sampleAspectRatio: frame.sampleAspectRatio,
+                colorRange: frame.colorRange,
+                colorSpace: frame.colorSpace,
+                hwFramesCtx: frame.hwFramesCtx,
+            });
+            FFmpegError.throwIfError(ret, `Failed to set buffer source parameters for ${label}`);
+            const initRet = buffersrcCtx.init(null);
+            FFmpegError.throwIfError(initRet, `Failed to initialize buffer source for ${label}`);
+        }
+        else {
+            // Audio: create with args string
+            const formatName = avGetSampleFmtName(frame.format);
+            const channelLayout = frame.channelLayout.mask === 0n ? 'stereo' : frame.channelLayout.mask.toString();
+            const args = `time_base=${timeBase.num}/${timeBase.den}:sample_rate=${frame.sampleRate}:sample_fmt=${formatName}:channel_layout=${channelLayout}`;
+            buffersrcCtx = this.graph.createFilter(bufferFilter, `in_${label}`, args);
+            if (!buffersrcCtx) {
+                throw new Error(`Failed to create audio buffer source for ${label}`);
+            }
+        }
+        return buffersrcCtx;
+    }
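
For the audio branch, the args string built above takes a predictable shape. A sketch with purely illustrative values (a 48 kHz, planar-float, stereo frame; the format name and layout mask are assumptions, not taken from this diff):

```typescript
// Illustrative values only, showing the string the audio branch would emit.
const timeBase = { num: 1, den: 48000 };
const sampleRate = 48000;
const formatName = 'fltp';             // assumed result of avGetSampleFmtName for planar float
const channelLayout = (3n).toString(); // stereo mask FL|FR = 0x3; a 0n mask falls back to 'stereo'

const args = `time_base=${timeBase.num}/${timeBase.den}:sample_rate=${sampleRate}:sample_fmt=${formatName}:channel_layout=${channelLayout}`;
// -> 'time_base=1/48000:sample_rate=48000:sample_fmt=fltp:channel_layout=3'
```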
+    /**
+     * Create buffer sink for an output.
+     *
+     * @param label - Output label
+     *
+     * @param isVideo - Whether this is a video output
+     *
+     * @returns BufferSink filter context
+     *
+     * @throws {Error} If creation fails
+     *
+     * @internal
+     */
+    createBufferSink(label, isVideo) {
+        const filterName = isVideo ? 'buffersink' : 'abuffersink';
+        const sinkFilter = Filter.getByName(filterName);
+        if (!sinkFilter) {
+            throw new Error(`${filterName} filter not found`);
+        }
+        const buffersinkCtx = this.graph.createFilter(sinkFilter, `out_${label}`, null);
+        if (!buffersinkCtx) {
+            throw new Error(`Failed to create buffer sink for ${label}`);
+        }
+        return buffersinkCtx;
+    }
+    /**
+     * Parse filter description and build graph using segment API.
+     *
+     * @throws {Error} If parsing fails
+     *
+     * @throws {FFmpegError} If graph construction fails
+     *
+     * @internal
+     */
+    parseFilterDescription() {
+        // Step 1: Parse the filter description into a segment
+        const segment = this.graph.segmentParse(this.description);
+        if (!segment) {
+            throw new Error('Failed to parse filter segment');
+        }
+        try {
+            // Step 2: Create filter instances (but don't initialize yet)
+            let ret = segment.createFilters();
+            FFmpegError.throwIfError(ret, 'Failed to create filters in segment');
+            // Step 3: Set hw_device_ctx on filters that need it BEFORE initialization
+            const filters = this.graph.filters;
+            if (filters && this.options.hardware) {
+                for (const filterCtx of filters) {
+                    const filter = filterCtx.filter;
+                    if (filter?.hasFlags(AVFILTER_FLAG_HWDEVICE)) {
+                        filterCtx.hwDeviceCtx = this.options.hardware.deviceContext;
+                        // Set extra_hw_frames if specified
+                        if (this.options.extraHWFrames !== undefined && this.options.extraHWFrames > 0) {
+                            filterCtx.extraHWFrames = this.options.extraHWFrames;
+                        }
+                    }
+                }
+            }
+            // Step 4: Apply options to filters
+            ret = segment.applyOpts();
+            FFmpegError.throwIfError(ret, 'Failed to apply options to segment');
+            // Step 5: Initialize and link filters in the segment
+            const inputs = new FilterInOut();
+            const outputs = new FilterInOut();
+            ret = segment.apply(inputs, outputs);
+            FFmpegError.throwIfError(ret, 'Failed to apply segment');
+            // Step 6: Link buffersrc filters to segment inputs
+            this.linkBufferSources(inputs);
+            // Step 7: Link segment outputs to buffersink filters
+            this.linkBufferSinks(outputs);
+            // Clean up FilterInOut structures
+            inputs.free();
+            outputs.free();
+        }
+        finally {
+            // Always free the segment
+            segment.free();
+        }
+    }
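
The labels left unconnected after segment parsing are what the two linking helpers below match on. For a description such as the illustrative one here (not taken from this diff), the open pads and the filters created for them would pair up like this:

```typescript
// Illustrative only: this description leaves two open input pads
// ('main', 'logo') and one open output pad ('out') after parsing.
const description = '[main][logo]overlay=10:10[out]';

// The class then links them to filters it created itself:
//   input  'main' -> buffersrc  instance named  in_main
//   input  'logo' -> buffersrc  instance named  in_logo
//   output 'out'  -> buffersink instance named  out_out
```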
+    /**
+     * Link buffersrc filters to segment inputs.
+     *
+     * Iterates through FilterInOut chain and links by label.
+     *
+     * @param inputs - FilterInOut chain of segment inputs
+     *
+     * @throws {Error} If linking fails
+     *
+     * @internal
+     */
+    linkBufferSources(inputs) {
+        let current = inputs;
+        while (current?.name) {
+            const label = current.name;
+            const inputState = this.inputs.get(label);
+            if (!inputState?.buffersrc) {
+                throw new Error(`No buffersrc found for input label '${label}'`);
+            }
+            if (!current.filterCtx) {
+                throw new Error(`FilterContext is null for input label '${label}'`);
+            }
+            // Link buffersrc → segment input
+            const ret = inputState.buffersrc.link(0, current.filterCtx, current.padIdx);
+            FFmpegError.throwIfError(ret, `Failed to link buffersrc '${label}' to segment`);
+            current = current.next;
+        }
+    }
+    /**
+     * Link segment outputs to buffersink filters.
+     *
+     * Iterates through FilterInOut chain and links by label.
+     *
+     * @param outputs - FilterInOut chain of segment outputs
+     *
+     * @throws {Error} If linking fails
+     *
+     * @internal
+     */
+    linkBufferSinks(outputs) {
+        let current = outputs;
+        // Get media type from first input as default
+        const firstInput = this.inputs.values().next().value;
+        const defaultIsVideo = firstInput?.buffersrc?.filter?.name === 'buffer'; // 'buffer' = video, 'abuffer' = audio
+        while (current?.name) {
+            const label = current.name;
+            const outputState = this.outputs.get(label);
+            if (!outputState) {
+                throw new Error(`No output state found for label '${label}'`);
+            }
+            if (!current.filterCtx) {
+                throw new Error(`FilterContext is null for output label '${label}'`);
+            }
+            // Determine media type: use configured value or default from first input
+            const outputConfig = this.options.outputs.find((o) => o.label === label);
+            let isVideo = defaultIsVideo;
+            if (outputConfig?.mediaType !== undefined) {
+                isVideo = outputConfig.mediaType === AVMEDIA_TYPE_VIDEO;
+            }
+            // Create buffersink filter
+            const buffersink = this.createBufferSink(label, isVideo);
+            outputState.buffersink = buffersink;
+            // Link segment output → buffersink
+            const ret = current.filterCtx.link(current.padIdx, buffersink, 0);
+            FFmpegError.throwIfError(ret, `Failed to link segment to buffersink '${label}'`);
+            current = current.next;
+        }
+    }
+    /**
+     * Post-process output frame from buffersink.
+     *
+     * Applies FFmpeg's fg_output_step() behavior:
+     * 1. Sets frame.timeBase from buffersink (filters can change timeBase)
+     * 2. Calculates video frame duration from frame rate if not set
+     *
+     * This must be called AFTER buffersinkGetFrame() for every output frame.
+     *
+     * @param frame - Output frame from buffersink
+     *
+     * @param buffersink - The buffersink context
+     *
+     * @internal
+     */
+    postProcessOutputFrame(frame, buffersink) {
+        // Filters can change timeBase (e.g., aresample sets output to {1, out_sample_rate})
+        // Without this, frame has INPUT timeBase instead of filter's OUTPUT timeBase
+        frame.timeBase = buffersink.buffersinkGetTimeBase();
+        if (frame.isVideo() && !frame.duration) {
+            const frameRate = buffersink.buffersinkGetFrameRate();
+            if (frameRate.num > 0 && frameRate.den > 0) {
+                frame.duration = avRescaleQ(1, avInvQ(frameRate), frame.timeBase);
+            }
+        }
+    }
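
The duration fallback is a single rescale of one frame interval into the sink's time base. With a 30 fps sink and a 1/15360 output time base (illustrative numbers, commonly seen with MP4 muxing), the arithmetic works out as follows:

```typescript
// One frame interval, rescaled from 1/frameRate into the output time base:
//   duration = 1 * (1/30) / (1/15360) = 15360 / 30 = 512 ticks
const frameRate = { num: 30, den: 1 };      // as reported by the buffersink
const outTimeBase = { num: 1, den: 15360 }; // illustrative output time base
const duration = (1 * frameRate.den * outTimeBase.den) / (frameRate.num * outTimeBase.num); // 512
```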
+    /**
+     * Close filter complex and release resources.
+     *
+     * Frees queued frames, filter graph and all filter contexts.
+     * Safe to call multiple times.
+     *
+     * @example
+     * ```typescript
+     * complex.close();
+     * ```
+     *
+     * @example
+     * ```typescript
+     * // Automatic cleanup with using
+     * {
+     *   using complex = FilterComplexAPI.create('[0:v]scale=640:480[out]', { ... });
+     *   // Use complex...
+     * } // Automatically freed
+     * ```
+     *
+     * @see {@link Symbol.dispose} For automatic cleanup
+     */
+    close() {
+        if (this.isClosed) {
+            return;
+        }
+        this.isClosed = true;
+        // Free any queued frames
+        for (const inputState of this.inputs.values()) {
+            for (const frame of inputState.queuedFrames) {
+                frame.free();
+            }
+            inputState.queuedFrames = [];
+        }
+        // Clear maps
+        this.inputs.clear();
+        this.outputs.clear();
+        // Free graph
+        this.graph.free();
+        this.initialized = false;
+        this.initializePromise = null;
+    }
+    /**
+     * Dispose of filter complex.
+     *
+     * Implements Disposable interface for automatic cleanup.
+     *
+     * @example
+     * ```typescript
+     * {
+     *   using complex = FilterComplexAPI.create('[0:v]scale=640:480[out]', { ... });
+     *   // Use complex...
+     * } // Automatically freed
+     * ```
+     *
+     * @see {@link close} For manual cleanup
+     */
+    [Symbol.dispose]() {
+        this.close();
+    }
+}
+//# sourceMappingURL=filter-complex.js.map