typed-ffmpeg-compatible 2.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- typed_ffmpeg/__init__.py +25 -0
- typed_ffmpeg/base.py +114 -0
- typed_ffmpeg/common/__init__.py +0 -0
- typed_ffmpeg/common/schema.py +308 -0
- typed_ffmpeg/common/serialize.py +132 -0
- typed_ffmpeg/dag/__init__.py +13 -0
- typed_ffmpeg/dag/compile.py +51 -0
- typed_ffmpeg/dag/context.py +221 -0
- typed_ffmpeg/dag/factory.py +31 -0
- typed_ffmpeg/dag/global_runnable/__init__.py +0 -0
- typed_ffmpeg/dag/global_runnable/global_args.py +178 -0
- typed_ffmpeg/dag/global_runnable/runnable.py +174 -0
- typed_ffmpeg/dag/io/__init__.py +0 -0
- typed_ffmpeg/dag/io/_input.py +197 -0
- typed_ffmpeg/dag/io/_output.py +318 -0
- typed_ffmpeg/dag/io/output_args.py +327 -0
- typed_ffmpeg/dag/nodes.py +479 -0
- typed_ffmpeg/dag/schema.py +210 -0
- typed_ffmpeg/dag/utils.py +41 -0
- typed_ffmpeg/dag/validate.py +172 -0
- typed_ffmpeg/exceptions.py +42 -0
- typed_ffmpeg/filters.py +3510 -0
- typed_ffmpeg/probe.py +43 -0
- typed_ffmpeg/py.typed +0 -0
- typed_ffmpeg/schema.py +29 -0
- typed_ffmpeg/streams/__init__.py +5 -0
- typed_ffmpeg/streams/audio.py +6955 -0
- typed_ffmpeg/streams/av.py +22 -0
- typed_ffmpeg/streams/channel_layout.py +39 -0
- typed_ffmpeg/streams/video.py +12974 -0
- typed_ffmpeg/types.py +119 -0
- typed_ffmpeg/utils/__init__.py +0 -0
- typed_ffmpeg/utils/escaping.py +49 -0
- typed_ffmpeg/utils/lazy_eval/__init__.py +0 -0
- typed_ffmpeg/utils/lazy_eval/operator.py +134 -0
- typed_ffmpeg/utils/lazy_eval/schema.py +211 -0
- typed_ffmpeg/utils/run.py +27 -0
- typed_ffmpeg/utils/snapshot.py +26 -0
- typed_ffmpeg/utils/typing.py +17 -0
- typed_ffmpeg/utils/view.py +64 -0
- typed_ffmpeg_compatible-2.1.0.dist-info/LICENSE +21 -0
- typed_ffmpeg_compatible-2.1.0.dist-info/METADATA +183 -0
- typed_ffmpeg_compatible-2.1.0.dist-info/RECORD +45 -0
- typed_ffmpeg_compatible-2.1.0.dist-info/WHEEL +4 -0
- typed_ffmpeg_compatible-2.1.0.dist-info/entry_points.txt +3 -0
typed_ffmpeg/filters.py
ADDED
@@ -0,0 +1,3510 @@
|
|
1
|
+
# NOTE: this file is auto-generated, do not modify
|
2
|
+
from typing import Any, Literal
|
3
|
+
|
4
|
+
from .common.schema import FFMpegFilterDef
|
5
|
+
from .dag.factory import filter_node_factory
|
6
|
+
from .dag.nodes import FilterableStream, FilterNode
|
7
|
+
from .schema import Auto, Default
|
8
|
+
from .streams.audio import AudioStream
|
9
|
+
from .streams.video import VideoStream
|
10
|
+
from .types import Boolean, Color, Double, Duration, Flags, Float, Image_size, Int, Int64, Pix_fmt, String
|
11
|
+
|
12
|
+
|
13
|
+
def acrossfade(
    _crossfade0: AudioStream,
    _crossfade1: AudioStream,
    *,
    nb_samples: Int = Default(44100),
    duration: Duration = Default(0.0),
    overlap: Boolean = Default(True),
    curve1: Int | Literal["nofade", "tri", "qsin", "esin", "hsin", "log", "ipar", "qua", "cub", "squ", "cbr", "par", "exp", "iqsin", "ihsin", "dese", "desi", "losi", "sinc", "isinc"] | Default = Default("tri"),
    curve2: Int | Literal["nofade", "tri", "qsin", "esin", "hsin", "log", "ipar", "qua", "cub", "squ", "cbr", "par", "exp", "iqsin", "ihsin", "dese", "desi", "losi", "sinc", "isinc"] | Default = Default("tri"),
    **kwargs: Any
) -> AudioStream:
    """
    Cross fade two input audio streams.

    Args:
        nb_samples: number of samples over which the cross fade runs (from 1 to 2.14748e+08) (default 44100)
        duration: cross fade duration (default 0)
        overlap: overlap the end of the 1st stream with the start of the 2nd (default true)
        curve1: fade curve type for the 1st stream (from -1 to 18) (default tri)
        curve2: fade curve type for the 2nd stream (from -1 to 18) (default tri)

    Returns:
        default: the audio stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#acrossfade)

    """
    # Collect named options first; explicit kwargs override them, matching
    # the `{...} | kwargs` merge semantics of the generated code.
    options: dict[str, Any] = {
        "nb_samples": nb_samples,
        "duration": duration,
        "overlap": overlap,
        "curve1": curve1,
        "curve2": curve2,
    }
    options.update(kwargs)
    node = filter_node_factory(
        FFMpegFilterDef(name="acrossfade", typings_input=("audio", "audio"), typings_output=("audio",)),
        _crossfade0,
        _crossfade1,
        **options,
    )
    return node.audio(0)
|
102
|
+
|
103
|
+
|
104
|
+
def ainterleave(
    *streams: AudioStream,
    nb_inputs: Int = Auto("len(streams)"),
    duration: Int | Literal["longest", "shortest", "first"] | Default = Default("longest"),
    **kwargs: Any
) -> AudioStream:
    """
    Temporally interleave audio inputs.

    Args:
        nb_inputs: number of inputs (from 1 to INT_MAX) (default 2)
        duration: how the end-of-stream is determined (from 0 to 2) (default longest)

    Returns:
        default: the audio stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#interleave_002c-ainterleave)

    """
    # Explicit kwargs win over the named options (same as `{...} | kwargs`).
    options: dict[str, Any] = {"nb_inputs": nb_inputs, "duration": duration}
    options.update(kwargs)
    node = filter_node_factory(
        FFMpegFilterDef(
            name="ainterleave", typings_input="[StreamType.audio] * int(nb_inputs)", typings_output=("audio",)
        ),
        *streams,
        **options,
    )
    return node.audio(0)
|
137
|
+
|
138
|
+
|
139
|
+
def alphamerge(
    _main: VideoStream,
    _alpha: VideoStream,
    *,
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Copy the luma value of the second input into the alpha channel of the first input.

    Args:
        eof_action: action on EOF from the secondary input (from 0 to 2) (default repeat)
        shortest: terminate when the shortest input ends (default false)
        repeatlast: extend the last frame of secondary streams past EOF (default true)
        ts_sync_mode: strictness of syncing on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#alphamerge)

    """
    # Explicit kwargs win over the named options (same as `{...} | kwargs`).
    options: dict[str, Any] = {
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    options.update(kwargs)
    node = filter_node_factory(
        FFMpegFilterDef(name="alphamerge", typings_input=("video", "video"), typings_output=("video",)),
        _main,
        _alpha,
        **options,
    )
    return node.video(0)
|
182
|
+
|
183
|
+
|
184
|
+
def amerge(*streams: AudioStream, inputs: Int = Auto("len(streams)"), **kwargs: Any) -> AudioStream:
    """
    Merge two or more audio streams into a single multi-channel stream.

    Args:
        inputs: the number of inputs (from 1 to 64) (default 2)

    Returns:
        default: the audio stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#amerge)

    """
    # Explicit kwargs win over the named options (same as `{...} | kwargs`).
    options: dict[str, Any] = {"inputs": inputs}
    options.update(kwargs)
    node = filter_node_factory(
        FFMpegFilterDef(name="amerge", typings_input="[StreamType.audio] * int(inputs)", typings_output=("audio",)),
        *streams,
        **options,
    )
    return node.audio(0)
|
208
|
+
|
209
|
+
|
210
|
+
def amix(
    *streams: AudioStream,
    inputs: Int = Auto("len(streams)"),
    duration: Int | Literal["longest", "shortest", "first"] | Default = Default("longest"),
    dropout_transition: Float = Default(2.0),
    weights: String = Default("1 1"),
    normalize: Boolean = Default(True),
    **kwargs: Any
) -> AudioStream:
    """
    Audio mixing.

    Args:
        inputs: number of inputs (from 1 to 32767) (default 2)
        duration: how the end-of-stream is determined (from 0 to 2) (default longest)
        dropout_transition: volume renormalization transition time, in seconds, when an input ends (from 0 to INT_MAX) (default 2)
        weights: per-input weight (default "1 1")
        normalize: scale inputs (default true)

    Returns:
        default: the audio stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#amix)

    """
    # Explicit kwargs win over the named options (same as `{...} | kwargs`).
    options: dict[str, Any] = {
        "inputs": inputs,
        "duration": duration,
        "dropout_transition": dropout_transition,
        "weights": weights,
        "normalize": normalize,
    }
    options.update(kwargs)
    node = filter_node_factory(
        FFMpegFilterDef(name="amix", typings_input="[StreamType.audio] * int(inputs)", typings_output=("audio",)),
        *streams,
        **options,
    )
    return node.audio(0)
|
250
|
+
|
251
|
+
|
252
|
+
def amultiply(_multiply0: AudioStream, _multiply1: AudioStream, **kwargs: Any) -> AudioStream:
    """
    Multiply two audio streams.

    Returns:
        default: the audio stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#amultiply)

    """
    # This filter has no named options; forward kwargs as-is
    # (`{} | kwargs` is just kwargs).
    node = filter_node_factory(
        FFMpegFilterDef(name="amultiply", typings_input=("audio", "audio"), typings_output=("audio",)),
        _multiply0,
        _multiply1,
        **kwargs,
    )
    return node.audio(0)
|
271
|
+
|
272
|
+
|
273
|
+
def anlmf(
    _input: AudioStream,
    _desired: AudioStream,
    *,
    order: Int = Default(256),
    mu: Float = Default(0.75),
    eps: Float = Default(1.0),
    leakage: Float = Default(0.0),
    out_mode: Int | Literal["i", "d", "o", "n"] | Default = Default("o"),
    enable: String = Default(None),
    **kwargs: Any
) -> AudioStream:
    """
    Apply Normalized Least-Mean-Fourth algorithm to first audio stream.

    Args:
        order: filter order (from 1 to 32767) (default 256)
        mu: filter mu (from 0 to 2) (default 0.75)
        eps: filter eps (from 0 to 1) (default 1)
        leakage: filter leakage (from 0 to 1) (default 0)
        out_mode: output mode (from 0 to 3) (default o)
        enable: timeline editing

    Returns:
        default: the audio stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#anlmf_002c-anlms)

    """
    # Explicit kwargs win over the named options (same as `{...} | kwargs`).
    options: dict[str, Any] = {
        "order": order,
        "mu": mu,
        "eps": eps,
        "leakage": leakage,
        "out_mode": out_mode,
        "enable": enable,
    }
    options.update(kwargs)
    node = filter_node_factory(
        FFMpegFilterDef(name="anlmf", typings_input=("audio", "audio"), typings_output=("audio",)),
        _input,
        _desired,
        **options,
    )
    return node.audio(0)
|
319
|
+
|
320
|
+
|
321
|
+
def anlms(
    _input: AudioStream,
    _desired: AudioStream,
    *,
    order: Int = Default(256),
    mu: Float = Default(0.75),
    eps: Float = Default(1.0),
    leakage: Float = Default(0.0),
    out_mode: Int | Literal["i", "d", "o", "n"] | Default = Default("o"),
    enable: String = Default(None),
    **kwargs: Any
) -> AudioStream:
    """
    Apply Normalized Least-Mean-Squares algorithm to first audio stream.

    Args:
        order: filter order (from 1 to 32767) (default 256)
        mu: filter mu (from 0 to 2) (default 0.75)
        eps: filter eps (from 0 to 1) (default 1)
        leakage: filter leakage (from 0 to 1) (default 0)
        out_mode: output mode (from 0 to 3) (default o)
        enable: timeline editing

    Returns:
        default: the audio stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#anlmf_002c-anlms)

    """
    # Explicit kwargs win over the named options (same as `{...} | kwargs`).
    options: dict[str, Any] = {
        "order": order,
        "mu": mu,
        "eps": eps,
        "leakage": leakage,
        "out_mode": out_mode,
        "enable": enable,
    }
    options.update(kwargs)
    node = filter_node_factory(
        FFMpegFilterDef(name="anlms", typings_input=("audio", "audio"), typings_output=("audio",)),
        _input,
        _desired,
        **options,
    )
    return node.audio(0)
|
367
|
+
|
368
|
+
|
369
|
+
def asdr(_input0: AudioStream, _input1: AudioStream, **kwargs: Any) -> AudioStream:
    """
    Measure Audio Signal-to-Distortion Ratio.

    Returns:
        default: the audio stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#asdr)

    """
    # This filter has no named options; forward kwargs as-is
    # (`{} | kwargs` is just kwargs).
    node = filter_node_factory(
        FFMpegFilterDef(name="asdr", typings_input=("audio", "audio"), typings_output=("audio",)),
        _input0,
        _input1,
        **kwargs,
    )
    return node.audio(0)
|
388
|
+
|
389
|
+
|
390
|
+
def astreamselect(
    *streams: AudioStream, inputs: Int = Auto("len(streams)"), map: String = Default(None), **kwargs: Any
) -> FilterNode:
    """
    Select audio streams

    Args:
        inputs: number of input streams (from 2 to INT_MAX) (default 2)
        map: input indexes to remap to outputs

    Returns:
        filter_node: the filter node


    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#streamselect_002c-astreamselect)

    """
    # Explicit kwargs win over the named options (same as `{...} | kwargs`).
    options: dict[str, Any] = {"inputs": inputs, "map": map}
    options.update(kwargs)
    # Output count is derived from how many indexes appear in `map`,
    # hence the dynamic typings_output expression.
    return filter_node_factory(
        FFMpegFilterDef(
            name="astreamselect",
            typings_input="[StreamType.audio] * int(inputs)",
            typings_output="[StreamType.audio] * len(re.findall(r'\\d+', str(map)))",
        ),
        *streams,
        **options,
    )
|
424
|
+
|
425
|
+
|
426
|
+
def axcorrelate(
    _axcorrelate0: AudioStream,
    _axcorrelate1: AudioStream,
    *,
    size: Int = Default(256),
    algo: Int | Literal["slow", "fast"] | Default = Default("slow"),
    **kwargs: Any
) -> AudioStream:
    """
    Cross-correlate two audio streams.

    Args:
        size: segment size (from 2 to 131072) (default 256)
        algo: algorithm (from 0 to 1) (default slow)

    Returns:
        default: the audio stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#axcorrelate)

    """
    # Explicit kwargs win over the named options (same as `{...} | kwargs`).
    options: dict[str, Any] = {"size": size, "algo": algo}
    options.update(kwargs)
    node = filter_node_factory(
        FFMpegFilterDef(name="axcorrelate", typings_input=("audio", "audio"), typings_output=("audio",)),
        _axcorrelate0,
        _axcorrelate1,
        **options,
    )
    return node.audio(0)
|
460
|
+
|
461
|
+
|
462
|
+
def blend(
    _top: VideoStream,
    _bottom: VideoStream,
    *,
    c0_mode: Int | Literal["addition", "addition128", "grainmerge", "and", "average", "burn", "darken", "difference", "difference128", "grainextract", "divide", "dodge", "exclusion", "extremity", "freeze", "glow", "hardlight", "hardmix", "heat", "lighten", "linearlight", "multiply", "multiply128", "negation", "normal", "or", "overlay", "phoenix", "pinlight", "reflect", "screen", "softlight", "subtract", "vividlight", "xor", "softdifference", "geometric", "harmonic", "bleach", "stain", "interpolate", "hardoverlay"] | Default = Default("normal"),
    c1_mode: Int | Literal["addition", "addition128", "grainmerge", "and", "average", "burn", "darken", "difference", "difference128", "grainextract", "divide", "dodge", "exclusion", "extremity", "freeze", "glow", "hardlight", "hardmix", "heat", "lighten", "linearlight", "multiply", "multiply128", "negation", "normal", "or", "overlay", "phoenix", "pinlight", "reflect", "screen", "softlight", "subtract", "vividlight", "xor", "softdifference", "geometric", "harmonic", "bleach", "stain", "interpolate", "hardoverlay"] | Default = Default("normal"),
    c2_mode: Int | Literal["addition", "addition128", "grainmerge", "and", "average", "burn", "darken", "difference", "difference128", "grainextract", "divide", "dodge", "exclusion", "extremity", "freeze", "glow", "hardlight", "hardmix", "heat", "lighten", "linearlight", "multiply", "multiply128", "negation", "normal", "or", "overlay", "phoenix", "pinlight", "reflect", "screen", "softlight", "subtract", "vividlight", "xor", "softdifference", "geometric", "harmonic", "bleach", "stain", "interpolate", "hardoverlay"] | Default = Default("normal"),
    c3_mode: Int | Literal["addition", "addition128", "grainmerge", "and", "average", "burn", "darken", "difference", "difference128", "grainextract", "divide", "dodge", "exclusion", "extremity", "freeze", "glow", "hardlight", "hardmix", "heat", "lighten", "linearlight", "multiply", "multiply128", "negation", "normal", "or", "overlay", "phoenix", "pinlight", "reflect", "screen", "softlight", "subtract", "vividlight", "xor", "softdifference", "geometric", "harmonic", "bleach", "stain", "interpolate", "hardoverlay"] | Default = Default("normal"),
    all_mode: Int | Literal["addition", "addition128", "grainmerge", "and", "average", "burn", "darken", "difference", "difference128", "grainextract", "divide", "dodge", "exclusion", "extremity", "freeze", "glow", "hardlight", "hardmix", "heat", "lighten", "linearlight", "multiply", "multiply128", "negation", "normal", "or", "overlay", "phoenix", "pinlight", "reflect", "screen", "softlight", "subtract", "vividlight", "xor", "softdifference", "geometric", "harmonic", "bleach", "stain", "interpolate", "hardoverlay"] | Default = Default(-1),
    c0_expr: String = Default(None),
    c1_expr: String = Default(None),
    c2_expr: String = Default(None),
    c3_expr: String = Default(None),
    all_expr: String = Default(None),
    c0_opacity: Double = Default(1.0),
    c1_opacity: Double = Default(1.0),
    c2_opacity: Double = Default(1.0),
    c3_opacity: Double = Default(1.0),
    all_opacity: Double = Default(1.0),
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Blend two video frames into each other.

    Args:
        c0_mode: blend mode for component #0 (from 0 to 39) (default normal)
        c1_mode: blend mode for component #1 (from 0 to 39) (default normal)
        c2_mode: blend mode for component #2 (from 0 to 39) (default normal)
        c3_mode: blend mode for component #3 (from 0 to 39) (default normal)
        all_mode: blend mode for all components (from -1 to 39) (default -1)
        c0_expr: color component #0 expression
        c1_expr: color component #1 expression
        c2_expr: color component #2 expression
        c3_expr: color component #3 expression
        all_expr: expression for all color components
        c0_opacity: color component #0 opacity (from 0 to 1) (default 1)
        c1_opacity: color component #1 opacity (from 0 to 1) (default 1)
        c2_opacity: color component #2 opacity (from 0 to 1) (default 1)
        c3_opacity: color component #3 opacity (from 0 to 1) (default 1)
        all_opacity: opacity for all color components (from 0 to 1) (default 1)
        eof_action: action on EOF from the secondary input (from 0 to 2) (default repeat)
        shortest: terminate when the shortest input ends (default false)
        repeatlast: extend the last frame of secondary streams past EOF (default true)
        ts_sync_mode: strictness of syncing on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#blend)

    """
    # Explicit kwargs win over the named options (same as `{...} | kwargs`).
    options: dict[str, Any] = {
        "c0_mode": c0_mode,
        "c1_mode": c1_mode,
        "c2_mode": c2_mode,
        "c3_mode": c3_mode,
        "all_mode": all_mode,
        "c0_expr": c0_expr,
        "c1_expr": c1_expr,
        "c2_expr": c2_expr,
        "c3_expr": c3_expr,
        "all_expr": all_expr,
        "c0_opacity": c0_opacity,
        "c1_opacity": c1_opacity,
        "c2_opacity": c2_opacity,
        "c3_opacity": c3_opacity,
        "all_opacity": all_opacity,
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    options.update(kwargs)
    node = filter_node_factory(
        FFMpegFilterDef(name="blend", typings_input=("video", "video"), typings_output=("video",)),
        _top,
        _bottom,
        **options,
    )
    return node.video(0)
|
775
|
+
|
776
|
+
|
777
|
+
def bm3d(
    *streams: VideoStream,
    sigma: Float = Default(1.0),
    block: Int = Default(16),
    bstep: Int = Default(4),
    group: Int = Default(1),
    range: Int = Default(9),
    mstep: Int = Default(1),
    thmse: Float = Default(0.0),
    hdthr: Float = Default(2.7),
    estim: Int | Literal["basic", "final"] | Default = Default("basic"),
    ref: Boolean = Default(False),
    planes: Int = Default(7),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Block-Matching 3D denoiser.

    Args:
        sigma: set denoising strength (from 0 to 99999.9) (default 1)
        block: set size of local patch (from 8 to 64) (default 16)
        bstep: set sliding step for processing blocks (from 1 to 64) (default 4)
        group: set maximal number of similar blocks (from 1 to 256) (default 1)
        range: set block matching range (from 1 to INT_MAX) (default 9)
        mstep: set step for block matching (from 1 to 64) (default 1)
        thmse: set threshold of mean square error for block matching (from 0 to INT_MAX) (default 0)
        hdthr: set hard threshold for 3D transfer domain (from 0 to INT_MAX) (default 2.7)
        estim: set filtering estimation mode (from 0 to 1) (default basic)
        ref: have reference stream (default false)
        planes: set planes to filter (from 0 to 15) (default 7)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#bm3d)

    """
    filter_node = filter_node_factory(
        FFMpegFilterDef(
            name="bm3d",
            # BUGFIX: the conditional expression must be parenthesized.  The
            # previous expression string, `[StreamType.video] + [StreamType.video]
            # if ref else []`, parses as `([v] + [v]) if ref else []` because
            # `if/else` binds looser than `+`, yielding ZERO inputs when
            # ref is false — but bm3d always consumes at least one video input.
            typings_input="[StreamType.video] + ([StreamType.video] if ref else [])",
            typings_output=("video",),
        ),
        *streams,
        **{
            "sigma": sigma,
            "block": block,
            "bstep": bstep,
            "group": group,
            "range": range,
            "mstep": mstep,
            "thmse": thmse,
            "hdthr": hdthr,
            "estim": estim,
            "ref": ref,
            "planes": planes,
            "enable": enable,
        }
        | kwargs
    )
    return filter_node.video(0)
|
842
|
+
|
843
|
+
|
844
|
+
def colormap(
    _default: VideoStream,
    _source: VideoStream,
    _target: VideoStream,
    *,
    patch_size: Image_size = Default("64x64"),
    nb_patches: Int = Default(0),
    type: Int | Literal["relative", "absolute"] | Default = Default("absolute"),
    kernel: Int | Literal["euclidean", "weuclidean"] | Default = Default("euclidean"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Apply custom Color Maps to video stream.

    Args:
        patch_size: patch size (default "64x64")
        nb_patches: number of patches (from 0 to 64) (default 0)
        type: target type used (from 0 to 1) (default absolute)
        kernel: kernel used for measuring color difference (from 0 to 1) (default euclidean)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#colormap)

    """
    # Explicit kwargs win over the named options (same as `{...} | kwargs`).
    options: dict[str, Any] = {
        "patch_size": patch_size,
        "nb_patches": nb_patches,
        "type": type,
        "kernel": kernel,
        "enable": enable,
    }
    options.update(kwargs)
    node = filter_node_factory(
        FFMpegFilterDef(name="colormap", typings_input=("video", "video", "video"), typings_output=("video",)),
        _default,
        _source,
        _target,
        **options,
    )
    return node.video(0)
|
889
|
+
|
890
|
+
|
891
|
+
def concat(
    *streams: FilterableStream,
    n: Int = Auto("len(streams) // (int(v) + int(a))"),
    v: Int = Default(1),
    a: Int = Default(0),
    unsafe: Boolean = Default(False),
    **kwargs: Any
) -> FilterNode:
    """
    Concatenate audio and video streams.

    Args:
        n: number of segments (from 1 to INT_MAX) (default 2)
        v: number of video streams (from 0 to INT_MAX) (default 1)
        a: number of audio streams (from 0 to INT_MAX) (default 0)
        unsafe: enable unsafe mode (default false)

    Returns:
        filter_node: the filter node


    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#concat)

    """
    # Explicit kwargs win over the named options (same as `{...} | kwargs`).
    options: dict[str, Any] = {"n": n, "v": v, "a": a, "unsafe": unsafe}
    options.update(kwargs)
    # Each of the n segments supplies v video streams followed by a audio
    # streams; the node emits one group of v video + a audio outputs.
    return filter_node_factory(
        FFMpegFilterDef(
            name="concat",
            typings_input="([StreamType.video]*int(v) + [StreamType.audio]*int(a))*int(n)",
            typings_output="[StreamType.video]*int(v) + [StreamType.audio]*int(a)",
        ),
        *streams,
        **options,
    )
|
934
|
+
|
935
|
+
|
936
|
+
def convolve(
    _main: VideoStream,
    _impulse: VideoStream,
    *,
    planes: Int = Default(7),
    impulse: Int | Literal["first", "all"] | Default = Default("all"),
    noise: Float = Default(1e-07),
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Convolve first video stream with second video stream.

    Args:
        planes: set planes to convolve (from 0 to 15) (default 7)
        impulse: when to process impulses (from 0 to 1) (default all)
        noise: set noise (from 0 to 1) (default 1e-07)
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#convolve)
    """
    options = {
        "planes": planes,
        "impulse": impulse,
        "noise": noise,
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="convolve", typings_input=("video", "video"), typings_output=("video",)),
        _main,
        _impulse,
        **{**options, **kwargs},
    )
    return node.video(0)
|
988
|
+
|
989
|
+
|
990
|
+
def corr(
    _main: VideoStream,
    _reference: VideoStream,
    *,
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Calculate the correlation between two video streams.

    Args:
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#corr)
    """
    options = {
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="corr", typings_input=("video", "video"), typings_output=("video",)),
        _main,
        _reference,
        **{**options, **kwargs},
    )
    return node.video(0)
|
1033
|
+
|
1034
|
+
|
1035
|
+
def decimate(
    *streams: VideoStream,
    cycle: Int = Default(5),
    dupthresh: Double = Default(1.1),
    scthresh: Double = Default(15.0),
    blockx: Int = Default(32),
    blocky: Int = Default(32),
    ppsrc: Boolean = Default(False),
    chroma: Boolean = Default(True),
    mixed: Boolean = Default(False),
    **kwargs: Any
) -> VideoStream:
    """
    Decimate frames (post field matching filter).

    Args:
        cycle: set the number of frame from which one will be dropped (from 2 to 25) (default 5)
        dupthresh: set duplicate threshold (from 0 to 100) (default 1.1)
        scthresh: set scene change threshold (from 0 to 100) (default 15)
        blockx: set the size of the x-axis blocks used during metric calculations (from 4 to 512) (default 32)
        blocky: set the size of the y-axis blocks used during metric calculations (from 4 to 512) (default 32)
        ppsrc: mark main input as a pre-processed input and activate clean source input stream (default false)
        chroma: set whether or not chroma is considered in the metric calculations (default true)
        mixed: set whether or not the input only partially contains content to be decimated (default false)

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#decimate)
    """
    # One mandatory video input, plus an optional clean-source input when ppsrc is set.
    definition = FFMpegFilterDef(
        name="decimate",
        typings_input="[StreamType.video] + ([StreamType.video] if ppsrc else [])",
        typings_output=("video",),
    )
    options = {
        "cycle": cycle,
        "dupthresh": dupthresh,
        "scthresh": scthresh,
        "blockx": blockx,
        "blocky": blocky,
        "ppsrc": ppsrc,
        "chroma": chroma,
        "mixed": mixed,
    }
    node = filter_node_factory(definition, *streams, **{**options, **kwargs})
    return node.video(0)
|
1088
|
+
|
1089
|
+
|
1090
|
+
def deconvolve(
    _main: VideoStream,
    _impulse: VideoStream,
    *,
    planes: Int = Default(7),
    impulse: Int | Literal["first", "all"] | Default = Default("all"),
    noise: Float = Default(1e-07),
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Deconvolve first video stream with second video stream.

    Args:
        planes: set planes to deconvolve (from 0 to 15) (default 7)
        impulse: when to process impulses (from 0 to 1) (default all)
        noise: set noise (from 0 to 1) (default 1e-07)
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#deconvolve)
    """
    options = {
        "planes": planes,
        "impulse": impulse,
        "noise": noise,
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="deconvolve", typings_input=("video", "video"), typings_output=("video",)),
        _main,
        _impulse,
        **{**options, **kwargs},
    )
    return node.video(0)
|
1142
|
+
|
1143
|
+
|
1144
|
+
def displace(
    _source: VideoStream,
    _xmap: VideoStream,
    _ymap: VideoStream,
    *,
    edge: Int | Literal["blank", "smear", "wrap", "mirror"] | Default = Default("smear"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Displace pixels.

    Args:
        edge: set edge mode (from 0 to 3) (default smear)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#displace)
    """
    # Three inputs: the frame to displace plus per-axis displacement maps.
    node = filter_node_factory(
        FFMpegFilterDef(name="displace", typings_input=("video", "video", "video"), typings_output=("video",)),
        _source,
        _xmap,
        _ymap,
        **{**{"edge": edge, "enable": enable}, **kwargs},
    )
    return node.video(0)
|
1180
|
+
|
1181
|
+
|
1182
|
+
def feedback(
    _default: VideoStream, _feedin: VideoStream, *, x: Int = Default(0), w: Int = Default(0), **kwargs: Any
) -> tuple[VideoStream, VideoStream,]:
    """
    Apply feedback video filter.

    Args:
        x: set top left crop position (from 0 to INT_MAX) (default 0)
        w: set crop size (from 0 to INT_MAX) (default 0)

    Returns:
        default: the video stream
        feedout: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#feedback)
    """
    node = filter_node_factory(
        FFMpegFilterDef(name="feedback", typings_input=("video", "video"), typings_output=("video", "video")),
        _default,
        _feedin,
        **{**{"x": x, "w": w}, **kwargs},
    )
    # Two outputs: the filtered stream and the feed-out stream.
    return node.video(0), node.video(1)
|
1215
|
+
|
1216
|
+
|
1217
|
+
def fieldmatch(
    *streams: VideoStream,
    order: Int | Literal["auto", "bff", "tff"] | Default = Default("auto"),
    mode: Int | Literal["pc", "pc_n", "pc_u", "pc_n_ub", "pcn", "pcn_ub"] | Default = Default("pc_n"),
    ppsrc: Boolean = Default(False),
    field: Int | Literal["auto", "bottom", "top"] | Default = Default("auto"),
    mchroma: Boolean = Default(True),
    y0: Int = Default(0),
    scthresh: Double = Default(12.0),
    combmatch: Int | Literal["none", "sc", "full"] | Default = Default("sc"),
    combdbg: Int | Literal["none", "pcn", "pcnub"] | Default = Default("none"),
    cthresh: Int = Default(9),
    chroma: Boolean = Default(False),
    blockx: Int = Default(16),
    blocky: Int = Default(16),
    combpel: Int = Default(80),
    **kwargs: Any
) -> VideoStream:
    """
    Field matching for inverse telecine.

    Args:
        order: specify the assumed field order (from -1 to 1) (default auto)
        mode: set the matching mode or strategy to use (from 0 to 5) (default pc_n)
        ppsrc: mark main input as a pre-processed input and activate clean source input stream (default false)
        field: set the field to match from (from -1 to 1) (default auto)
        mchroma: set whether or not chroma is included during the match comparisons (default true)
        y0: define an exclusion band which excludes the lines between y0 and y1 from the field matching decision (from 0 to INT_MAX) (default 0)
        scthresh: set scene change detection threshold (from 0 to 100) (default 12)
        combmatch: set combmatching mode (from 0 to 2) (default sc)
        combdbg: enable comb debug (from 0 to 2) (default none)
        cthresh: set the area combing threshold used for combed frame detection (from -1 to 255) (default 9)
        chroma: set whether or not chroma is considered in the combed frame decision (default false)
        blockx: set the x-axis size of the window used during combed frame detection (from 4 to 512) (default 16)
        blocky: set the y-axis size of the window used during combed frame detection (from 4 to 512) (default 16)
        combpel: set the number of combed pixels inside any of the blocky by blockx size blocks on the frame for the frame to be detected as combed (from 0 to INT_MAX) (default 80)

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#fieldmatch)
    """
    filter_node = filter_node_factory(
        FFMpegFilterDef(
            name="fieldmatch",
            # FIX: the optional clean-source tail must be parenthesized.
            # The previous expression "[StreamType.video] + [StreamType.video] if ppsrc else []"
            # parsed as "(... + ...) if ppsrc else []", i.e. ZERO inputs when
            # ppsrc is false.  One video input is always required; ppsrc adds a
            # second (same pattern as the `decimate` filter above).
            typings_input="[StreamType.video] + ([StreamType.video] if ppsrc else [])",
            typings_output=("video",),
        ),
        *streams,
        **{
            "order": order,
            "mode": mode,
            "ppsrc": ppsrc,
            "field": field,
            "mchroma": mchroma,
            "y0": y0,
            "scthresh": scthresh,
            "combmatch": combmatch,
            "combdbg": combdbg,
            "cthresh": cthresh,
            "chroma": chroma,
            "blockx": blockx,
            "blocky": blocky,
            "combpel": combpel,
        }
        | kwargs
    )
    return filter_node.video(0)
|
1288
|
+
|
1289
|
+
|
1290
|
+
def framepack(
    _left: VideoStream,
    _right: VideoStream,
    *,
    format: Int | Literal["sbs", "tab", "frameseq", "lines", "columns"] | Default = Default("sbs"),
    **kwargs: Any
) -> VideoStream:
    """
    Generate a frame packed stereoscopic video.

    Args:
        format: Frame pack output format (from 0 to INT_MAX) (default sbs)

    Returns:
        packed: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#framepack)
    """
    node = filter_node_factory(
        FFMpegFilterDef(name="framepack", typings_input=("video", "video"), typings_output=("video",)),
        _left,
        _right,
        **{**{"format": format}, **kwargs},
    )
    return node.video(0)
|
1321
|
+
|
1322
|
+
|
1323
|
+
def freezeframes(
    _source: VideoStream,
    _replace: VideoStream,
    *,
    first: Int64 = Default(0),
    last: Int64 = Default(0),
    replace: Int64 = Default(0),
    **kwargs: Any
) -> VideoStream:
    """
    Freeze video frames.

    Args:
        first: set first frame to freeze (from 0 to I64_MAX) (default 0)
        last: set last frame to freeze (from 0 to I64_MAX) (default 0)
        replace: set frame to replace (from 0 to I64_MAX) (default 0)

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#freezeframes)
    """
    options = {"first": first, "last": last, "replace": replace}
    node = filter_node_factory(
        FFMpegFilterDef(name="freezeframes", typings_input=("video", "video"), typings_output=("video",)),
        _source,
        _replace,
        **{**options, **kwargs},
    )
    return node.video(0)
|
1360
|
+
|
1361
|
+
|
1362
|
+
def guided(
    *streams: VideoStream,
    radius: Int = Default(3),
    eps: Float = Default(0.01),
    mode: Int | Literal["basic", "fast"] | Default = Default("basic"),
    sub: Int = Default(4),
    guidance: Int | Literal["off", "on"] | Default = Default("off"),
    planes: Int = Default(1),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Apply Guided filter.

    Args:
        radius: set the box radius (from 1 to 20) (default 3)
        eps: set the regularization parameter (with square) (from 0 to 1) (default 0.01)
        mode: set filtering mode (0: basic mode; 1: fast mode) (from 0 to 1) (default basic)
        sub: subsampling ratio for fast mode (from 2 to 64) (default 4)
        guidance: set guidance mode (0: off mode; 1: on mode) (from 0 to 1) (default off)
        planes: set planes to filter (from 0 to 15) (default 1)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#guided)
    """
    filter_node = filter_node_factory(
        FFMpegFilterDef(
            name="guided",
            # FIX: parenthesize the optional guidance input.  The previous
            # expression "[StreamType.video] + [StreamType.video] if guidance else []"
            # parsed as "(... + ...) if guidance else []", i.e. ZERO inputs when
            # guidance is off.  One video input is always required; guidance=on
            # adds a second (same pattern as the `decimate` filter).
            typings_input="[StreamType.video] + ([StreamType.video] if guidance else [])",
            typings_output=("video",),
        ),
        *streams,
        **{
            "radius": radius,
            "eps": eps,
            "mode": mode,
            "sub": sub,
            "guidance": guidance,
            "planes": planes,
            "enable": enable,
        }
        | kwargs
    )
    return filter_node.video(0)
|
1412
|
+
|
1413
|
+
|
1414
|
+
def haldclut(
    _main: VideoStream,
    _clut: VideoStream,
    *,
    clut: Int | Literal["first", "all"] | Default = Default("all"),
    interp: Int | Literal["nearest", "trilinear", "tetrahedral", "pyramid", "prism"] | Default = Default("tetrahedral"),
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Adjust colors using a Hald CLUT.

    Args:
        clut: when to process CLUT (from 0 to 1) (default all)
        interp: select interpolation mode (from 0 to 4) (default tetrahedral)
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#haldclut)
    """
    options = {
        "clut": clut,
        "interp": interp,
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="haldclut", typings_input=("video", "video"), typings_output=("video",)),
        _main,
        _clut,
        **{**options, **kwargs},
    )
    return node.video(0)
|
1463
|
+
|
1464
|
+
|
1465
|
+
def headphone(
    *streams: AudioStream,
    map: String = Default(None),
    gain: Float = Default(0.0),
    lfe: Float = Default(0.0),
    type: Int | Literal["time", "freq"] | Default = Default("freq"),
    size: Int = Default(1024),
    hrir: Int | Literal["stereo", "multich"] | Default = Default("stereo"),
    **kwargs: Any
) -> AudioStream:
    """
    Apply headphone binaural spatialization with HRTFs in additional streams.

    Args:
        map: set channels convolution mappings
        gain: set gain in dB (from -20 to 40) (default 0)
        lfe: set lfe gain in dB (from -20 to 40) (default 0)
        type: set processing (from 0 to 1) (default freq)
        size: set frame size (from 1024 to 96000) (default 1024)
        hrir: set hrir format (from 0 to 1) (default stereo)

    Returns:
        default: the audio stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#headphone)
    """
    # NOTE(review): in the typings_input expression below, Python's conditional
    # binds looser than `+`, so the whole "[StreamType.audio] + [StreamType.audio] * ..."
    # is gated on `int(hrir) == 1` and the else-branch yields NO inputs at all.
    # Sibling filters (`decimate`, `limitdiff`) parenthesize the optional tail.
    # The intended arity per HRIR mode (stereo vs multich) is not derivable
    # from this file alone — confirm against the ffmpeg headphone filter docs
    # before changing the string.
    filter_node = filter_node_factory(
        FFMpegFilterDef(
            name="headphone",
            typings_input="[StreamType.audio] + [StreamType.audio] * (len(str(map).split('|')) - 1) if int(hrir) == 1 else []",
            typings_output=("audio",),
        ),
        *streams,
        **{
            "map": map,
            "gain": gain,
            "lfe": lfe,
            "type": type,
            "size": size,
            "hrir": hrir,
        }
        | kwargs
    )
    return filter_node.audio(0)
|
1512
|
+
|
1513
|
+
|
1514
|
+
def hstack(
    *streams: VideoStream, inputs: Int = Auto("len(streams)"), shortest: Boolean = Default(False), **kwargs: Any
) -> VideoStream:
    """
    Stack video inputs horizontally.

    Args:
        inputs: set number of inputs (from 2 to INT_MAX) (default 2)
        shortest: force termination when the shortest input terminates (default false)

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#hstack)
    """
    node = filter_node_factory(
        FFMpegFilterDef(name="hstack", typings_input="[StreamType.video] * int(inputs)", typings_output=("video",)),
        *streams,
        **{**{"inputs": inputs, "shortest": shortest}, **kwargs},
    )
    return node.video(0)
|
1542
|
+
|
1543
|
+
|
1544
|
+
def hysteresis(
    _base: VideoStream,
    _alt: VideoStream,
    *,
    planes: Int = Default(15),
    threshold: Int = Default(0),
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Grow first stream into second stream by connecting components.

    Args:
        planes: set planes (from 0 to 15) (default 15)
        threshold: set threshold (from 0 to 65535) (default 0)
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#hysteresis)
    """
    options = {
        "planes": planes,
        "threshold": threshold,
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="hysteresis", typings_input=("video", "video"), typings_output=("video",)),
        _base,
        _alt,
        **{**options, **kwargs},
    )
    return node.video(0)
|
1593
|
+
|
1594
|
+
|
1595
|
+
def identity(
    _main: VideoStream,
    _reference: VideoStream,
    *,
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Calculate the Identity between two video streams.

    Args:
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#identity)
    """
    options = {
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="identity", typings_input=("video", "video"), typings_output=("video",)),
        _main,
        _reference,
        **{**options, **kwargs},
    )
    return node.video(0)
|
1638
|
+
|
1639
|
+
|
1640
|
+
def interleave(
    *streams: VideoStream,
    nb_inputs: Int = Auto("len(streams)"),
    duration: Int | Literal["longest", "shortest", "first"] | Default = Default("longest"),
    **kwargs: Any
) -> VideoStream:
    """
    Temporally interleave video inputs.

    Args:
        nb_inputs: set number of inputs (from 1 to INT_MAX) (default 2)
        duration: how to determine the end-of-stream (from 0 to 2) (default longest)

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#interleave_002c-ainterleave)
    """
    definition = FFMpegFilterDef(
        name="interleave", typings_input="[StreamType.video] * int(nb_inputs)", typings_output=("video",)
    )
    options = {"nb_inputs": nb_inputs, "duration": duration}
    node = filter_node_factory(definition, *streams, **{**options, **kwargs})
    return node.video(0)
|
1673
|
+
|
1674
|
+
|
1675
|
+
def join(
    *streams: AudioStream,
    inputs: Int = Auto("len(streams)"),
    channel_layout: String = Default("stereo"),
    map: String = Default(None),
    **kwargs: Any
) -> AudioStream:
    """
    Join multiple audio streams into multi-channel output.

    Args:
        inputs: Number of input streams. (from 1 to INT_MAX) (default 2)
        channel_layout: Channel layout of the output stream. (default "stereo")
        map: A comma-separated list of channels maps in the format 'input_stream.input_channel-output_channel.

    Returns:
        default: the audio stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#join)
    """
    options = {"inputs": inputs, "channel_layout": channel_layout, "map": map}
    node = filter_node_factory(
        FFMpegFilterDef(name="join", typings_input="[StreamType.audio] * int(inputs)", typings_output=("audio",)),
        *streams,
        **{**options, **kwargs},
    )
    return node.audio(0)
|
1709
|
+
|
1710
|
+
|
1711
|
+
def libvmaf(
    _main: VideoStream,
    _reference: VideoStream,
    *,
    model_path: String = Default(None),
    log_path: String = Default(None),
    log_fmt: String = Default("xml"),
    enable_transform: Boolean = Default(False),
    psnr: Boolean = Default(False),
    ssim: Boolean = Default(False),
    ms_ssim: Boolean = Default(False),
    pool: String = Default(None),
    n_threads: Int = Default(0),
    n_subsample: Int = Default(1),
    enable_conf_interval: Boolean = Default(False),
    model: String = Default("version=vmaf_v0.6.1"),
    feature: String = Default(None),
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    **kwargs: Any
) -> VideoStream:
    """
    Calculate the VMAF between two video streams.

    Args:
        model_path: use model='path=...'.
        log_path: Set the file path to be used to write log.
        log_fmt: Set the format of the log (csv, json, xml, or sub). (default "xml")
        enable_transform: use model='enable_transform=true'. (default false)
        psnr: use feature='name=psnr'. (default false)
        ssim: use feature='name=float_ssim'. (default false)
        ms_ssim: use feature='name=float_ms_ssim'. (default false)
        pool: Set the pool method to be used for computing vmaf.
        n_threads: Set number of threads to be used when computing vmaf. (from 0 to UINT32_MAX) (default 0)
        n_subsample: Set interval for frame subsampling used when computing vmaf. (from 1 to UINT32_MAX) (default 1)
        enable_conf_interval: model='enable_conf_interval=true'. (default false)
        model: Set the model to be used for computing vmaf. (default "version=vmaf_v0.6.1")
        feature: Set the feature to be used for computing vmaf.
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#libvmaf)
    """
    options = {
        "model_path": model_path,
        "log_path": log_path,
        "log_fmt": log_fmt,
        "enable_transform": enable_transform,
        "psnr": psnr,
        "ssim": ssim,
        "ms_ssim": ms_ssim,
        "pool": pool,
        "n_threads": n_threads,
        "n_subsample": n_subsample,
        "enable_conf_interval": enable_conf_interval,
        "model": model,
        "feature": feature,
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="libvmaf", typings_input=("video", "video"), typings_output=("video",)),
        _main,
        _reference,
        **{**options, **kwargs},
    )
    return node.video(0)
|
1790
|
+
|
1791
|
+
|
1792
|
+
def limitdiff(
    *streams: VideoStream,
    threshold: Float = Default(0.00392157),
    elasticity: Float = Default(2.0),
    reference: Boolean = Default(False),
    planes: Int = Default(15),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """

    Apply filtering with limiting difference.

    Args:
        threshold: set the threshold (from 0 to 1) (default 0.00392157)
        elasticity: set the elasticity (from 0 to 10) (default 2)
        reference: enable reference stream (default false)
        planes: set the planes to filter (from 0 to 15) (default 15)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#limitdiff)

    """
    # Input count is dynamic: a third (reference) stream is consumed only
    # when the `reference` option is enabled, hence the lazily-evaluated
    # typings_input expression.
    options = {
        "threshold": threshold,
        "elasticity": elasticity,
        "reference": reference,
        "planes": planes,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(
            name="limitdiff",
            typings_input="[StreamType.video, StreamType.video] + ([StreamType.video] if reference else [])",
            typings_output=("video",),
        ),
        *streams,
        **(options | kwargs),
    )
    return node.video(0)
|
1836
|
+
|
1837
|
+
|
1838
|
+
def lut2(
    _srcx: VideoStream,
    _srcy: VideoStream,
    *,
    c0: String = Default("x"),
    c1: String = Default("x"),
    c2: String = Default("x"),
    c3: String = Default("x"),
    d: Int = Default(0),
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """

    Compute and apply a lookup table from two video inputs.

    Args:
        c0: set component #0 expression (default "x")
        c1: set component #1 expression (default "x")
        c2: set component #2 expression (default "x")
        c3: set component #3 expression (default "x")
        d: set output depth (from 0 to 16) (default 0)
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#lut2_002c-tlut2)

    """
    options = {
        "c0": c0,
        "c1": c1,
        "c2": c2,
        "c3": c3,
        "d": d,
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="lut2", typings_input=("video", "video"), typings_output=("video",)),
        _srcx,
        _srcy,
        **(options | kwargs),
    )
    return node.video(0)
|
1896
|
+
|
1897
|
+
|
1898
|
+
def maskedclamp(
    _base: VideoStream,
    _dark: VideoStream,
    _bright: VideoStream,
    *,
    undershoot: Int = Default(0),
    overshoot: Int = Default(0),
    planes: Int = Default(15),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """

    Clamp first stream with second stream and third stream.

    Args:
        undershoot: set undershoot (from 0 to 65535) (default 0)
        overshoot: set overshoot (from 0 to 65535) (default 0)
        planes: set planes (from 0 to 15) (default 15)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#maskedclamp)

    """
    options = {
        "undershoot": undershoot,
        "overshoot": overshoot,
        "planes": planes,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="maskedclamp", typings_input=("video", "video", "video"), typings_output=("video",)),
        _base,
        _dark,
        _bright,
        **(options | kwargs),
    )
    return node.video(0)
|
1940
|
+
|
1941
|
+
|
1942
|
+
def maskedmax(
    _source: VideoStream,
    _filter1: VideoStream,
    _filter2: VideoStream,
    *,
    planes: Int = Default(15),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """

    Apply filtering with maximum difference of two streams.

    Args:
        planes: set planes (from 0 to 15) (default 15)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#maskedmax)

    """
    options = {
        "planes": planes,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="maskedmax", typings_input=("video", "video", "video"), typings_output=("video",)),
        _source,
        _filter1,
        _filter2,
        **(options | kwargs),
    )
    return node.video(0)
|
1978
|
+
|
1979
|
+
|
1980
|
+
def maskedmerge(
    _base: VideoStream,
    _overlay: VideoStream,
    _mask: VideoStream,
    *,
    planes: Int = Default(15),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """

    Merge first stream with second stream using third stream as mask.

    Args:
        planes: set planes (from 0 to 15) (default 15)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#maskedmerge)

    """
    options = {
        "planes": planes,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="maskedmerge", typings_input=("video", "video", "video"), typings_output=("video",)),
        _base,
        _overlay,
        _mask,
        **(options | kwargs),
    )
    return node.video(0)
|
2016
|
+
|
2017
|
+
|
2018
|
+
def maskedmin(
    _source: VideoStream,
    _filter1: VideoStream,
    _filter2: VideoStream,
    *,
    planes: Int = Default(15),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """

    Apply filtering with minimum difference of two streams.

    Args:
        planes: set planes (from 0 to 15) (default 15)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#maskedmin)

    """
    options = {
        "planes": planes,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="maskedmin", typings_input=("video", "video", "video"), typings_output=("video",)),
        _source,
        _filter1,
        _filter2,
        **(options | kwargs),
    )
    return node.video(0)
|
2054
|
+
|
2055
|
+
|
2056
|
+
def maskedthreshold(
    _source: VideoStream,
    _reference: VideoStream,
    *,
    threshold: Int = Default(1),
    planes: Int = Default(15),
    mode: Int | Literal["abs", "diff"] | Default = Default("abs"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """

    Pick pixels comparing absolute difference of two streams with threshold.

    Args:
        threshold: set threshold (from 0 to 65535) (default 1)
        planes: set planes (from 0 to 15) (default 15)
        mode: set mode (from 0 to 1) (default abs)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#maskedthreshold)

    """
    options = {
        "threshold": threshold,
        "planes": planes,
        "mode": mode,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="maskedthreshold", typings_input=("video", "video"), typings_output=("video",)),
        _source,
        _reference,
        **(options | kwargs),
    )
    return node.video(0)
|
2096
|
+
|
2097
|
+
|
2098
|
+
def mergeplanes(
    *streams: VideoStream,
    mapping: Int = Default(-1),
    format: Pix_fmt = Default("yuva444p"),
    map0s: Int = Default(0),
    map0p: Int = Default(0),
    map1s: Int = Default(0),
    map1p: Int = Default(0),
    map2s: Int = Default(0),
    map2p: Int = Default(0),
    map3s: Int = Default(0),
    map3p: Int = Default(0),
    **kwargs: Any
) -> VideoStream:
    """

    Merge planes.

    Args:
        mapping: set input to output plane mapping (from -1 to 8.58993e+08) (default -1)
        format: set output pixel format (default yuva444p)
        map0s: set 1st input to output stream mapping (from 0 to 3) (default 0)
        map0p: set 1st input to output plane mapping (from 0 to 3) (default 0)
        map1s: set 2nd input to output stream mapping (from 0 to 3) (default 0)
        map1p: set 2nd input to output plane mapping (from 0 to 3) (default 0)
        map2s: set 3rd input to output stream mapping (from 0 to 3) (default 0)
        map2p: set 3rd input to output plane mapping (from 0 to 3) (default 0)
        map3s: set 4th input to output stream mapping (from 0 to 3) (default 0)
        map3p: set 4th input to output plane mapping (from 0 to 3) (default 0)

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#mergeplanes)

    """
    # The number of inputs is derived lazily from the hex digits of the
    # `mapping` option (one input per referenced stream index).
    options = {
        "mapping": mapping,
        "format": format,
        "map0s": map0s,
        "map0p": map0p,
        "map1s": map1s,
        "map1p": map1p,
        "map2s": map2s,
        "map2p": map2p,
        "map3s": map3s,
        "map3p": map3p,
    }
    node = filter_node_factory(
        FFMpegFilterDef(
            name="mergeplanes",
            typings_input="[StreamType.video] * int(max(hex(int(mapping))[2::2]))",
            typings_output=("video",),
        ),
        *streams,
        **(options | kwargs),
    )
    return node.video(0)
|
2157
|
+
|
2158
|
+
|
2159
|
+
def midequalizer(
    _in0: VideoStream, _in1: VideoStream, *, planes: Int = Default(15), enable: String = Default(None), **kwargs: Any
) -> VideoStream:
    """

    Apply Midway Equalization.

    Args:
        planes: set planes (from 0 to 15) (default 15)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#midequalizer)

    """
    options = {
        "planes": planes,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="midequalizer", typings_input=("video", "video"), typings_output=("video",)),
        _in0,
        _in1,
        **(options | kwargs),
    )
    return node.video(0)
|
2188
|
+
|
2189
|
+
|
2190
|
+
def mix(
    *streams: VideoStream,
    inputs: Int = Auto("len(streams)"),
    weights: String = Default("1 1"),
    scale: Float = Default(0.0),
    planes: Flags = Default("F"),
    duration: Int | Literal["longest", "shortest", "first"] | Default = Default("longest"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """

    Mix video inputs.

    Args:
        inputs: set number of inputs (from 2 to 32767) (default 2)
        weights: set weight for each input (default "1 1")
        scale: set scale (from 0 to 32767) (default 0)
        planes: set what planes to filter (default F)
        duration: how to determine end of stream (from 0 to 2) (default longest)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#mix)

    """
    # `inputs` defaults to the number of supplied streams (Auto), and also
    # drives the lazily-evaluated input typing below.
    options = {
        "inputs": inputs,
        "weights": weights,
        "scale": scale,
        "planes": planes,
        "duration": duration,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="mix", typings_input="[StreamType.video] * int(inputs)", typings_output=("video",)),
        *streams,
        **(options | kwargs),
    )
    return node.video(0)
|
2233
|
+
|
2234
|
+
|
2235
|
+
def morpho(
    _default: VideoStream,
    _structure: VideoStream,
    *,
    mode: Int
    | Literal["erode", "dilate", "open", "close", "gradient", "tophat", "blackhat"]
    | Default = Default("erode"),
    planes: Int = Default(7),
    structure: Int | Literal["first", "all"] | Default = Default("all"),
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """

    Apply Morphological filter.

    Args:
        mode: set morphological transform (from 0 to 6) (default erode)
        planes: set planes to filter (from 0 to 15) (default 7)
        structure: when to process structures (from 0 to 1) (default all)
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#morpho)

    """
    options = {
        "mode": mode,
        "planes": planes,
        "structure": structure,
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="morpho", typings_input=("video", "video"), typings_output=("video",)),
        _default,
        _structure,
        **(options | kwargs),
    )
    return node.video(0)
|
2289
|
+
|
2290
|
+
|
2291
|
+
def msad(
    _main: VideoStream,
    _reference: VideoStream,
    *,
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """

    Calculate the MSAD between two video streams.

    Args:
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#msad)

    """
    options = {
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="msad", typings_input=("video", "video"), typings_output=("video",)),
        _main,
        _reference,
        **(options | kwargs),
    )
    return node.video(0)
|
2334
|
+
|
2335
|
+
|
2336
|
+
def multiply(
    _source: VideoStream,
    _factor: VideoStream,
    *,
    scale: Float = Default(1.0),
    offset: Float = Default(0.5),
    planes: Flags = Default("F"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """

    Multiply first video stream with second video stream.

    Args:
        scale: set scale (from 0 to 9) (default 1)
        offset: set offset (from -1 to 1) (default 0.5)
        planes: set planes (default F)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#multiply)

    """
    options = {
        "scale": scale,
        "offset": offset,
        "planes": planes,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="multiply", typings_input=("video", "video"), typings_output=("video",)),
        _source,
        _factor,
        **(options | kwargs),
    )
    return node.video(0)
|
2376
|
+
|
2377
|
+
|
2378
|
+
def overlay(
    _main: VideoStream,
    _overlay: VideoStream,
    *,
    x: String = Default("0"),
    y: String = Default("0"),
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    eval: Int | Literal["init", "frame"] | Default = Default("frame"),
    shortest: Boolean = Default(False),
    format: Int
    | Literal["yuv420", "yuv420p10", "yuv422", "yuv422p10", "yuv444", "rgb", "gbrp", "auto"]
    | Default = Default("yuv420"),
    repeatlast: Boolean = Default(True),
    alpha: Int | Literal["straight", "premultiplied"] | Default = Default("straight"),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """

    Overlay a video source on top of the input.

    Args:
        x: set the x expression (default "0")
        y: set the y expression (default "0")
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        eval: specify when to evaluate expressions (from 0 to 1) (default frame)
        shortest: force termination when the shortest input terminates (default false)
        format: set output format (from 0 to 7) (default yuv420)
        repeatlast: repeat overlay of the last overlay frame (default true)
        alpha: alpha format (from 0 to 1) (default straight)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#overlay)

    """
    # Fix: the eof_action Literal previously listed "repeat", "endall",
    # "pass" twice; deduplicated to the canonical three members (the type is
    # equivalent, so this is backward-compatible for every caller).
    filter_node = filter_node_factory(
        FFMpegFilterDef(name="overlay", typings_input=("video", "video"), typings_output=("video",)),
        _main,
        _overlay,
        **{
            "x": x,
            "y": y,
            "eof_action": eof_action,
            "eval": eval,
            "shortest": shortest,
            "format": format,
            "repeatlast": repeatlast,
            "alpha": alpha,
            "ts_sync_mode": ts_sync_mode,
            "enable": enable,
        }
        | kwargs
    )
    return filter_node.video(0)
|
2438
|
+
|
2439
|
+
|
2440
|
+
def paletteuse(
    _default: VideoStream,
    _palette: VideoStream,
    *,
    dither: Int
    | Literal["bayer", "heckbert", "floyd_steinberg", "sierra2", "sierra2_4a", "sierra3", "burkes", "atkinson"]
    | Default = Default("sierra2_4a"),
    bayer_scale: Int = Default(2),
    diff_mode: Int | Literal["rectangle"] | Default = Default(0),
    new: Boolean = Default(False),
    alpha_threshold: Int = Default(128),
    debug_kdtree: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """

    Use a palette to downsample an input video stream.

    Args:
        dither: select dithering mode (from 0 to 8) (default sierra2_4a)
        bayer_scale: set scale for bayer dithering (from 0 to 5) (default 2)
        diff_mode: set frame difference mode (from 0 to 1) (default 0)
        new: take new palette for each output frame (default false)
        alpha_threshold: set the alpha threshold for transparency (from 0 to 255) (default 128)
        debug_kdtree: save Graphviz graph of the kdtree in specified file

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#paletteuse)

    """
    options = {
        "dither": dither,
        "bayer_scale": bayer_scale,
        "diff_mode": diff_mode,
        "new": new,
        "alpha_threshold": alpha_threshold,
        "debug_kdtree": debug_kdtree,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="paletteuse", typings_input=("video", "video"), typings_output=("video",)),
        _default,
        _palette,
        **(options | kwargs),
    )
    return node.video(0)
|
2488
|
+
|
2489
|
+
|
2490
|
+
def premultiply(
    *streams: VideoStream,
    planes: Int = Default(15),
    inplace: Boolean = Default(False),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """

    PreMultiply first stream with first plane of second stream.

    Args:
        planes: set planes (from 0 to 15) (default 15)
        inplace: enable inplace mode (default false)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#premultiply)

    """
    # Fix: the typings_input expression was
    #   "[StreamType.video] + [StreamType.video] if inplace else []"
    # which, by Python precedence, means (2 inputs) if inplace else (0 inputs).
    # Per the FFmpeg docs, `inplace` means the alpha plane is taken from the
    # input stream itself, so the filter needs 1 input when inplace and 2
    # otherwise. Parenthesized to match the pattern used by limitdiff.
    filter_node = filter_node_factory(
        FFMpegFilterDef(
            name="premultiply",
            typings_input="[StreamType.video] + ([StreamType.video] if not inplace else [])",
            typings_output=("video",),
        ),
        *streams,
        **{
            "planes": planes,
            "inplace": inplace,
            "enable": enable,
        }
        | kwargs
    )
    return filter_node.video(0)
|
2528
|
+
|
2529
|
+
|
2530
|
+
def psnr(
    _main: VideoStream,
    _reference: VideoStream,
    *,
    stats_file: String = Default(None),
    stats_version: Int = Default(1),
    output_max: Boolean = Default(False),
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """

    Calculate the PSNR between two video streams.

    Args:
        stats_file: Set file where to store per-frame difference information
        stats_version: Set the format version for the stats file. (from 1 to 2) (default 1)
        output_max: Add raw stats (max values) to the output log. (default false)
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#psnr)

    """
    options = {
        "stats_file": stats_file,
        "stats_version": stats_version,
        "output_max": output_max,
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="psnr", typings_input=("video", "video"), typings_output=("video",)),
        _main,
        _reference,
        **(options | kwargs),
    )
    return node.video(0)
|
2582
|
+
|
2583
|
+
|
2584
|
+
def remap(
    _source: VideoStream,
    _xmap: VideoStream,
    _ymap: VideoStream,
    *,
    format: Int | Literal["color", "gray"] | Default = Default("color"),
    fill: Color = Default("black"),
    **kwargs: Any
) -> VideoStream:
    """

    Remap pixels.

    Args:
        format: set output format (from 0 to 1) (default color)
        fill: set the color of the unmapped pixels (default "black")

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#remap)

    """
    options = {
        "format": format,
        "fill": fill,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="remap", typings_input=("video", "video", "video"), typings_output=("video",)),
        _source,
        _xmap,
        _ymap,
        **(options | kwargs),
    )
    return node.video(0)
|
2620
|
+
|
2621
|
+
|
2622
|
+
def scale2ref(
    _default: VideoStream,
    _ref: VideoStream,
    *,
    w: String = Default(None),
    h: String = Default(None),
    flags: String = Default(""),
    interl: Boolean = Default(False),
    in_color_matrix: String
    | Literal["auto", "bt601", "bt470", "smpte170m", "bt709", "fcc", "smpte240m", "bt2020"]
    | Default = Default("auto"),
    out_color_matrix: String
    | Literal["auto", "bt601", "bt470", "smpte170m", "bt709", "fcc", "smpte240m", "bt2020"]
    | Default = Default(None),
    in_range: Int
    | Literal["auto", "unknown", "full", "limited", "jpeg", "mpeg", "tv", "pc"]
    | Default = Default("auto"),
    out_range: Int
    | Literal["auto", "unknown", "full", "limited", "jpeg", "mpeg", "tv", "pc"]
    | Default = Default("auto"),
    in_v_chr_pos: Int = Default(-513),
    in_h_chr_pos: Int = Default(-513),
    out_v_chr_pos: Int = Default(-513),
    out_h_chr_pos: Int = Default(-513),
    force_original_aspect_ratio: Int | Literal["disable", "decrease", "increase"] | Default = Default("disable"),
    force_divisible_by: Int = Default(1),
    param0: Double = Default("DBL_MAX"),
    param1: Double = Default("DBL_MAX"),
    eval: Int | Literal["init", "frame"] | Default = Default("init"),
    **kwargs: Any
) -> tuple[VideoStream, VideoStream,]:
    """

    Scale the input video size and/or convert the image format to the given reference.

    Args:
        w: Output video width
        h: Output video height
        flags: Flags to pass to libswscale (default "")
        interl: set interlacing (default false)
        in_color_matrix: set input YCbCr type (default "auto")
        out_color_matrix: set output YCbCr type
        in_range: set input color range (from 0 to 2) (default auto)
        out_range: set output color range (from 0 to 2) (default auto)
        in_v_chr_pos: input vertical chroma position in luma grid/256 (from -513 to 512) (default -513)
        in_h_chr_pos: input horizontal chroma position in luma grid/256 (from -513 to 512) (default -513)
        out_v_chr_pos: output vertical chroma position in luma grid/256 (from -513 to 512) (default -513)
        out_h_chr_pos: output horizontal chroma position in luma grid/256 (from -513 to 512) (default -513)
        force_original_aspect_ratio: decrease or increase w/h if necessary to keep the original AR (from 0 to 2) (default disable)
        force_divisible_by: enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used (from 1 to 256) (default 1)
        param0: Scaler param 0 (from -DBL_MAX to DBL_MAX) (default DBL_MAX)
        param1: Scaler param 1 (from -DBL_MAX to DBL_MAX) (default DBL_MAX)
        eval: specify when to evaluate expressions (from 0 to 1) (default init)

    Returns:
        default: the video stream
        ref: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#scale2ref)

    """
    options = {
        "w": w,
        "h": h,
        "flags": flags,
        "interl": interl,
        "in_color_matrix": in_color_matrix,
        "out_color_matrix": out_color_matrix,
        "in_range": in_range,
        "out_range": out_range,
        "in_v_chr_pos": in_v_chr_pos,
        "in_h_chr_pos": in_h_chr_pos,
        "out_v_chr_pos": out_v_chr_pos,
        "out_h_chr_pos": out_h_chr_pos,
        "force_original_aspect_ratio": force_original_aspect_ratio,
        "force_divisible_by": force_divisible_by,
        "param0": param0,
        "param1": param1,
        "eval": eval,
    }
    node = filter_node_factory(
        FFMpegFilterDef(name="scale2ref", typings_input=("video", "video"), typings_output=("video", "video")),
        _default,
        _ref,
        **(options | kwargs),
    )
    # Two outputs: the scaled stream and the (passed-through) reference.
    return node.video(0), node.video(1)
|
2713
|
+
|
2714
|
+
|
2715
|
+
def sidechaincompress(
    _main: AudioStream,
    _sidechain: AudioStream,
    *,
    level_in: Double = Default(1.0),
    mode: Int | Literal["downward", "upward"] | Default = Default("downward"),
    threshold: Double = Default(0.125),
    ratio: Double = Default(2.0),
    attack: Double = Default(20.0),
    release: Double = Default(250.0),
    makeup: Double = Default(1.0),
    knee: Double = Default(2.82843),
    link: Int | Literal["average", "maximum"] | Default = Default("average"),
    detection: Int | Literal["peak", "rms"] | Default = Default("rms"),
    level_sc: Double = Default(1.0),
    mix: Double = Default(1.0),
    **kwargs: Any
) -> AudioStream:
    """
    Sidechain compressor.

    Args:
        level_in: set input gain (from 0.015625 to 64) (default 1)
        mode: set mode (from 0 to 1) (default downward)
        threshold: set threshold (from 0.000976563 to 1) (default 0.125)
        ratio: set ratio (from 1 to 20) (default 2)
        attack: set attack (from 0.01 to 2000) (default 20)
        release: set release (from 0.01 to 9000) (default 250)
        makeup: set make up gain (from 1 to 64) (default 1)
        knee: set knee (from 1 to 8) (default 2.82843)
        link: set link type (from 0 to 1) (default average)
        detection: set detection (from 0 to 1) (default rms)
        level_sc: set sidechain gain (from 0.015625 to 64) (default 1)
        mix: set mix (from 0 to 1) (default 1)

    Returns:
        default: the audio stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#sidechaincompress)

    """
    # Two audio inputs (main + sidechain), one compressed audio output.
    options = {
        "level_in": level_in,
        "mode": mode,
        "threshold": threshold,
        "ratio": ratio,
        "attack": attack,
        "release": release,
        "makeup": makeup,
        "knee": knee,
        "link": link,
        "detection": detection,
        "level_sc": level_sc,
        "mix": mix,
    }
    filter_node = filter_node_factory(
        FFMpegFilterDef(name="sidechaincompress", typings_input=("audio", "audio"), typings_output=("audio",)),
        _main,
        _sidechain,
        **options | kwargs
    )
    return filter_node.audio(0)


def sidechaingate(
    _main: AudioStream,
    _sidechain: AudioStream,
    *,
    level_in: Double = Default(1.0),
    mode: Int | Literal["downward", "upward"] | Default = Default("downward"),
    range: Double = Default(0.06125),
    threshold: Double = Default(0.125),
    ratio: Double = Default(2.0),
    attack: Double = Default(20.0),
    release: Double = Default(250.0),
    makeup: Double = Default(1.0),
    knee: Double = Default(2.82843),
    detection: Int | Literal["peak", "rms"] | Default = Default("rms"),
    link: Int | Literal["average", "maximum"] | Default = Default("average"),
    level_sc: Double = Default(1.0),
    enable: String = Default(None),
    **kwargs: Any
) -> AudioStream:
    """
    Audio sidechain gate.

    Args:
        level_in: set input level (from 0.015625 to 64) (default 1)
        mode: set mode (from 0 to 1) (default downward)
        range: set max gain reduction (from 0 to 1) (default 0.06125)
        threshold: set threshold (from 0 to 1) (default 0.125)
        ratio: set ratio (from 1 to 9000) (default 2)
        attack: set attack (from 0.01 to 9000) (default 20)
        release: set release (from 0.01 to 9000) (default 250)
        makeup: set makeup gain (from 1 to 64) (default 1)
        knee: set knee (from 1 to 8) (default 2.82843)
        detection: set detection (from 0 to 1) (default rms)
        link: set link (from 0 to 1) (default average)
        level_sc: set sidechain gain (from 0.015625 to 64) (default 1)
        enable: timeline editing

    Returns:
        default: the audio stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#sidechaingate)

    """
    # Two audio inputs (main + sidechain), one gated audio output.
    options = {
        "level_in": level_in,
        "mode": mode,
        "range": range,
        "threshold": threshold,
        "ratio": ratio,
        "attack": attack,
        "release": release,
        "makeup": makeup,
        "knee": knee,
        "detection": detection,
        "link": link,
        "level_sc": level_sc,
        "enable": enable,
    }
    filter_node = filter_node_factory(
        FFMpegFilterDef(name="sidechaingate", typings_input=("audio", "audio"), typings_output=("audio",)),
        _main,
        _sidechain,
        **options | kwargs
    )
    return filter_node.audio(0)


def signature(
    *streams: VideoStream,
    detectmode: Int | Literal["off", "full", "fast"] | Default = Default("off"),
    nb_inputs: Int = Auto("len(streams)"),
    filename: String = Default(""),
    format: Int | Literal["binary", "xml"] | Default = Default("binary"),
    th_d: Int = Default(9000),
    th_dc: Int = Default(60000),
    th_xh: Int = Default(116),
    th_di: Int = Default(0),
    th_it: Double = Default(0.5),
    **kwargs: Any
) -> VideoStream:
    """
    Calculate the MPEG-7 video signature

    Args:
        detectmode: set the detectmode (from 0 to 2) (default off)
        nb_inputs: number of inputs (from 1 to INT_MAX) (default 1)
        filename: filename for output files (default "")
        format: set output format (from 0 to 1) (default binary)
        th_d: threshold to detect one word as similar (from 1 to INT_MAX) (default 9000)
        th_dc: threshold to detect all words as similar (from 1 to INT_MAX) (default 60000)
        th_xh: threshold to detect frames as similar (from 1 to INT_MAX) (default 116)
        th_di: minimum length of matching sequence in frames (from 0 to INT_MAX) (default 0)
        th_it: threshold for relation of good to all frames (from 0 to 1) (default 0.5)

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#signature)

    """
    # Variable number of video inputs, driven by nb_inputs; one video output.
    options = {
        "detectmode": detectmode,
        "nb_inputs": nb_inputs,
        "filename": filename,
        "format": format,
        "th_d": th_d,
        "th_dc": th_dc,
        "th_xh": th_xh,
        "th_di": th_di,
        "th_it": th_it,
    }
    filter_node = filter_node_factory(
        FFMpegFilterDef(
            name="signature", typings_input="[StreamType.video] * int(nb_inputs)", typings_output=("video",)
        ),
        *streams,
        **options | kwargs
    )
    return filter_node.video(0)


def spectrumsynth(
    _magnitude: VideoStream,
    _phase: VideoStream,
    *,
    sample_rate: Int = Default(44100),
    channels: Int = Default(1),
    scale: Int | Literal["lin", "log"] | Default = Default("log"),
    slide: Int | Literal["replace", "scroll", "fullframe", "rscroll"] | Default = Default("fullframe"),
    win_func: Int
    | Literal[
        "rect",
        "bartlett",
        "hann",
        "hanning",
        "hamming",
        "blackman",
        "welch",
        "flattop",
        "bharris",
        "bnuttall",
        "bhann",
        "sine",
        "nuttall",
        "lanczos",
        "gauss",
        "tukey",
        "dolph",
        "cauchy",
        "parzen",
        "poisson",
        "bohman",
        "kaiser",
    ]
    | Default = Default("rect"),
    overlap: Float = Default(1.0),
    orientation: Int | Literal["vertical", "horizontal"] | Default = Default("vertical"),
    **kwargs: Any
) -> AudioStream:
    """
    Convert input spectrum videos to audio output.

    Args:
        sample_rate: set sample rate (from 15 to INT_MAX) (default 44100)
        channels: set channels (from 1 to 8) (default 1)
        scale: set input amplitude scale (from 0 to 1) (default log)
        slide: set input sliding mode (from 0 to 3) (default fullframe)
        win_func: set window function (from 0 to 20) (default rect)
        overlap: set window overlap (from 0 to 1) (default 1)
        orientation: set orientation (from 0 to 1) (default vertical)

    Returns:
        default: the audio stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#spectrumsynth)

    """
    # Two video inputs (magnitude + phase spectra), one synthesized audio output.
    options = {
        "sample_rate": sample_rate,
        "channels": channels,
        "scale": scale,
        "slide": slide,
        "win_func": win_func,
        "overlap": overlap,
        "orientation": orientation,
    }
    filter_node = filter_node_factory(
        FFMpegFilterDef(name="spectrumsynth", typings_input=("video", "video"), typings_output=("audio",)),
        _magnitude,
        _phase,
        **options | kwargs
    )
    return filter_node.audio(0)


def ssim(
    _main: VideoStream,
    _reference: VideoStream,
    *,
    stats_file: String = Default(None),
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Calculate the SSIM between two video streams.

    Args:
        stats_file: Set file where to store per-frame difference information
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#ssim)

    """
    # Two video inputs (distorted + reference), one pass-through video output.
    options = {
        "stats_file": stats_file,
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    filter_node = filter_node_factory(
        FFMpegFilterDef(name="ssim", typings_input=("video", "video"), typings_output=("video",)),
        _main,
        _reference,
        **options | kwargs
    )
    return filter_node.video(0)


def streamselect(
    *streams: VideoStream, inputs: Int = Auto("len(streams)"), map: String = Default(None), **kwargs: Any
) -> FilterNode:
    """
    Select video streams

    Args:
        inputs: number of input streams (from 2 to INT_MAX) (default 2)
        map: input indexes to remap to outputs

    Returns:
        filter_node: the filter node


    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#streamselect_002c-astreamselect)

    """
    # Output count depends on how many indexes appear in `map`, so the raw
    # FilterNode is returned instead of a single stream.
    options = {
        "inputs": inputs,
        "map": map,
    }
    filter_node = filter_node_factory(
        FFMpegFilterDef(
            name="streamselect",
            typings_input="[StreamType.video] * int(inputs)",
            typings_output="[StreamType.video] * len(re.findall(r'\\d+', str(map)))",
        ),
        *streams,
        **options | kwargs
    )
    return filter_node


def threshold(
    _default: VideoStream,
    _threshold: VideoStream,
    _min: VideoStream,
    _max: VideoStream,
    *,
    planes: Int = Default(15),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Threshold first video stream using other video streams.

    Args:
        planes: set planes to filter (from 0 to 15) (default 15)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#threshold)

    """
    # Four video inputs (source, threshold, min, max), one video output.
    options = {
        "planes": planes,
        "enable": enable,
    }
    filter_node = filter_node_factory(
        FFMpegFilterDef(
            name="threshold", typings_input=("video", "video", "video", "video"), typings_output=("video",)
        ),
        _default,
        _threshold,
        _min,
        _max,
        **options | kwargs
    )
    return filter_node.video(0)


def unpremultiply(
    *streams: VideoStream,
    planes: Int = Default(15),
    inplace: Boolean = Default(False),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    UnPreMultiply first stream with first plane of second stream.

    Args:
        planes: set planes (from 0 to 15) (default 15)
        inplace: enable inplace mode (default false)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#unpremultiply)

    """
    # Input count depends on `inplace`: one stream in inplace mode, two otherwise.
    options = {
        "planes": planes,
        "inplace": inplace,
        "enable": enable,
    }
    filter_node = filter_node_factory(
        FFMpegFilterDef(
            name="unpremultiply",
            typings_input="[StreamType.video] + ([StreamType.video] if inplace else [])",
            typings_output=("video",),
        ),
        *streams,
        **options | kwargs
    )
    return filter_node.video(0)


def varblur(
    _default: VideoStream,
    _radius: VideoStream,
    *,
    min_r: Int = Default(0),
    max_r: Int = Default(8),
    planes: Int = Default(15),
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Apply Variable Blur filter.

    Args:
        min_r: set min blur radius (from 0 to 254) (default 0)
        max_r: set max blur radius (from 1 to 255) (default 8)
        planes: set planes to filter (from 0 to 15) (default 15)
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#varblur)

    """
    # Two video inputs (source + per-pixel radius map), one blurred video output.
    options = {
        "min_r": min_r,
        "max_r": max_r,
        "planes": planes,
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    filter_node = filter_node_factory(
        FFMpegFilterDef(name="varblur", typings_input=("video", "video"), typings_output=("video",)),
        _default,
        _radius,
        **options | kwargs
    )
    return filter_node.video(0)


def vif(
    _main: VideoStream,
    _reference: VideoStream,
    *,
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Calculate the VIF between two video streams.

    Args:
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#vif)

    """
    # Two video inputs (distorted + reference), one pass-through video output.
    options = {
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    filter_node = filter_node_factory(
        FFMpegFilterDef(name="vif", typings_input=("video", "video"), typings_output=("video",)),
        _main,
        _reference,
        **options | kwargs
    )
    return filter_node.video(0)


def vstack(
    *streams: VideoStream, inputs: Int = Auto("len(streams)"), shortest: Boolean = Default(False), **kwargs: Any
) -> VideoStream:
    """
    Stack video inputs vertically.

    Args:
        inputs: set number of inputs (from 2 to INT_MAX) (default 2)
        shortest: force termination when the shortest input terminates (default false)

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#vstack)

    """
    # Variable number of video inputs, driven by `inputs`; one stacked output.
    options = {
        "inputs": inputs,
        "shortest": shortest,
    }
    filter_node = filter_node_factory(
        FFMpegFilterDef(name="vstack", typings_input="[StreamType.video] * int(inputs)", typings_output=("video",)),
        *streams,
        **options | kwargs
    )
    return filter_node.video(0)


def xcorrelate(
    _primary: VideoStream,
    _secondary: VideoStream,
    *,
    planes: Int = Default(7),
    secondary: Int | Literal["first", "all"] | Default = Default("all"),
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Cross-correlate first video stream with second video stream.

    Args:
        planes: set planes to cross-correlate (from 0 to 15) (default 7)
        secondary: when to process secondary frame (from 0 to 1) (default all)
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#xcorrelate)

    """
    # Two video inputs (primary + secondary), one correlated video output.
    options = {
        "planes": planes,
        "secondary": secondary,
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    filter_node = filter_node_factory(
        FFMpegFilterDef(name="xcorrelate", typings_input=("video", "video"), typings_output=("video",)),
        _primary,
        _secondary,
        **options | kwargs
    )
    return filter_node.video(0)


def xfade(
    _main: VideoStream,
    _xfade: VideoStream,
    *,
    transition: Int
    | Literal[
        "custom",
        "fade",
        "wipeleft",
        "wiperight",
        "wipeup",
        "wipedown",
        "slideleft",
        "slideright",
        "slideup",
        "slidedown",
        "circlecrop",
        "rectcrop",
        "distance",
        "fadeblack",
        "fadewhite",
        "radial",
        "smoothleft",
        "smoothright",
        "smoothup",
        "smoothdown",
        "circleopen",
        "circleclose",
        "vertopen",
        "vertclose",
        "horzopen",
        "horzclose",
        "dissolve",
        "pixelize",
        "diagtl",
        "diagtr",
        "diagbl",
        "diagbr",
        "hlslice",
        "hrslice",
        "vuslice",
        "vdslice",
        "hblur",
        "fadegrays",
        "wipetl",
        "wipetr",
        "wipebl",
        "wipebr",
        "squeezeh",
        "squeezev",
        "zoomin",
        "fadefast",
        "fadeslow",
    ]
    | Default = Default("fade"),
    duration: Duration = Default(1.0),
    offset: Duration = Default(0.0),
    expr: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Cross fade one video with another video.

    Args:
        transition: set cross fade transition (from -1 to 45) (default fade)
        duration: set cross fade duration (default 1)
        offset: set cross fade start relative to first input stream (default 0)
        expr: set expression for custom transition

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#xfade)

    """
    # Two video inputs (outgoing + incoming), one cross-faded video output.
    options = {
        "transition": transition,
        "duration": duration,
        "offset": offset,
        "expr": expr,
    }
    filter_node = filter_node_factory(
        FFMpegFilterDef(name="xfade", typings_input=("video", "video"), typings_output=("video",)),
        _main,
        _xfade,
        **options | kwargs
    )
    return filter_node.video(0)


def xmedian(
    *streams: VideoStream,
    inputs: Int = Auto("len(streams)"),
    planes: Int = Default(15),
    percentile: Float = Default(0.5),
    eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
    shortest: Boolean = Default(False),
    repeatlast: Boolean = Default(True),
    ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
    enable: String = Default(None),
    **kwargs: Any
) -> VideoStream:
    """
    Pick median pixels from several video inputs.

    Args:
        inputs: set number of inputs (from 3 to 255) (default 3)
        planes: set planes to filter (from 0 to 15) (default 15)
        percentile: set percentile (from 0 to 1) (default 0.5)
        eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
        shortest: force termination when the shortest input terminates (default false)
        repeatlast: extend last frame of secondary streams beyond EOF (default true)
        ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
        enable: timeline editing

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#xmedian)

    """
    # Variable number of video inputs, driven by `inputs`; one median output.
    options = {
        "inputs": inputs,
        "planes": planes,
        "percentile": percentile,
        "eof_action": eof_action,
        "shortest": shortest,
        "repeatlast": repeatlast,
        "ts_sync_mode": ts_sync_mode,
        "enable": enable,
    }
    filter_node = filter_node_factory(
        FFMpegFilterDef(name="xmedian", typings_input="[StreamType.video] * int(inputs)", typings_output=("video",)),
        *streams,
        **options | kwargs
    )
    return filter_node.video(0)


def xstack(
    *streams: VideoStream,
    inputs: Int = Auto("len(streams)"),
    layout: String = Default(None),
    grid: Image_size = Default(None),
    shortest: Boolean = Default(False),
    fill: String = Default("none"),
    **kwargs: Any
) -> VideoStream:
    """
    Stack video inputs into custom layout.

    Args:
        inputs: set number of inputs (from 2 to INT_MAX) (default 2)
        layout: set custom layout
        grid: set fixed size grid layout
        shortest: force termination when the shortest input terminates (default false)
        fill: set the color for unused pixels (default "none")

    Returns:
        default: the video stream

    References:
        [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#xstack)

    """
    # Variable number of video inputs, driven by `inputs`; one composed output.
    options = {
        "inputs": inputs,
        "layout": layout,
        "grid": grid,
        "shortest": shortest,
        "fill": fill,
    }
    filter_node = filter_node_factory(
        FFMpegFilterDef(name="xstack", typings_input="[StreamType.video] * int(inputs)", typings_output=("video",)),
        *streams,
        **options | kwargs
    )
    return filter_node.video(0)
|