typed-ffmpeg-compatible 2.4.1__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
Files changed (45) hide show
  1. typed_ffmpeg/__init__.py +25 -0
  2. typed_ffmpeg/base.py +114 -0
  3. typed_ffmpeg/common/__init__.py +0 -0
  4. typed_ffmpeg/common/schema.py +308 -0
  5. typed_ffmpeg/common/serialize.py +132 -0
  6. typed_ffmpeg/dag/__init__.py +13 -0
  7. typed_ffmpeg/dag/compile.py +51 -0
  8. typed_ffmpeg/dag/context.py +221 -0
  9. typed_ffmpeg/dag/factory.py +31 -0
  10. typed_ffmpeg/dag/global_runnable/__init__.py +0 -0
  11. typed_ffmpeg/dag/global_runnable/global_args.py +178 -0
  12. typed_ffmpeg/dag/global_runnable/runnable.py +174 -0
  13. typed_ffmpeg/dag/io/__init__.py +0 -0
  14. typed_ffmpeg/dag/io/_input.py +197 -0
  15. typed_ffmpeg/dag/io/_output.py +320 -0
  16. typed_ffmpeg/dag/io/output_args.py +327 -0
  17. typed_ffmpeg/dag/nodes.py +479 -0
  18. typed_ffmpeg/dag/schema.py +210 -0
  19. typed_ffmpeg/dag/utils.py +41 -0
  20. typed_ffmpeg/dag/validate.py +172 -0
  21. typed_ffmpeg/exceptions.py +42 -0
  22. typed_ffmpeg/filters.py +3572 -0
  23. typed_ffmpeg/probe.py +43 -0
  24. typed_ffmpeg/py.typed +0 -0
  25. typed_ffmpeg/schema.py +29 -0
  26. typed_ffmpeg/streams/__init__.py +5 -0
  27. typed_ffmpeg/streams/audio.py +7358 -0
  28. typed_ffmpeg/streams/av.py +22 -0
  29. typed_ffmpeg/streams/channel_layout.py +39 -0
  30. typed_ffmpeg/streams/video.py +13469 -0
  31. typed_ffmpeg/types.py +119 -0
  32. typed_ffmpeg/utils/__init__.py +0 -0
  33. typed_ffmpeg/utils/escaping.py +49 -0
  34. typed_ffmpeg/utils/lazy_eval/__init__.py +0 -0
  35. typed_ffmpeg/utils/lazy_eval/operator.py +134 -0
  36. typed_ffmpeg/utils/lazy_eval/schema.py +211 -0
  37. typed_ffmpeg/utils/run.py +27 -0
  38. typed_ffmpeg/utils/snapshot.py +26 -0
  39. typed_ffmpeg/utils/typing.py +17 -0
  40. typed_ffmpeg/utils/view.py +64 -0
  41. typed_ffmpeg_compatible-2.4.1.dist-info/LICENSE +21 -0
  42. typed_ffmpeg_compatible-2.4.1.dist-info/METADATA +182 -0
  43. typed_ffmpeg_compatible-2.4.1.dist-info/RECORD +45 -0
  44. typed_ffmpeg_compatible-2.4.1.dist-info/WHEEL +4 -0
  45. typed_ffmpeg_compatible-2.4.1.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,3572 @@
1
+ # NOTE: this file is auto-generated, do not modify
2
+ from typing import Any, Literal
3
+
4
+ from .common.schema import FFMpegFilterDef
5
+ from .dag.factory import filter_node_factory
6
+ from .dag.nodes import FilterableStream, FilterNode
7
+ from .schema import Auto, Default
8
+ from .streams.audio import AudioStream
9
+ from .streams.video import VideoStream
10
+ from .types import Boolean, Color, Double, Duration, Flags, Float, Image_size, Int, Int64, Pix_fmt, String
11
+
12
+
13
+ def acrossfade(
14
+ _crossfade0: AudioStream,
15
+ _crossfade1: AudioStream,
16
+ *,
17
+ nb_samples: Int = Default(44100),
18
+ duration: Duration = Default(0.0),
19
+ overlap: Boolean = Default(True),
20
+ curve1: Int
21
+ | Literal[
22
+ "nofade",
23
+ "tri",
24
+ "qsin",
25
+ "esin",
26
+ "hsin",
27
+ "log",
28
+ "ipar",
29
+ "qua",
30
+ "cub",
31
+ "squ",
32
+ "cbr",
33
+ "par",
34
+ "exp",
35
+ "iqsin",
36
+ "ihsin",
37
+ "dese",
38
+ "desi",
39
+ "losi",
40
+ "sinc",
41
+ "isinc",
42
+ "quat",
43
+ "quatr",
44
+ "qsin2",
45
+ "hsin2",
46
+ ]
47
+ | Default = Default("tri"),
48
+ curve2: Int
49
+ | Literal[
50
+ "nofade",
51
+ "tri",
52
+ "qsin",
53
+ "esin",
54
+ "hsin",
55
+ "log",
56
+ "ipar",
57
+ "qua",
58
+ "cub",
59
+ "squ",
60
+ "cbr",
61
+ "par",
62
+ "exp",
63
+ "iqsin",
64
+ "ihsin",
65
+ "dese",
66
+ "desi",
67
+ "losi",
68
+ "sinc",
69
+ "isinc",
70
+ "quat",
71
+ "quatr",
72
+ "qsin2",
73
+ "hsin2",
74
+ ]
75
+ | Default = Default("tri"),
76
+ extra_options: dict[str, Any] = None,
77
+ ) -> AudioStream:
78
+ """
79
+
80
+ Cross fade two input audio streams.
81
+
82
+ Args:
83
+ nb_samples: set number of samples for cross fade duration (from 1 to 2.14748e+08) (default 44100)
84
+ duration: set cross fade duration (default 0)
85
+ overlap: overlap 1st stream end with 2nd stream start (default true)
86
+ curve1: set fade curve type for 1st stream (from -1 to 22) (default tri)
87
+ curve2: set fade curve type for 2nd stream (from -1 to 22) (default tri)
88
+
89
+ Returns:
90
+ default: the audio stream
91
+
92
+ References:
93
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#acrossfade)
94
+
95
+ """
96
+ filter_node = filter_node_factory(
97
+ FFMpegFilterDef(name="acrossfade", typings_input=("audio", "audio"), typings_output=("audio",)),
98
+ _crossfade0,
99
+ _crossfade1,
100
+ **{
101
+ "nb_samples": nb_samples,
102
+ "duration": duration,
103
+ "overlap": overlap,
104
+ "curve1": curve1,
105
+ "curve2": curve2,
106
+ }
107
+ | (extra_options or {}),
108
+ )
109
+ return filter_node.audio(0)
110
+
111
+
112
+ def ainterleave(
113
+ *streams: AudioStream,
114
+ nb_inputs: Int = Auto("len(streams)"),
115
+ duration: Int | Literal["longest", "shortest", "first"] | Default = Default("longest"),
116
+ extra_options: dict[str, Any] = None,
117
+ ) -> AudioStream:
118
+ """
119
+
120
+ Temporally interleave audio inputs.
121
+
122
+ Args:
123
+ nb_inputs: set number of inputs (from 1 to INT_MAX) (default 2)
124
+ duration: how to determine the end-of-stream (from 0 to 2) (default longest)
125
+
126
+ Returns:
127
+ default: the audio stream
128
+
129
+ References:
130
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#interleave_002c-ainterleave)
131
+
132
+ """
133
+ filter_node = filter_node_factory(
134
+ FFMpegFilterDef(
135
+ name="ainterleave", typings_input="[StreamType.audio] * int(nb_inputs)", typings_output=("audio",)
136
+ ),
137
+ *streams,
138
+ **{
139
+ "nb_inputs": nb_inputs,
140
+ "duration": duration,
141
+ }
142
+ | (extra_options or {}),
143
+ )
144
+ return filter_node.audio(0)
145
+
146
+
147
+ def alphamerge(
148
+ _main: VideoStream,
149
+ _alpha: VideoStream,
150
+ *,
151
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
152
+ shortest: Boolean = Default(False),
153
+ repeatlast: Boolean = Default(True),
154
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
155
+ enable: String = Default(None),
156
+ extra_options: dict[str, Any] = None,
157
+ ) -> VideoStream:
158
+ """
159
+
160
+ Copy the luma value of the second input into the alpha channel of the first input.
161
+
162
+ Args:
163
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
164
+ shortest: force termination when the shortest input terminates (default false)
165
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
166
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
167
+ enable: timeline editing
168
+
169
+ Returns:
170
+ default: the video stream
171
+
172
+ References:
173
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#alphamerge)
174
+
175
+ """
176
+ filter_node = filter_node_factory(
177
+ FFMpegFilterDef(name="alphamerge", typings_input=("video", "video"), typings_output=("video",)),
178
+ _main,
179
+ _alpha,
180
+ **{
181
+ "eof_action": eof_action,
182
+ "shortest": shortest,
183
+ "repeatlast": repeatlast,
184
+ "ts_sync_mode": ts_sync_mode,
185
+ "enable": enable,
186
+ }
187
+ | (extra_options or {}),
188
+ )
189
+ return filter_node.video(0)
190
+
191
+
192
+ def amerge(
193
+ *streams: AudioStream,
194
+ inputs: Int = Auto("len(streams)"),
195
+ extra_options: dict[str, Any] = None,
196
+ ) -> AudioStream:
197
+ """
198
+
199
+ Merge two or more audio streams into a single multi-channel stream.
200
+
201
+ Args:
202
+ inputs: specify the number of inputs (from 1 to 64) (default 2)
203
+
204
+ Returns:
205
+ default: the audio stream
206
+
207
+ References:
208
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#amerge)
209
+
210
+ """
211
+ filter_node = filter_node_factory(
212
+ FFMpegFilterDef(name="amerge", typings_input="[StreamType.audio] * int(inputs)", typings_output=("audio",)),
213
+ *streams,
214
+ **{
215
+ "inputs": inputs,
216
+ }
217
+ | (extra_options or {}),
218
+ )
219
+ return filter_node.audio(0)
220
+
221
+
222
+ def amix(
223
+ *streams: AudioStream,
224
+ inputs: Int = Auto("len(streams)"),
225
+ duration: Int | Literal["longest", "shortest", "first"] | Default = Default("longest"),
226
+ dropout_transition: Float = Default(2.0),
227
+ weights: String = Default("1 1"),
228
+ normalize: Boolean = Default(True),
229
+ extra_options: dict[str, Any] = None,
230
+ ) -> AudioStream:
231
+ """
232
+
233
+ Audio mixing.
234
+
235
+ Args:
236
+ inputs: Number of inputs. (from 1 to 32767) (default 2)
237
+ duration: How to determine the end-of-stream. (from 0 to 2) (default longest)
238
+ dropout_transition: Transition time, in seconds, for volume renormalization when an input stream ends. (from 0 to INT_MAX) (default 2)
239
+ weights: Set weight for each input. (default "1 1")
240
+ normalize: Scale inputs (default true)
241
+
242
+ Returns:
243
+ default: the audio stream
244
+
245
+ References:
246
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#amix)
247
+
248
+ """
249
+ filter_node = filter_node_factory(
250
+ FFMpegFilterDef(name="amix", typings_input="[StreamType.audio] * int(inputs)", typings_output=("audio",)),
251
+ *streams,
252
+ **{
253
+ "inputs": inputs,
254
+ "duration": duration,
255
+ "dropout_transition": dropout_transition,
256
+ "weights": weights,
257
+ "normalize": normalize,
258
+ }
259
+ | (extra_options or {}),
260
+ )
261
+ return filter_node.audio(0)
262
+
263
+
264
+ def amultiply(
265
+ _multiply0: AudioStream,
266
+ _multiply1: AudioStream,
267
+ extra_options: dict[str, Any] = None,
268
+ ) -> AudioStream:
269
+ """
270
+
271
+ Multiply two audio streams.
272
+
273
+ Returns:
274
+ default: the audio stream
275
+
276
+ References:
277
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#amultiply)
278
+
279
+ """
280
+ filter_node = filter_node_factory(
281
+ FFMpegFilterDef(name="amultiply", typings_input=("audio", "audio"), typings_output=("audio",)),
282
+ _multiply0,
283
+ _multiply1,
284
+ **{} | (extra_options or {}),
285
+ )
286
+ return filter_node.audio(0)
287
+
288
+
289
+ def anlmf(
290
+ _input: AudioStream,
291
+ _desired: AudioStream,
292
+ *,
293
+ order: Int = Default(256),
294
+ mu: Float = Default(0.75),
295
+ eps: Float = Default(1.0),
296
+ leakage: Float = Default(0.0),
297
+ out_mode: Int | Literal["i", "d", "o", "n", "e"] | Default = Default("o"),
298
+ enable: String = Default(None),
299
+ extra_options: dict[str, Any] = None,
300
+ ) -> AudioStream:
301
+ """
302
+
303
+ Apply Normalized Least-Mean-Fourth algorithm to first audio stream.
304
+
305
+ Args:
306
+ order: set the filter order (from 1 to 32767) (default 256)
307
+ mu: set the filter mu (from 0 to 2) (default 0.75)
308
+ eps: set the filter eps (from 0 to 1) (default 1)
309
+ leakage: set the filter leakage (from 0 to 1) (default 0)
310
+ out_mode: set output mode (from 0 to 4) (default o)
311
+ enable: timeline editing
312
+
313
+ Returns:
314
+ default: the audio stream
315
+
316
+ References:
317
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#anlmf_002c-anlms)
318
+
319
+ """
320
+ filter_node = filter_node_factory(
321
+ FFMpegFilterDef(name="anlmf", typings_input=("audio", "audio"), typings_output=("audio",)),
322
+ _input,
323
+ _desired,
324
+ **{
325
+ "order": order,
326
+ "mu": mu,
327
+ "eps": eps,
328
+ "leakage": leakage,
329
+ "out_mode": out_mode,
330
+ "enable": enable,
331
+ }
332
+ | (extra_options or {}),
333
+ )
334
+ return filter_node.audio(0)
335
+
336
+
337
+ def anlms(
338
+ _input: AudioStream,
339
+ _desired: AudioStream,
340
+ *,
341
+ order: Int = Default(256),
342
+ mu: Float = Default(0.75),
343
+ eps: Float = Default(1.0),
344
+ leakage: Float = Default(0.0),
345
+ out_mode: Int | Literal["i", "d", "o", "n", "e"] | Default = Default("o"),
346
+ enable: String = Default(None),
347
+ extra_options: dict[str, Any] = None,
348
+ ) -> AudioStream:
349
+ """
350
+
351
+ Apply Normalized Least-Mean-Squares algorithm to first audio stream.
352
+
353
+ Args:
354
+ order: set the filter order (from 1 to 32767) (default 256)
355
+ mu: set the filter mu (from 0 to 2) (default 0.75)
356
+ eps: set the filter eps (from 0 to 1) (default 1)
357
+ leakage: set the filter leakage (from 0 to 1) (default 0)
358
+ out_mode: set output mode (from 0 to 4) (default o)
359
+ enable: timeline editing
360
+
361
+ Returns:
362
+ default: the audio stream
363
+
364
+ References:
365
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#anlmf_002c-anlms)
366
+
367
+ """
368
+ filter_node = filter_node_factory(
369
+ FFMpegFilterDef(name="anlms", typings_input=("audio", "audio"), typings_output=("audio",)),
370
+ _input,
371
+ _desired,
372
+ **{
373
+ "order": order,
374
+ "mu": mu,
375
+ "eps": eps,
376
+ "leakage": leakage,
377
+ "out_mode": out_mode,
378
+ "enable": enable,
379
+ }
380
+ | (extra_options or {}),
381
+ )
382
+ return filter_node.audio(0)
383
+
384
+
385
+ def apsnr(
386
+ _input0: AudioStream,
387
+ _input1: AudioStream,
388
+ *,
389
+ enable: String = Default(None),
390
+ extra_options: dict[str, Any] = None,
391
+ ) -> AudioStream:
392
+ """
393
+
394
+ Measure Audio Peak Signal-to-Noise Ratio.
395
+
396
+ Args:
397
+ enable: timeline editing
398
+
399
+ Returns:
400
+ default: the audio stream
401
+
402
+ References:
403
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#apsnr)
404
+
405
+ """
406
+ filter_node = filter_node_factory(
407
+ FFMpegFilterDef(name="apsnr", typings_input=("audio", "audio"), typings_output=("audio",)),
408
+ _input0,
409
+ _input1,
410
+ **{
411
+ "enable": enable,
412
+ }
413
+ | (extra_options or {}),
414
+ )
415
+ return filter_node.audio(0)
416
+
417
+
418
+ def arls(
419
+ _input: AudioStream,
420
+ _desired: AudioStream,
421
+ *,
422
+ order: Int = Default(16),
423
+ _lambda: Float = Default(1.0),
424
+ delta: Float = Default(2.0),
425
+ out_mode: Int | Literal["i", "d", "o", "n", "e"] | Default = Default("o"),
426
+ enable: String = Default(None),
427
+ extra_options: dict[str, Any] = None,
428
+ ) -> AudioStream:
429
+ """
430
+
431
+ Apply Recursive Least Squares algorithm to first audio stream.
432
+
433
+ Args:
434
+ order: set the filter order (from 1 to 32767) (default 16)
435
+ _lambda: set the filter lambda (from 0 to 1) (default 1)
436
+ delta: set the filter delta (from 0 to 32767) (default 2)
437
+ out_mode: set output mode (from 0 to 4) (default o)
438
+ enable: timeline editing
439
+
440
+ Returns:
441
+ default: the audio stream
442
+
443
+ References:
444
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#arls)
445
+
446
+ """
447
+ filter_node = filter_node_factory(
448
+ FFMpegFilterDef(name="arls", typings_input=("audio", "audio"), typings_output=("audio",)),
449
+ _input,
450
+ _desired,
451
+ **{
452
+ "order": order,
453
+ "lambda": _lambda,
454
+ "delta": delta,
455
+ "out_mode": out_mode,
456
+ "enable": enable,
457
+ }
458
+ | (extra_options or {}),
459
+ )
460
+ return filter_node.audio(0)
461
+
462
+
463
+ def asdr(
464
+ _input0: AudioStream,
465
+ _input1: AudioStream,
466
+ *,
467
+ enable: String = Default(None),
468
+ extra_options: dict[str, Any] = None,
469
+ ) -> AudioStream:
470
+ """
471
+
472
+ Measure Audio Signal-to-Distortion Ratio.
473
+
474
+ Args:
475
+ enable: timeline editing
476
+
477
+ Returns:
478
+ default: the audio stream
479
+
480
+ References:
481
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#asdr)
482
+
483
+ """
484
+ filter_node = filter_node_factory(
485
+ FFMpegFilterDef(name="asdr", typings_input=("audio", "audio"), typings_output=("audio",)),
486
+ _input0,
487
+ _input1,
488
+ **{
489
+ "enable": enable,
490
+ }
491
+ | (extra_options or {}),
492
+ )
493
+ return filter_node.audio(0)
494
+
495
+
496
+ def asisdr(
497
+ _input0: AudioStream,
498
+ _input1: AudioStream,
499
+ *,
500
+ enable: String = Default(None),
501
+ extra_options: dict[str, Any] = None,
502
+ ) -> AudioStream:
503
+ """
504
+
505
+ Measure Audio Scale-Invariant Signal-to-Distortion Ratio.
506
+
507
+ Args:
508
+ enable: timeline editing
509
+
510
+ Returns:
511
+ default: the audio stream
512
+
513
+ References:
514
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#asisdr)
515
+
516
+ """
517
+ filter_node = filter_node_factory(
518
+ FFMpegFilterDef(name="asisdr", typings_input=("audio", "audio"), typings_output=("audio",)),
519
+ _input0,
520
+ _input1,
521
+ **{
522
+ "enable": enable,
523
+ }
524
+ | (extra_options or {}),
525
+ )
526
+ return filter_node.audio(0)
527
+
528
+
529
+ def astreamselect(
530
+ *streams: AudioStream,
531
+ inputs: Int = Auto("len(streams)"),
532
+ map: String = Default(None),
533
+ extra_options: dict[str, Any] = None,
534
+ ) -> FilterNode:
535
+ """
536
+
537
+ Select audio streams
538
+
539
+ Args:
540
+ inputs: number of input streams (from 2 to INT_MAX) (default 2)
541
+ map: input indexes to remap to outputs
542
+
543
+ Returns:
544
+ filter_node: the filter node
545
+
546
+
547
+ References:
548
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#streamselect_002c-astreamselect)
549
+
550
+ """
551
+ filter_node = filter_node_factory(
552
+ FFMpegFilterDef(
553
+ name="astreamselect",
554
+ typings_input="[StreamType.audio] * int(inputs)",
555
+ typings_output="[StreamType.audio] * len(re.findall(r'\\d+', str(map)))",
556
+ ),
557
+ *streams,
558
+ **{
559
+ "inputs": inputs,
560
+ "map": map,
561
+ }
562
+ | (extra_options or {}),
563
+ )
564
+
565
+ return filter_node
566
+
567
+
568
+ def axcorrelate(
569
+ _axcorrelate0: AudioStream,
570
+ _axcorrelate1: AudioStream,
571
+ *,
572
+ size: Int = Default(256),
573
+ algo: Int | Literal["slow", "fast", "best"] | Default = Default("best"),
574
+ extra_options: dict[str, Any] = None,
575
+ ) -> AudioStream:
576
+ """
577
+
578
+ Cross-correlate two audio streams.
579
+
580
+ Args:
581
+ size: set the segment size (from 2 to 131072) (default 256)
582
+ algo: set the algorithm (from 0 to 2) (default best)
583
+
584
+ Returns:
585
+ default: the audio stream
586
+
587
+ References:
588
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#axcorrelate)
589
+
590
+ """
591
+ filter_node = filter_node_factory(
592
+ FFMpegFilterDef(name="axcorrelate", typings_input=("audio", "audio"), typings_output=("audio",)),
593
+ _axcorrelate0,
594
+ _axcorrelate1,
595
+ **{
596
+ "size": size,
597
+ "algo": algo,
598
+ }
599
+ | (extra_options or {}),
600
+ )
601
+ return filter_node.audio(0)
602
+
603
+
604
+ def blend(
605
+ _top: VideoStream,
606
+ _bottom: VideoStream,
607
+ *,
608
+ c0_mode: Int
609
+ | Literal[
610
+ "addition",
611
+ "addition128",
612
+ "grainmerge",
613
+ "and",
614
+ "average",
615
+ "burn",
616
+ "darken",
617
+ "difference",
618
+ "difference128",
619
+ "grainextract",
620
+ "divide",
621
+ "dodge",
622
+ "exclusion",
623
+ "extremity",
624
+ "freeze",
625
+ "glow",
626
+ "hardlight",
627
+ "hardmix",
628
+ "heat",
629
+ "lighten",
630
+ "linearlight",
631
+ "multiply",
632
+ "multiply128",
633
+ "negation",
634
+ "normal",
635
+ "or",
636
+ "overlay",
637
+ "phoenix",
638
+ "pinlight",
639
+ "reflect",
640
+ "screen",
641
+ "softlight",
642
+ "subtract",
643
+ "vividlight",
644
+ "xor",
645
+ "softdifference",
646
+ "geometric",
647
+ "harmonic",
648
+ "bleach",
649
+ "stain",
650
+ "interpolate",
651
+ "hardoverlay",
652
+ ]
653
+ | Default = Default("normal"),
654
+ c1_mode: Int
655
+ | Literal[
656
+ "addition",
657
+ "addition128",
658
+ "grainmerge",
659
+ "and",
660
+ "average",
661
+ "burn",
662
+ "darken",
663
+ "difference",
664
+ "difference128",
665
+ "grainextract",
666
+ "divide",
667
+ "dodge",
668
+ "exclusion",
669
+ "extremity",
670
+ "freeze",
671
+ "glow",
672
+ "hardlight",
673
+ "hardmix",
674
+ "heat",
675
+ "lighten",
676
+ "linearlight",
677
+ "multiply",
678
+ "multiply128",
679
+ "negation",
680
+ "normal",
681
+ "or",
682
+ "overlay",
683
+ "phoenix",
684
+ "pinlight",
685
+ "reflect",
686
+ "screen",
687
+ "softlight",
688
+ "subtract",
689
+ "vividlight",
690
+ "xor",
691
+ "softdifference",
692
+ "geometric",
693
+ "harmonic",
694
+ "bleach",
695
+ "stain",
696
+ "interpolate",
697
+ "hardoverlay",
698
+ ]
699
+ | Default = Default("normal"),
700
+ c2_mode: Int
701
+ | Literal[
702
+ "addition",
703
+ "addition128",
704
+ "grainmerge",
705
+ "and",
706
+ "average",
707
+ "burn",
708
+ "darken",
709
+ "difference",
710
+ "difference128",
711
+ "grainextract",
712
+ "divide",
713
+ "dodge",
714
+ "exclusion",
715
+ "extremity",
716
+ "freeze",
717
+ "glow",
718
+ "hardlight",
719
+ "hardmix",
720
+ "heat",
721
+ "lighten",
722
+ "linearlight",
723
+ "multiply",
724
+ "multiply128",
725
+ "negation",
726
+ "normal",
727
+ "or",
728
+ "overlay",
729
+ "phoenix",
730
+ "pinlight",
731
+ "reflect",
732
+ "screen",
733
+ "softlight",
734
+ "subtract",
735
+ "vividlight",
736
+ "xor",
737
+ "softdifference",
738
+ "geometric",
739
+ "harmonic",
740
+ "bleach",
741
+ "stain",
742
+ "interpolate",
743
+ "hardoverlay",
744
+ ]
745
+ | Default = Default("normal"),
746
+ c3_mode: Int
747
+ | Literal[
748
+ "addition",
749
+ "addition128",
750
+ "grainmerge",
751
+ "and",
752
+ "average",
753
+ "burn",
754
+ "darken",
755
+ "difference",
756
+ "difference128",
757
+ "grainextract",
758
+ "divide",
759
+ "dodge",
760
+ "exclusion",
761
+ "extremity",
762
+ "freeze",
763
+ "glow",
764
+ "hardlight",
765
+ "hardmix",
766
+ "heat",
767
+ "lighten",
768
+ "linearlight",
769
+ "multiply",
770
+ "multiply128",
771
+ "negation",
772
+ "normal",
773
+ "or",
774
+ "overlay",
775
+ "phoenix",
776
+ "pinlight",
777
+ "reflect",
778
+ "screen",
779
+ "softlight",
780
+ "subtract",
781
+ "vividlight",
782
+ "xor",
783
+ "softdifference",
784
+ "geometric",
785
+ "harmonic",
786
+ "bleach",
787
+ "stain",
788
+ "interpolate",
789
+ "hardoverlay",
790
+ ]
791
+ | Default = Default("normal"),
792
+ all_mode: Int
793
+ | Literal[
794
+ "addition",
795
+ "addition128",
796
+ "grainmerge",
797
+ "and",
798
+ "average",
799
+ "burn",
800
+ "darken",
801
+ "difference",
802
+ "difference128",
803
+ "grainextract",
804
+ "divide",
805
+ "dodge",
806
+ "exclusion",
807
+ "extremity",
808
+ "freeze",
809
+ "glow",
810
+ "hardlight",
811
+ "hardmix",
812
+ "heat",
813
+ "lighten",
814
+ "linearlight",
815
+ "multiply",
816
+ "multiply128",
817
+ "negation",
818
+ "normal",
819
+ "or",
820
+ "overlay",
821
+ "phoenix",
822
+ "pinlight",
823
+ "reflect",
824
+ "screen",
825
+ "softlight",
826
+ "subtract",
827
+ "vividlight",
828
+ "xor",
829
+ "softdifference",
830
+ "geometric",
831
+ "harmonic",
832
+ "bleach",
833
+ "stain",
834
+ "interpolate",
835
+ "hardoverlay",
836
+ ]
837
+ | Default = Default(-1),
838
+ c0_expr: String = Default(None),
839
+ c1_expr: String = Default(None),
840
+ c2_expr: String = Default(None),
841
+ c3_expr: String = Default(None),
842
+ all_expr: String = Default(None),
843
+ c0_opacity: Double = Default(1.0),
844
+ c1_opacity: Double = Default(1.0),
845
+ c2_opacity: Double = Default(1.0),
846
+ c3_opacity: Double = Default(1.0),
847
+ all_opacity: Double = Default(1.0),
848
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
849
+ shortest: Boolean = Default(False),
850
+ repeatlast: Boolean = Default(True),
851
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
852
+ enable: String = Default(None),
853
+ extra_options: dict[str, Any] = None,
854
+ ) -> VideoStream:
855
+ """
856
+
857
+ Blend two video frames into each other.
858
+
859
+ Args:
860
+ c0_mode: set component #0 blend mode (from 0 to 39) (default normal)
861
+ c1_mode: set component #1 blend mode (from 0 to 39) (default normal)
862
+ c2_mode: set component #2 blend mode (from 0 to 39) (default normal)
863
+ c3_mode: set component #3 blend mode (from 0 to 39) (default normal)
864
+ all_mode: set blend mode for all components (from -1 to 39) (default -1)
865
+ c0_expr: set color component #0 expression
866
+ c1_expr: set color component #1 expression
867
+ c2_expr: set color component #2 expression
868
+ c3_expr: set color component #3 expression
869
+ all_expr: set expression for all color components
870
+ c0_opacity: set color component #0 opacity (from 0 to 1) (default 1)
871
+ c1_opacity: set color component #1 opacity (from 0 to 1) (default 1)
872
+ c2_opacity: set color component #2 opacity (from 0 to 1) (default 1)
873
+ c3_opacity: set color component #3 opacity (from 0 to 1) (default 1)
874
+ all_opacity: set opacity for all color components (from 0 to 1) (default 1)
875
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
876
+ shortest: force termination when the shortest input terminates (default false)
877
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
878
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
879
+ enable: timeline editing
880
+
881
+ Returns:
882
+ default: the video stream
883
+
884
+ References:
885
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#blend)
886
+
887
+ """
888
+ filter_node = filter_node_factory(
889
+ FFMpegFilterDef(name="blend", typings_input=("video", "video"), typings_output=("video",)),
890
+ _top,
891
+ _bottom,
892
+ **{
893
+ "c0_mode": c0_mode,
894
+ "c1_mode": c1_mode,
895
+ "c2_mode": c2_mode,
896
+ "c3_mode": c3_mode,
897
+ "all_mode": all_mode,
898
+ "c0_expr": c0_expr,
899
+ "c1_expr": c1_expr,
900
+ "c2_expr": c2_expr,
901
+ "c3_expr": c3_expr,
902
+ "all_expr": all_expr,
903
+ "c0_opacity": c0_opacity,
904
+ "c1_opacity": c1_opacity,
905
+ "c2_opacity": c2_opacity,
906
+ "c3_opacity": c3_opacity,
907
+ "all_opacity": all_opacity,
908
+ "eof_action": eof_action,
909
+ "shortest": shortest,
910
+ "repeatlast": repeatlast,
911
+ "ts_sync_mode": ts_sync_mode,
912
+ "enable": enable,
913
+ }
914
+ | (extra_options or {}),
915
+ )
916
+ return filter_node.video(0)
917
+
918
+
919
+ def bm3d(
920
+ *streams: VideoStream,
921
+ sigma: Float = Default(1.0),
922
+ block: Int = Default(16),
923
+ bstep: Int = Default(4),
924
+ group: Int = Default(1),
925
+ range: Int = Default(9),
926
+ mstep: Int = Default(1),
927
+ thmse: Float = Default(0.0),
928
+ hdthr: Float = Default(2.7),
929
+ estim: Int | Literal["basic", "final"] | Default = Default("basic"),
930
+ ref: Boolean = Default(False),
931
+ planes: Int = Default(7),
932
+ enable: String = Default(None),
933
+ extra_options: dict[str, Any] = None,
934
+ ) -> VideoStream:
935
+ """
936
+
937
+ Block-Matching 3D denoiser.
938
+
939
+ Args:
940
+ sigma: set denoising strength (from 0 to 99999.9) (default 1)
941
+ block: set size of local patch (from 8 to 64) (default 16)
942
+ bstep: set sliding step for processing blocks (from 1 to 64) (default 4)
943
+ group: set maximal number of similar blocks (from 1 to 256) (default 1)
944
+ range: set block matching range (from 1 to INT_MAX) (default 9)
945
+ mstep: set step for block matching (from 1 to 64) (default 1)
946
+ thmse: set threshold of mean square error for block matching (from 0 to INT_MAX) (default 0)
947
+ hdthr: set hard threshold for 3D transfer domain (from 0 to INT_MAX) (default 2.7)
948
+ estim: set filtering estimation mode (from 0 to 1) (default basic)
949
+ ref: have reference stream (default false)
950
+ planes: set planes to filter (from 0 to 15) (default 7)
951
+ enable: timeline editing
952
+
953
+ Returns:
954
+ default: the video stream
955
+
956
+ References:
957
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#bm3d)
958
+
959
+ """
960
+ filter_node = filter_node_factory(
961
+ FFMpegFilterDef(
962
+ name="bm3d",
963
+ typings_input="[StreamType.video] + [StreamType.video] if ref else []",
964
+ typings_output=("video",),
965
+ ),
966
+ *streams,
967
+ **{
968
+ "sigma": sigma,
969
+ "block": block,
970
+ "bstep": bstep,
971
+ "group": group,
972
+ "range": range,
973
+ "mstep": mstep,
974
+ "thmse": thmse,
975
+ "hdthr": hdthr,
976
+ "estim": estim,
977
+ "ref": ref,
978
+ "planes": planes,
979
+ "enable": enable,
980
+ }
981
+ | (extra_options or {}),
982
+ )
983
+ return filter_node.video(0)
984
+
985
+
986
+ def colormap(
987
+ _default: VideoStream,
988
+ _source: VideoStream,
989
+ _target: VideoStream,
990
+ *,
991
+ patch_size: Image_size = Default("64x64"),
992
+ nb_patches: Int = Default(0),
993
+ type: Int | Literal["relative", "absolute"] | Default = Default("absolute"),
994
+ kernel: Int | Literal["euclidean", "weuclidean"] | Default = Default("euclidean"),
995
+ enable: String = Default(None),
996
+ extra_options: dict[str, Any] = None,
997
+ ) -> VideoStream:
998
+ """
999
+
1000
+ Apply custom Color Maps to video stream.
1001
+
1002
+ Args:
1003
+ patch_size: set patch size (default "64x64")
1004
+ nb_patches: set number of patches (from 0 to 64) (default 0)
1005
+ type: set the target type used (from 0 to 1) (default absolute)
1006
+ kernel: set the kernel used for measuring color difference (from 0 to 1) (default euclidean)
1007
+ enable: timeline editing
1008
+
1009
+ Returns:
1010
+ default: the video stream
1011
+
1012
+ References:
1013
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#colormap)
1014
+
1015
+ """
1016
+ filter_node = filter_node_factory(
1017
+ FFMpegFilterDef(name="colormap", typings_input=("video", "video", "video"), typings_output=("video",)),
1018
+ _default,
1019
+ _source,
1020
+ _target,
1021
+ **{
1022
+ "patch_size": patch_size,
1023
+ "nb_patches": nb_patches,
1024
+ "type": type,
1025
+ "kernel": kernel,
1026
+ "enable": enable,
1027
+ }
1028
+ | (extra_options or {}),
1029
+ )
1030
+ return filter_node.video(0)
1031
+
1032
+
1033
+ def concat(
1034
+ *streams: FilterableStream,
1035
+ n: Int = Auto("len(streams) // (int(v) + int(a))"),
1036
+ v: Int = Default(1),
1037
+ a: Int = Default(0),
1038
+ unsafe: Boolean = Default(False),
1039
+ extra_options: dict[str, Any] = None,
1040
+ ) -> FilterNode:
1041
+ """
1042
+
1043
+ Concatenate audio and video streams.
1044
+
1045
+ Args:
1046
+ n: specify the number of segments (from 1 to INT_MAX) (default 2)
1047
+ v: specify the number of video streams (from 0 to INT_MAX) (default 1)
1048
+ a: specify the number of audio streams (from 0 to INT_MAX) (default 0)
1049
+ unsafe: enable unsafe mode (default false)
1050
+
1051
+ Returns:
1052
+ filter_node: the filter node
1053
+
1054
+
1055
+ References:
1056
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#concat)
1057
+
1058
+ """
1059
+ filter_node = filter_node_factory(
1060
+ FFMpegFilterDef(
1061
+ name="concat",
1062
+ typings_input="([StreamType.video]*int(v) + [StreamType.audio]*int(a))*int(n)",
1063
+ typings_output="[StreamType.video]*int(v) + [StreamType.audio]*int(a)",
1064
+ ),
1065
+ *streams,
1066
+ **{
1067
+ "n": n,
1068
+ "v": v,
1069
+ "a": a,
1070
+ "unsafe": unsafe,
1071
+ }
1072
+ | (extra_options or {}),
1073
+ )
1074
+
1075
+ return filter_node
1076
+
1077
+
1078
+ def convolve(
1079
+ _main: VideoStream,
1080
+ _impulse: VideoStream,
1081
+ *,
1082
+ planes: Int = Default(7),
1083
+ impulse: Int | Literal["first", "all"] | Default = Default("all"),
1084
+ noise: Float = Default(1e-07),
1085
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
1086
+ shortest: Boolean = Default(False),
1087
+ repeatlast: Boolean = Default(True),
1088
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
1089
+ enable: String = Default(None),
1090
+ extra_options: dict[str, Any] = None,
1091
+ ) -> VideoStream:
1092
+ """
1093
+
1094
+ Convolve first video stream with second video stream.
1095
+
1096
+ Args:
1097
+ planes: set planes to convolve (from 0 to 15) (default 7)
1098
+ impulse: when to process impulses (from 0 to 1) (default all)
1099
+ noise: set noise (from 0 to 1) (default 1e-07)
1100
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
1101
+ shortest: force termination when the shortest input terminates (default false)
1102
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
1103
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
1104
+ enable: timeline editing
1105
+
1106
+ Returns:
1107
+ default: the video stream
1108
+
1109
+ References:
1110
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#convolve)
1111
+
1112
+ """
1113
+ filter_node = filter_node_factory(
1114
+ FFMpegFilterDef(name="convolve", typings_input=("video", "video"), typings_output=("video",)),
1115
+ _main,
1116
+ _impulse,
1117
+ **{
1118
+ "planes": planes,
1119
+ "impulse": impulse,
1120
+ "noise": noise,
1121
+ "eof_action": eof_action,
1122
+ "shortest": shortest,
1123
+ "repeatlast": repeatlast,
1124
+ "ts_sync_mode": ts_sync_mode,
1125
+ "enable": enable,
1126
+ }
1127
+ | (extra_options or {}),
1128
+ )
1129
+ return filter_node.video(0)
1130
+
1131
+
1132
+ def corr(
1133
+ _main: VideoStream,
1134
+ _reference: VideoStream,
1135
+ *,
1136
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
1137
+ shortest: Boolean = Default(False),
1138
+ repeatlast: Boolean = Default(True),
1139
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
1140
+ enable: String = Default(None),
1141
+ extra_options: dict[str, Any] = None,
1142
+ ) -> VideoStream:
1143
+ """
1144
+
1145
+ Calculate the correlation between two video streams.
1146
+
1147
+ Args:
1148
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
1149
+ shortest: force termination when the shortest input terminates (default false)
1150
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
1151
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
1152
+ enable: timeline editing
1153
+
1154
+ Returns:
1155
+ default: the video stream
1156
+
1157
+ References:
1158
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#corr)
1159
+
1160
+ """
1161
+ filter_node = filter_node_factory(
1162
+ FFMpegFilterDef(name="corr", typings_input=("video", "video"), typings_output=("video",)),
1163
+ _main,
1164
+ _reference,
1165
+ **{
1166
+ "eof_action": eof_action,
1167
+ "shortest": shortest,
1168
+ "repeatlast": repeatlast,
1169
+ "ts_sync_mode": ts_sync_mode,
1170
+ "enable": enable,
1171
+ }
1172
+ | (extra_options or {}),
1173
+ )
1174
+ return filter_node.video(0)
1175
+
1176
+
1177
+ def decimate(
1178
+ *streams: VideoStream,
1179
+ cycle: Int = Default(5),
1180
+ dupthresh: Double = Default(1.1),
1181
+ scthresh: Double = Default(15.0),
1182
+ blockx: Int = Default(32),
1183
+ blocky: Int = Default(32),
1184
+ ppsrc: Boolean = Default(False),
1185
+ chroma: Boolean = Default(True),
1186
+ mixed: Boolean = Default(False),
1187
+ extra_options: dict[str, Any] = None,
1188
+ ) -> VideoStream:
1189
+ """
1190
+
1191
+ Decimate frames (post field matching filter).
1192
+
1193
+ Args:
1194
+ cycle: set the number of frame from which one will be dropped (from 2 to 25) (default 5)
1195
+ dupthresh: set duplicate threshold (from 0 to 100) (default 1.1)
1196
+ scthresh: set scene change threshold (from 0 to 100) (default 15)
1197
+ blockx: set the size of the x-axis blocks used during metric calculations (from 4 to 512) (default 32)
1198
+ blocky: set the size of the y-axis blocks used during metric calculations (from 4 to 512) (default 32)
1199
+ ppsrc: mark main input as a pre-processed input and activate clean source input stream (default false)
1200
+ chroma: set whether or not chroma is considered in the metric calculations (default true)
1201
+ mixed: set whether or not the input only partially contains content to be decimated (default false)
1202
+
1203
+ Returns:
1204
+ default: the video stream
1205
+
1206
+ References:
1207
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#decimate)
1208
+
1209
+ """
1210
+ filter_node = filter_node_factory(
1211
+ FFMpegFilterDef(
1212
+ name="decimate",
1213
+ typings_input="[StreamType.video] + ([StreamType.video] if ppsrc else [])",
1214
+ typings_output=("video",),
1215
+ ),
1216
+ *streams,
1217
+ **{
1218
+ "cycle": cycle,
1219
+ "dupthresh": dupthresh,
1220
+ "scthresh": scthresh,
1221
+ "blockx": blockx,
1222
+ "blocky": blocky,
1223
+ "ppsrc": ppsrc,
1224
+ "chroma": chroma,
1225
+ "mixed": mixed,
1226
+ }
1227
+ | (extra_options or {}),
1228
+ )
1229
+ return filter_node.video(0)
1230
+
1231
+
1232
+ def deconvolve(
1233
+ _main: VideoStream,
1234
+ _impulse: VideoStream,
1235
+ *,
1236
+ planes: Int = Default(7),
1237
+ impulse: Int | Literal["first", "all"] | Default = Default("all"),
1238
+ noise: Float = Default(1e-07),
1239
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
1240
+ shortest: Boolean = Default(False),
1241
+ repeatlast: Boolean = Default(True),
1242
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
1243
+ enable: String = Default(None),
1244
+ extra_options: dict[str, Any] = None,
1245
+ ) -> VideoStream:
1246
+ """
1247
+
1248
+ Deconvolve first video stream with second video stream.
1249
+
1250
+ Args:
1251
+ planes: set planes to deconvolve (from 0 to 15) (default 7)
1252
+ impulse: when to process impulses (from 0 to 1) (default all)
1253
+ noise: set noise (from 0 to 1) (default 1e-07)
1254
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
1255
+ shortest: force termination when the shortest input terminates (default false)
1256
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
1257
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
1258
+ enable: timeline editing
1259
+
1260
+ Returns:
1261
+ default: the video stream
1262
+
1263
+ References:
1264
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#deconvolve)
1265
+
1266
+ """
1267
+ filter_node = filter_node_factory(
1268
+ FFMpegFilterDef(name="deconvolve", typings_input=("video", "video"), typings_output=("video",)),
1269
+ _main,
1270
+ _impulse,
1271
+ **{
1272
+ "planes": planes,
1273
+ "impulse": impulse,
1274
+ "noise": noise,
1275
+ "eof_action": eof_action,
1276
+ "shortest": shortest,
1277
+ "repeatlast": repeatlast,
1278
+ "ts_sync_mode": ts_sync_mode,
1279
+ "enable": enable,
1280
+ }
1281
+ | (extra_options or {}),
1282
+ )
1283
+ return filter_node.video(0)
1284
+
1285
+
1286
+ def displace(
1287
+ _source: VideoStream,
1288
+ _xmap: VideoStream,
1289
+ _ymap: VideoStream,
1290
+ *,
1291
+ edge: Int | Literal["blank", "smear", "wrap", "mirror"] | Default = Default("smear"),
1292
+ enable: String = Default(None),
1293
+ extra_options: dict[str, Any] = None,
1294
+ ) -> VideoStream:
1295
+ """
1296
+
1297
+ Displace pixels.
1298
+
1299
+ Args:
1300
+ edge: set edge mode (from 0 to 3) (default smear)
1301
+ enable: timeline editing
1302
+
1303
+ Returns:
1304
+ default: the video stream
1305
+
1306
+ References:
1307
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#displace)
1308
+
1309
+ """
1310
+ filter_node = filter_node_factory(
1311
+ FFMpegFilterDef(name="displace", typings_input=("video", "video", "video"), typings_output=("video",)),
1312
+ _source,
1313
+ _xmap,
1314
+ _ymap,
1315
+ **{
1316
+ "edge": edge,
1317
+ "enable": enable,
1318
+ }
1319
+ | (extra_options or {}),
1320
+ )
1321
+ return filter_node.video(0)
1322
+
1323
+
1324
+ def feedback(
1325
+ _default: VideoStream,
1326
+ _feedin: VideoStream,
1327
+ *,
1328
+ x: Int = Default(0),
1329
+ w: Int = Default(0),
1330
+ extra_options: dict[str, Any] = None,
1331
+ ) -> tuple[VideoStream, VideoStream,]:
1332
+ """
1333
+
1334
+ Apply feedback video filter.
1335
+
1336
+ Args:
1337
+ x: set top left crop position (from 0 to INT_MAX) (default 0)
1338
+ w: set crop size (from 0 to INT_MAX) (default 0)
1339
+
1340
+ Returns:
1341
+ default: the video stream
1342
+ feedout: the video stream
1343
+
1344
+ References:
1345
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#feedback)
1346
+
1347
+ """
1348
+ filter_node = filter_node_factory(
1349
+ FFMpegFilterDef(name="feedback", typings_input=("video", "video"), typings_output=("video", "video")),
1350
+ _default,
1351
+ _feedin,
1352
+ **{
1353
+ "x": x,
1354
+ "w": w,
1355
+ }
1356
+ | (extra_options or {}),
1357
+ )
1358
+ return (
1359
+ filter_node.video(0),
1360
+ filter_node.video(1),
1361
+ )
1362
+
1363
+
1364
+ def fieldmatch(
1365
+ *streams: VideoStream,
1366
+ order: Int | Literal["auto", "bff", "tff"] | Default = Default("auto"),
1367
+ mode: Int | Literal["pc", "pc_n", "pc_u", "pc_n_ub", "pcn", "pcn_ub"] | Default = Default("pc_n"),
1368
+ ppsrc: Boolean = Default(False),
1369
+ field: Int | Literal["auto", "bottom", "top"] | Default = Default("auto"),
1370
+ mchroma: Boolean = Default(True),
1371
+ y0: Int = Default(0),
1372
+ scthresh: Double = Default(12.0),
1373
+ combmatch: Int | Literal["none", "sc", "full"] | Default = Default("sc"),
1374
+ combdbg: Int | Literal["none", "pcn", "pcnub"] | Default = Default("none"),
1375
+ cthresh: Int = Default(9),
1376
+ chroma: Boolean = Default(False),
1377
+ blockx: Int = Default(16),
1378
+ blocky: Int = Default(16),
1379
+ combpel: Int = Default(80),
1380
+ extra_options: dict[str, Any] = None,
1381
+ ) -> VideoStream:
1382
+ """
1383
+
1384
+ Field matching for inverse telecine.
1385
+
1386
+ Args:
1387
+ order: specify the assumed field order (from -1 to 1) (default auto)
1388
+ mode: set the matching mode or strategy to use (from 0 to 5) (default pc_n)
1389
+ ppsrc: mark main input as a pre-processed input and activate clean source input stream (default false)
1390
+ field: set the field to match from (from -1 to 1) (default auto)
1391
+ mchroma: set whether or not chroma is included during the match comparisons (default true)
1392
+ y0: define an exclusion band which excludes the lines between y0 and y1 from the field matching decision (from 0 to INT_MAX) (default 0)
1393
+ scthresh: set scene change detection threshold (from 0 to 100) (default 12)
1394
+ combmatch: set combmatching mode (from 0 to 2) (default sc)
1395
+ combdbg: enable comb debug (from 0 to 2) (default none)
1396
+ cthresh: set the area combing threshold used for combed frame detection (from -1 to 255) (default 9)
1397
+ chroma: set whether or not chroma is considered in the combed frame decision (default false)
1398
+ blockx: set the x-axis size of the window used during combed frame detection (from 4 to 512) (default 16)
1399
+ blocky: set the y-axis size of the window used during combed frame detection (from 4 to 512) (default 16)
1400
+ combpel: set the number of combed pixels inside any of the blocky by blockx size blocks on the frame for the frame to be detected as combed (from 0 to INT_MAX) (default 80)
1401
+
1402
+ Returns:
1403
+ default: the video stream
1404
+
1405
+ References:
1406
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#fieldmatch)
1407
+
1408
+ """
1409
+ filter_node = filter_node_factory(
1410
+ FFMpegFilterDef(
1411
+ name="fieldmatch",
1412
+ typings_input="[StreamType.video] + [StreamType.video] if ppsrc else []",
1413
+ typings_output=("video",),
1414
+ ),
1415
+ *streams,
1416
+ **{
1417
+ "order": order,
1418
+ "mode": mode,
1419
+ "ppsrc": ppsrc,
1420
+ "field": field,
1421
+ "mchroma": mchroma,
1422
+ "y0": y0,
1423
+ "scthresh": scthresh,
1424
+ "combmatch": combmatch,
1425
+ "combdbg": combdbg,
1426
+ "cthresh": cthresh,
1427
+ "chroma": chroma,
1428
+ "blockx": blockx,
1429
+ "blocky": blocky,
1430
+ "combpel": combpel,
1431
+ }
1432
+ | (extra_options or {}),
1433
+ )
1434
+ return filter_node.video(0)
1435
+
1436
+
1437
+ def framepack(
1438
+ _left: VideoStream,
1439
+ _right: VideoStream,
1440
+ *,
1441
+ format: Int | Literal["sbs", "tab", "frameseq", "lines", "columns"] | Default = Default("sbs"),
1442
+ extra_options: dict[str, Any] = None,
1443
+ ) -> VideoStream:
1444
+ """
1445
+
1446
+ Generate a frame packed stereoscopic video.
1447
+
1448
+ Args:
1449
+ format: Frame pack output format (from 0 to INT_MAX) (default sbs)
1450
+
1451
+ Returns:
1452
+ packed: the video stream
1453
+
1454
+ References:
1455
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#framepack)
1456
+
1457
+ """
1458
+ filter_node = filter_node_factory(
1459
+ FFMpegFilterDef(name="framepack", typings_input=("video", "video"), typings_output=("video",)),
1460
+ _left,
1461
+ _right,
1462
+ **{
1463
+ "format": format,
1464
+ }
1465
+ | (extra_options or {}),
1466
+ )
1467
+ return filter_node.video(0)
1468
+
1469
+
1470
+ def freezeframes(
1471
+ _source: VideoStream,
1472
+ _replace: VideoStream,
1473
+ *,
1474
+ first: Int64 = Default(0),
1475
+ last: Int64 = Default(0),
1476
+ replace: Int64 = Default(0),
1477
+ extra_options: dict[str, Any] = None,
1478
+ ) -> VideoStream:
1479
+ """
1480
+
1481
+ Freeze video frames.
1482
+
1483
+ Args:
1484
+ first: set first frame to freeze (from 0 to I64_MAX) (default 0)
1485
+ last: set last frame to freeze (from 0 to I64_MAX) (default 0)
1486
+ replace: set frame to replace (from 0 to I64_MAX) (default 0)
1487
+
1488
+ Returns:
1489
+ default: the video stream
1490
+
1491
+ References:
1492
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#freezeframes)
1493
+
1494
+ """
1495
+ filter_node = filter_node_factory(
1496
+ FFMpegFilterDef(name="freezeframes", typings_input=("video", "video"), typings_output=("video",)),
1497
+ _source,
1498
+ _replace,
1499
+ **{
1500
+ "first": first,
1501
+ "last": last,
1502
+ "replace": replace,
1503
+ }
1504
+ | (extra_options or {}),
1505
+ )
1506
+ return filter_node.video(0)
1507
+
1508
+
1509
+ def guided(
1510
+ *streams: VideoStream,
1511
+ radius: Int = Default(3),
1512
+ eps: Float = Default(0.01),
1513
+ mode: Int | Literal["basic", "fast"] | Default = Default("basic"),
1514
+ sub: Int = Default(4),
1515
+ guidance: Int | Literal["off", "on"] | Default = Default("off"),
1516
+ planes: Int = Default(1),
1517
+ enable: String = Default(None),
1518
+ extra_options: dict[str, Any] = None,
1519
+ ) -> VideoStream:
1520
+ """
1521
+
1522
+ Apply Guided filter.
1523
+
1524
+ Args:
1525
+ radius: set the box radius (from 1 to 20) (default 3)
1526
+ eps: set the regularization parameter (with square) (from 0 to 1) (default 0.01)
1527
+ mode: set filtering mode (0: basic mode; 1: fast mode) (from 0 to 1) (default basic)
1528
+ sub: subsampling ratio for fast mode (from 2 to 64) (default 4)
1529
+ guidance: set guidance mode (0: off mode; 1: on mode) (from 0 to 1) (default off)
1530
+ planes: set planes to filter (from 0 to 15) (default 1)
1531
+ enable: timeline editing
1532
+
1533
+ Returns:
1534
+ default: the video stream
1535
+
1536
+ References:
1537
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#guided)
1538
+
1539
+ """
1540
+ filter_node = filter_node_factory(
1541
+ FFMpegFilterDef(
1542
+ name="guided",
1543
+ typings_input="[StreamType.video] + [StreamType.video] if guidance else []",
1544
+ typings_output=("video",),
1545
+ ),
1546
+ *streams,
1547
+ **{
1548
+ "radius": radius,
1549
+ "eps": eps,
1550
+ "mode": mode,
1551
+ "sub": sub,
1552
+ "guidance": guidance,
1553
+ "planes": planes,
1554
+ "enable": enable,
1555
+ }
1556
+ | (extra_options or {}),
1557
+ )
1558
+ return filter_node.video(0)
1559
+
1560
+
1561
+ def haldclut(
1562
+ _main: VideoStream,
1563
+ _clut: VideoStream,
1564
+ *,
1565
+ clut: Int | Literal["first", "all"] | Default = Default("all"),
1566
+ interp: Int | Literal["nearest", "trilinear", "tetrahedral", "pyramid", "prism"] | Default = Default("tetrahedral"),
1567
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
1568
+ shortest: Boolean = Default(False),
1569
+ repeatlast: Boolean = Default(True),
1570
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
1571
+ enable: String = Default(None),
1572
+ extra_options: dict[str, Any] = None,
1573
+ ) -> VideoStream:
1574
+ """
1575
+
1576
+ Adjust colors using a Hald CLUT.
1577
+
1578
+ Args:
1579
+ clut: when to process CLUT (from 0 to 1) (default all)
1580
+ interp: select interpolation mode (from 0 to 4) (default tetrahedral)
1581
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
1582
+ shortest: force termination when the shortest input terminates (default false)
1583
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
1584
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
1585
+ enable: timeline editing
1586
+
1587
+ Returns:
1588
+ default: the video stream
1589
+
1590
+ References:
1591
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#haldclut)
1592
+
1593
+ """
1594
+ filter_node = filter_node_factory(
1595
+ FFMpegFilterDef(name="haldclut", typings_input=("video", "video"), typings_output=("video",)),
1596
+ _main,
1597
+ _clut,
1598
+ **{
1599
+ "clut": clut,
1600
+ "interp": interp,
1601
+ "eof_action": eof_action,
1602
+ "shortest": shortest,
1603
+ "repeatlast": repeatlast,
1604
+ "ts_sync_mode": ts_sync_mode,
1605
+ "enable": enable,
1606
+ }
1607
+ | (extra_options or {}),
1608
+ )
1609
+ return filter_node.video(0)
1610
+
1611
+
1612
+ def headphone(
1613
+ *streams: AudioStream,
1614
+ map: String = Default(None),
1615
+ gain: Float = Default(0.0),
1616
+ lfe: Float = Default(0.0),
1617
+ type: Int | Literal["time", "freq"] | Default = Default("freq"),
1618
+ size: Int = Default(1024),
1619
+ hrir: Int | Literal["stereo", "multich"] | Default = Default("stereo"),
1620
+ extra_options: dict[str, Any] = None,
1621
+ ) -> AudioStream:
1622
+ """
1623
+
1624
+ Apply headphone binaural spatialization with HRTFs in additional streams.
1625
+
1626
+ Args:
1627
+ map: set channels convolution mappings
1628
+ gain: set gain in dB (from -20 to 40) (default 0)
1629
+ lfe: set lfe gain in dB (from -20 to 40) (default 0)
1630
+ type: set processing (from 0 to 1) (default freq)
1631
+ size: set frame size (from 1024 to 96000) (default 1024)
1632
+ hrir: set hrir format (from 0 to 1) (default stereo)
1633
+
1634
+ Returns:
1635
+ default: the audio stream
1636
+
1637
+ References:
1638
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#headphone)
1639
+
1640
+ """
1641
+ filter_node = filter_node_factory(
1642
+ FFMpegFilterDef(
1643
+ name="headphone",
1644
+ typings_input="[StreamType.audio] + [StreamType.audio] * (len(str(map).split('|')) - 1) if int(hrir) == 1 else []",
1645
+ typings_output=("audio",),
1646
+ ),
1647
+ *streams,
1648
+ **{
1649
+ "map": map,
1650
+ "gain": gain,
1651
+ "lfe": lfe,
1652
+ "type": type,
1653
+ "size": size,
1654
+ "hrir": hrir,
1655
+ }
1656
+ | (extra_options or {}),
1657
+ )
1658
+ return filter_node.audio(0)
1659
+
1660
+
1661
+ def hstack(
1662
+ *streams: VideoStream,
1663
+ inputs: Int = Auto("len(streams)"),
1664
+ shortest: Boolean = Default(False),
1665
+ extra_options: dict[str, Any] = None,
1666
+ ) -> VideoStream:
1667
+ """
1668
+
1669
+ Stack video inputs horizontally.
1670
+
1671
+ Args:
1672
+ inputs: set number of inputs (from 2 to INT_MAX) (default 2)
1673
+ shortest: force termination when the shortest input terminates (default false)
1674
+
1675
+ Returns:
1676
+ default: the video stream
1677
+
1678
+ References:
1679
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#hstack)
1680
+
1681
+ """
1682
+ filter_node = filter_node_factory(
1683
+ FFMpegFilterDef(name="hstack", typings_input="[StreamType.video] * int(inputs)", typings_output=("video",)),
1684
+ *streams,
1685
+ **{
1686
+ "inputs": inputs,
1687
+ "shortest": shortest,
1688
+ }
1689
+ | (extra_options or {}),
1690
+ )
1691
+ return filter_node.video(0)
1692
+
1693
+
1694
+ def hysteresis(
1695
+ _base: VideoStream,
1696
+ _alt: VideoStream,
1697
+ *,
1698
+ planes: Int = Default(15),
1699
+ threshold: Int = Default(0),
1700
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
1701
+ shortest: Boolean = Default(False),
1702
+ repeatlast: Boolean = Default(True),
1703
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
1704
+ enable: String = Default(None),
1705
+ extra_options: dict[str, Any] = None,
1706
+ ) -> VideoStream:
1707
+ """
1708
+
1709
+ Grow first stream into second stream by connecting components.
1710
+
1711
+ Args:
1712
+ planes: set planes (from 0 to 15) (default 15)
1713
+ threshold: set threshold (from 0 to 65535) (default 0)
1714
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
1715
+ shortest: force termination when the shortest input terminates (default false)
1716
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
1717
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
1718
+ enable: timeline editing
1719
+
1720
+ Returns:
1721
+ default: the video stream
1722
+
1723
+ References:
1724
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#hysteresis)
1725
+
1726
+ """
1727
+ filter_node = filter_node_factory(
1728
+ FFMpegFilterDef(name="hysteresis", typings_input=("video", "video"), typings_output=("video",)),
1729
+ _base,
1730
+ _alt,
1731
+ **{
1732
+ "planes": planes,
1733
+ "threshold": threshold,
1734
+ "eof_action": eof_action,
1735
+ "shortest": shortest,
1736
+ "repeatlast": repeatlast,
1737
+ "ts_sync_mode": ts_sync_mode,
1738
+ "enable": enable,
1739
+ }
1740
+ | (extra_options or {}),
1741
+ )
1742
+ return filter_node.video(0)
1743
+
1744
+
1745
+ def identity(
1746
+ _main: VideoStream,
1747
+ _reference: VideoStream,
1748
+ *,
1749
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
1750
+ shortest: Boolean = Default(False),
1751
+ repeatlast: Boolean = Default(True),
1752
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
1753
+ enable: String = Default(None),
1754
+ extra_options: dict[str, Any] = None,
1755
+ ) -> VideoStream:
1756
+ """
1757
+
1758
+ Calculate the Identity between two video streams.
1759
+
1760
+ Args:
1761
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
1762
+ shortest: force termination when the shortest input terminates (default false)
1763
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
1764
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
1765
+ enable: timeline editing
1766
+
1767
+ Returns:
1768
+ default: the video stream
1769
+
1770
+ References:
1771
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#identity)
1772
+
1773
+ """
1774
+ filter_node = filter_node_factory(
1775
+ FFMpegFilterDef(name="identity", typings_input=("video", "video"), typings_output=("video",)),
1776
+ _main,
1777
+ _reference,
1778
+ **{
1779
+ "eof_action": eof_action,
1780
+ "shortest": shortest,
1781
+ "repeatlast": repeatlast,
1782
+ "ts_sync_mode": ts_sync_mode,
1783
+ "enable": enable,
1784
+ }
1785
+ | (extra_options or {}),
1786
+ )
1787
+ return filter_node.video(0)
1788
+
1789
+
1790
+ def interleave(
1791
+ *streams: VideoStream,
1792
+ nb_inputs: Int = Auto("len(streams)"),
1793
+ duration: Int | Literal["longest", "shortest", "first"] | Default = Default("longest"),
1794
+ extra_options: dict[str, Any] = None,
1795
+ ) -> VideoStream:
1796
+ """
1797
+
1798
+ Temporally interleave video inputs.
1799
+
1800
+ Args:
1801
+ nb_inputs: set number of inputs (from 1 to INT_MAX) (default 2)
1802
+ duration: how to determine the end-of-stream (from 0 to 2) (default longest)
1803
+
1804
+ Returns:
1805
+ default: the video stream
1806
+
1807
+ References:
1808
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#interleave_002c-ainterleave)
1809
+
1810
+ """
1811
+ filter_node = filter_node_factory(
1812
+ FFMpegFilterDef(
1813
+ name="interleave", typings_input="[StreamType.video] * int(nb_inputs)", typings_output=("video",)
1814
+ ),
1815
+ *streams,
1816
+ **{
1817
+ "nb_inputs": nb_inputs,
1818
+ "duration": duration,
1819
+ }
1820
+ | (extra_options or {}),
1821
+ )
1822
+ return filter_node.video(0)
1823
+
1824
+
1825
+ def join(
1826
+ *streams: AudioStream,
1827
+ inputs: Int = Auto("len(streams)"),
1828
+ channel_layout: String = Default("stereo"),
1829
+ map: String = Default(None),
1830
+ extra_options: dict[str, Any] = None,
1831
+ ) -> AudioStream:
1832
+ """
1833
+
1834
+ Join multiple audio streams into multi-channel output.
1835
+
1836
+ Args:
1837
+ inputs: Number of input streams. (from 1 to INT_MAX) (default 2)
1838
+ channel_layout: Channel layout of the output stream. (default "stereo")
1839
+ map: A comma-separated list of channel maps in the format 'input_stream.input_channel-output_channel'.
1840
+
1841
+ Returns:
1842
+ default: the audio stream
1843
+
1844
+ References:
1845
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#join)
1846
+
1847
+ """
1848
+ filter_node = filter_node_factory(
1849
+ FFMpegFilterDef(name="join", typings_input="[StreamType.audio] * int(inputs)", typings_output=("audio",)),
1850
+ *streams,
1851
+ **{
1852
+ "inputs": inputs,
1853
+ "channel_layout": channel_layout,
1854
+ "map": map,
1855
+ }
1856
+ | (extra_options or {}),
1857
+ )
1858
+ return filter_node.audio(0)
1859
+
1860
+
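A sketch for `join`, combining two mono tracks into one stereo track. `inputs` defaults to `len(streams)` via `Auto`, so it can be left out; the channel-map syntax follows the FFmpeg join documentation. Same I/O-helper assumptions and hypothetical file names as above:

    import typed_ffmpeg as ffmpeg
    from typed_ffmpeg.filters import join

    left = ffmpeg.input("left.wav").audio    # hypothetical mono source
    right = ffmpeg.input("right.wav").audio  # hypothetical mono source
    stereo = join(left, right, channel_layout="stereo", map="0.0-FL|1.0-FR")
    stereo.output(filename="stereo.wav").run()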
1861
+ def libvmaf(
1862
+ _main: VideoStream,
1863
+ _reference: VideoStream,
1864
+ *,
1865
+ log_path: String = Default(None),
1866
+ log_fmt: String = Default("xml"),
1867
+ pool: String = Default(None),
1868
+ n_threads: Int = Default(0),
1869
+ n_subsample: Int = Default(1),
1870
+ model: String = Default("version=vmaf_v0.6.1"),
1871
+ feature: String = Default(None),
1872
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
1873
+ shortest: Boolean = Default(False),
1874
+ repeatlast: Boolean = Default(True),
1875
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
1876
+ extra_options: dict[str, Any] = None,
1877
+ ) -> VideoStream:
1878
+ """
1879
+
1880
+ Calculate the VMAF between two video streams.
1881
+
1882
+ Args:
1883
+ log_path: Set the file path to be used to write log.
1884
+ log_fmt: Set the format of the log (csv, json, xml, or sub). (default "xml")
1885
+ pool: Set the pool method to be used for computing vmaf.
1886
+ n_threads: Set number of threads to be used when computing vmaf. (from 0 to UINT32_MAX) (default 0)
1887
+ n_subsample: Set interval for frame subsampling used when computing vmaf. (from 1 to UINT32_MAX) (default 1)
1888
+ model: Set the model to be used for computing vmaf. (default "version=vmaf_v0.6.1")
1889
+ feature: Set the feature to be used for computing vmaf.
1890
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
1891
+ shortest: force termination when the shortest input terminates (default false)
1892
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
1893
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
1894
+
1895
+ Returns:
1896
+ default: the video stream
1897
+
1898
+ References:
1899
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#libvmaf)
1900
+
1901
+ """
1902
+ filter_node = filter_node_factory(
1903
+ FFMpegFilterDef(name="libvmaf", typings_input=("video", "video"), typings_output=("video",)),
1904
+ _main,
1905
+ _reference,
1906
+ **{
1907
+ "log_path": log_path,
1908
+ "log_fmt": log_fmt,
1909
+ "pool": pool,
1910
+ "n_threads": n_threads,
1911
+ "n_subsample": n_subsample,
1912
+ "model": model,
1913
+ "feature": feature,
1914
+ "eof_action": eof_action,
1915
+ "shortest": shortest,
1916
+ "repeatlast": repeatlast,
1917
+ "ts_sync_mode": ts_sync_mode,
1918
+ }
1919
+ | (extra_options or {}),
1920
+ )
1921
+ return filter_node.video(0)
1922
+
1923
+
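`libvmaf` follows the same distorted-then-reference convention; the interesting part is steering the score report through its own options. A sketch under the same assumptions as the earlier ones (the JSON path is hypothetical):

    import typed_ffmpeg as ffmpeg
    from typed_ffmpeg.filters import libvmaf

    distorted = ffmpeg.input("encoded.mp4").video  # hypothetical
    reference = ffmpeg.input("source.mp4").video   # hypothetical
    scored = libvmaf(distorted, reference, log_path="vmaf.json", log_fmt="json", n_threads=4)
    scored.output(filename="passthrough.mp4").run()  # passthrough of the first input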
1924
+ def limitdiff(
1925
+ *streams: VideoStream,
1926
+ threshold: Float = Default(0.00392157),
1927
+ elasticity: Float = Default(2.0),
1928
+ reference: Boolean = Default(False),
1929
+ planes: Int = Default(15),
1930
+ enable: String = Default(None),
1931
+ extra_options: dict[str, Any] = None,
1932
+ ) -> VideoStream:
1933
+ """
1934
+
1935
+ Apply filtering with limiting difference.
1936
+
1937
+ Args:
1938
+ threshold: set the threshold (from 0 to 1) (default 0.00392157)
1939
+ elasticity: set the elasticity (from 0 to 10) (default 2)
1940
+ reference: enable reference stream (default false)
1941
+ planes: set the planes to filter (from 0 to 15) (default 15)
1942
+ enable: timeline editing
1943
+
1944
+ Returns:
1945
+ default: the video stream
1946
+
1947
+ References:
1948
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#limitdiff)
1949
+
1950
+ """
1951
+ filter_node = filter_node_factory(
1952
+ FFMpegFilterDef(
1953
+ name="limitdiff",
1954
+ typings_input="[StreamType.video, StreamType.video] + ([StreamType.video] if reference else [])",
1955
+ typings_output=("video",),
1956
+ ),
1957
+ *streams,
1958
+ **{
1959
+ "threshold": threshold,
1960
+ "elasticity": elasticity,
1961
+ "reference": reference,
1962
+ "planes": planes,
1963
+ "enable": enable,
1964
+ }
1965
+ | (extra_options or {}),
1966
+ )
1967
+ return filter_node.video(0)
1968
+
1969
+
1970
+ def lut2(
1971
+ _srcx: VideoStream,
1972
+ _srcy: VideoStream,
1973
+ *,
1974
+ c0: String = Default("x"),
1975
+ c1: String = Default("x"),
1976
+ c2: String = Default("x"),
1977
+ c3: String = Default("x"),
1978
+ d: Int = Default(0),
1979
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
1980
+ shortest: Boolean = Default(False),
1981
+ repeatlast: Boolean = Default(True),
1982
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
1983
+ enable: String = Default(None),
1984
+ extra_options: dict[str, Any] = None,
1985
+ ) -> VideoStream:
1986
+ """
1987
+
1988
+ Compute and apply a lookup table from two video inputs.
1989
+
1990
+ Args:
1991
+ c0: set component #0 expression (default "x")
1992
+ c1: set component #1 expression (default "x")
1993
+ c2: set component #2 expression (default "x")
1994
+ c3: set component #3 expression (default "x")
1995
+ d: set output depth (from 0 to 16) (default 0)
1996
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
1997
+ shortest: force termination when the shortest input terminates (default false)
1998
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
1999
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
2000
+ enable: timeline editing
2001
+
2002
+ Returns:
2003
+ default: the video stream
2004
+
2005
+ References:
2006
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#lut2_002c-tlut2)
2007
+
2008
+ """
2009
+ filter_node = filter_node_factory(
2010
+ FFMpegFilterDef(name="lut2", typings_input=("video", "video"), typings_output=("video",)),
2011
+ _srcx,
2012
+ _srcy,
2013
+ **{
2014
+ "c0": c0,
2015
+ "c1": c1,
2016
+ "c2": c2,
2017
+ "c3": c3,
2018
+ "d": d,
2019
+ "eof_action": eof_action,
2020
+ "shortest": shortest,
2021
+ "repeatlast": repeatlast,
2022
+ "ts_sync_mode": ts_sync_mode,
2023
+ "enable": enable,
2024
+ }
2025
+ | (extra_options or {}),
2026
+ )
2027
+ return filter_node.video(0)
2028
+
2029
+
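`lut2` evaluates one expression per component, with `x` taken from the first input and `y` from the second. A sketch that visualises the per-pixel absolute difference of two clips (same assumptions as above):

    import typed_ffmpeg as ffmpeg
    from typed_ffmpeg.filters import lut2

    a = ffmpeg.input("a.mp4").video  # hypothetical
    b = ffmpeg.input("b.mp4").video  # hypothetical
    diff = lut2(a, b, c0="abs(x-y)", c1="abs(x-y)", c2="abs(x-y)")
    diff.output(filename="diff.mp4").run()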
2030
+ def maskedclamp(
2031
+ _base: VideoStream,
2032
+ _dark: VideoStream,
2033
+ _bright: VideoStream,
2034
+ *,
2035
+ undershoot: Int = Default(0),
2036
+ overshoot: Int = Default(0),
2037
+ planes: Int = Default(15),
2038
+ enable: String = Default(None),
2039
+ extra_options: dict[str, Any] = None,
2040
+ ) -> VideoStream:
2041
+ """
2042
+
2043
+ Clamp first stream with second stream and third stream.
2044
+
2045
+ Args:
2046
+ undershoot: set undershoot (from 0 to 65535) (default 0)
2047
+ overshoot: set overshoot (from 0 to 65535) (default 0)
2048
+ planes: set planes (from 0 to 15) (default 15)
2049
+ enable: timeline editing
2050
+
2051
+ Returns:
2052
+ default: the video stream
2053
+
2054
+ References:
2055
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#maskedclamp)
2056
+
2057
+ """
2058
+ filter_node = filter_node_factory(
2059
+ FFMpegFilterDef(name="maskedclamp", typings_input=("video", "video", "video"), typings_output=("video",)),
2060
+ _base,
2061
+ _dark,
2062
+ _bright,
2063
+ **{
2064
+ "undershoot": undershoot,
2065
+ "overshoot": overshoot,
2066
+ "planes": planes,
2067
+ "enable": enable,
2068
+ }
2069
+ | (extra_options or {}),
2070
+ )
2071
+ return filter_node.video(0)
2072
+
2073
+
2074
+ def maskedmax(
2075
+ _source: VideoStream,
2076
+ _filter1: VideoStream,
2077
+ _filter2: VideoStream,
2078
+ *,
2079
+ planes: Int = Default(15),
2080
+ enable: String = Default(None),
2081
+ extra_options: dict[str, Any] = None,
2082
+ ) -> VideoStream:
2083
+ """
2084
+
2085
+ Apply filtering with maximum difference of two streams.
2086
+
2087
+ Args:
2088
+ planes: set planes (from 0 to 15) (default 15)
2089
+ enable: timeline editing
2090
+
2091
+ Returns:
2092
+ default: the video stream
2093
+
2094
+ References:
2095
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#maskedmax)
2096
+
2097
+ """
2098
+ filter_node = filter_node_factory(
2099
+ FFMpegFilterDef(name="maskedmax", typings_input=("video", "video", "video"), typings_output=("video",)),
2100
+ _source,
2101
+ _filter1,
2102
+ _filter2,
2103
+ **{
2104
+ "planes": planes,
2105
+ "enable": enable,
2106
+ }
2107
+ | (extra_options or {}),
2108
+ )
2109
+ return filter_node.video(0)
2110
+
2111
+
2112
+ def maskedmerge(
2113
+ _base: VideoStream,
2114
+ _overlay: VideoStream,
2115
+ _mask: VideoStream,
2116
+ *,
2117
+ planes: Int = Default(15),
2118
+ enable: String = Default(None),
2119
+ extra_options: dict[str, Any] = None,
2120
+ ) -> VideoStream:
2121
+ """
2122
+
2123
+ Merge first stream with second stream using third stream as mask.
2124
+
2125
+ Args:
2126
+ planes: set planes (from 0 to 15) (default 15)
2127
+ enable: timeline editing
2128
+
2129
+ Returns:
2130
+ default: the video stream
2131
+
2132
+ References:
2133
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#maskedmerge)
2134
+
2135
+ """
2136
+ filter_node = filter_node_factory(
2137
+ FFMpegFilterDef(name="maskedmerge", typings_input=("video", "video", "video"), typings_output=("video",)),
2138
+ _base,
2139
+ _overlay,
2140
+ _mask,
2141
+ **{
2142
+ "planes": planes,
2143
+ "enable": enable,
2144
+ }
2145
+ | (extra_options or {}),
2146
+ )
2147
+ return filter_node.video(0)
2148
+
2149
+
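`maskedmerge` blends the first two inputs through the third: where the mask is zero the first input is kept, where it is at its maximum the second input wins, and intermediate values interpolate. A sketch for selectively blurring a region (same assumptions; the blurred variant is assumed to have been produced elsewhere):

    import typed_ffmpeg as ffmpeg
    from typed_ffmpeg.filters import maskedmerge

    base = ffmpeg.input("clip.mp4").video           # hypothetical original
    blurred = ffmpeg.input("clip_blur.mp4").video   # hypothetical pre-blurred copy
    mask = ffmpeg.input("mask.png").video           # hypothetical grayscale mask
    out = maskedmerge(base, blurred, mask, planes=15)
    out.output(filename="region_blur.mp4").run()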
2150
+ def maskedmin(
2151
+ _source: VideoStream,
2152
+ _filter1: VideoStream,
2153
+ _filter2: VideoStream,
2154
+ *,
2155
+ planes: Int = Default(15),
2156
+ enable: String = Default(None),
2157
+ extra_options: dict[str, Any] = None,
2158
+ ) -> VideoStream:
2159
+ """
2160
+
2161
+ Apply filtering with minimum difference of two streams.
2162
+
2163
+ Args:
2164
+ planes: set planes (from 0 to 15) (default 15)
2165
+ enable: timeline editing
2166
+
2167
+ Returns:
2168
+ default: the video stream
2169
+
2170
+ References:
2171
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#maskedmin)
2172
+
2173
+ """
2174
+ filter_node = filter_node_factory(
2175
+ FFMpegFilterDef(name="maskedmin", typings_input=("video", "video", "video"), typings_output=("video",)),
2176
+ _source,
2177
+ _filter1,
2178
+ _filter2,
2179
+ **{
2180
+ "planes": planes,
2181
+ "enable": enable,
2182
+ }
2183
+ | (extra_options or {}),
2184
+ )
2185
+ return filter_node.video(0)
2186
+
2187
+
2188
+ def maskedthreshold(
2189
+ _source: VideoStream,
2190
+ _reference: VideoStream,
2191
+ *,
2192
+ threshold: Int = Default(1),
2193
+ planes: Int = Default(15),
2194
+ mode: Int | Literal["abs", "diff"] | Default = Default("abs"),
2195
+ enable: String = Default(None),
2196
+ extra_options: dict[str, Any] = None,
2197
+ ) -> VideoStream:
2198
+ """
2199
+
2200
+ Pick pixels comparing absolute difference of two streams with threshold.
2201
+
2202
+ Args:
2203
+ threshold: set threshold (from 0 to 65535) (default 1)
2204
+ planes: set planes (from 0 to 15) (default 15)
2205
+ mode: set mode (from 0 to 1) (default abs)
2206
+ enable: timeline editing
2207
+
2208
+ Returns:
2209
+ default: the video stream
2210
+
2211
+ References:
2212
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#maskedthreshold)
2213
+
2214
+ """
2215
+ filter_node = filter_node_factory(
2216
+ FFMpegFilterDef(name="maskedthreshold", typings_input=("video", "video"), typings_output=("video",)),
2217
+ _source,
2218
+ _reference,
2219
+ **{
2220
+ "threshold": threshold,
2221
+ "planes": planes,
2222
+ "mode": mode,
2223
+ "enable": enable,
2224
+ }
2225
+ | (extra_options or {}),
2226
+ )
2227
+ return filter_node.video(0)
2228
+
2229
+
2230
+ def mergeplanes(
2231
+ *streams: VideoStream,
2232
+ mapping: Int = Default(-1),
2233
+ format: Pix_fmt = Default("yuva444p"),
2234
+ map0s: Int = Default(0),
2235
+ map0p: Int = Default(0),
2236
+ map1s: Int = Default(0),
2237
+ map1p: Int = Default(0),
2238
+ map2s: Int = Default(0),
2239
+ map2p: Int = Default(0),
2240
+ map3s: Int = Default(0),
2241
+ map3p: Int = Default(0),
2242
+ extra_options: dict[str, Any] = None,
2243
+ ) -> VideoStream:
2244
+ """
2245
+
2246
+ Merge planes.
2247
+
2248
+ Args:
2249
+ mapping: set input to output plane mapping (from -1 to 8.58993e+08) (default -1)
2250
+ format: set output pixel format (default yuva444p)
2251
+ map0s: set 1st input to output stream mapping (from 0 to 3) (default 0)
2252
+ map0p: set 1st input to output plane mapping (from 0 to 3) (default 0)
2253
+ map1s: set 2nd input to output stream mapping (from 0 to 3) (default 0)
2254
+ map1p: set 2nd input to output plane mapping (from 0 to 3) (default 0)
2255
+ map2s: set 3rd input to output stream mapping (from 0 to 3) (default 0)
2256
+ map2p: set 3rd input to output plane mapping (from 0 to 3) (default 0)
2257
+ map3s: set 4th input to output stream mapping (from 0 to 3) (default 0)
2258
+ map3p: set 4th input to output plane mapping (from 0 to 3) (default 0)
2259
+
2260
+ Returns:
2261
+ default: the video stream
2262
+
2263
+ References:
2264
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#mergeplanes)
2265
+
2266
+ """
2267
+ filter_node = filter_node_factory(
2268
+ FFMpegFilterDef(
2269
+ name="mergeplanes",
2270
+ typings_input="[StreamType.video] * int(max(hex(int(mapping))[2::2]))",
2271
+ typings_output=("video",),
2272
+ ),
2273
+ *streams,
2274
+ **{
2275
+ "mapping": mapping,
2276
+ "format": format,
2277
+ "map0s": map0s,
2278
+ "map0p": map0p,
2279
+ "map1s": map1s,
2280
+ "map1p": map1p,
2281
+ "map2s": map2s,
2282
+ "map2p": map2p,
2283
+ "map3s": map3s,
2284
+ "map3p": map3p,
2285
+ }
2286
+ | (extra_options or {}),
2287
+ )
2288
+ return filter_node.video(0)
2289
+
2290
+
2291
+ def midequalizer(
2292
+ _in0: VideoStream,
2293
+ _in1: VideoStream,
2294
+ *,
2295
+ planes: Int = Default(15),
2296
+ enable: String = Default(None),
2297
+ extra_options: dict[str, Any] = None,
2298
+ ) -> VideoStream:
2299
+ """
2300
+
2301
+ Apply Midway Equalization.
2302
+
2303
+ Args:
2304
+ planes: set planes (from 0 to 15) (default 15)
2305
+ enable: timeline editing
2306
+
2307
+ Returns:
2308
+ default: the video stream
2309
+
2310
+ References:
2311
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#midequalizer)
2312
+
2313
+ """
2314
+ filter_node = filter_node_factory(
2315
+ FFMpegFilterDef(name="midequalizer", typings_input=("video", "video"), typings_output=("video",)),
2316
+ _in0,
2317
+ _in1,
2318
+ **{
2319
+ "planes": planes,
2320
+ "enable": enable,
2321
+ }
2322
+ | (extra_options or {}),
2323
+ )
2324
+ return filter_node.video(0)
2325
+
2326
+
2327
+ def mix(
2328
+ *streams: VideoStream,
2329
+ inputs: Int = Auto("len(streams)"),
2330
+ weights: String = Default("1 1"),
2331
+ scale: Float = Default(0.0),
2332
+ planes: Flags = Default("F"),
2333
+ duration: Int | Literal["longest", "shortest", "first"] | Default = Default("longest"),
2334
+ enable: String = Default(None),
2335
+ extra_options: dict[str, Any] = None,
2336
+ ) -> VideoStream:
2337
+ """
2338
+
2339
+ Mix video inputs.
2340
+
2341
+ Args:
2342
+ inputs: set number of inputs (from 2 to 32767) (default 2)
2343
+ weights: set weight for each input (default "1 1")
2344
+ scale: set scale (from 0 to 32767) (default 0)
2345
+ planes: set what planes to filter (default F)
2346
+ duration: how to determine end of stream (from 0 to 2) (default longest)
2347
+ enable: timeline editing
2348
+
2349
+ Returns:
2350
+ default: the video stream
2351
+
2352
+ References:
2353
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#mix)
2354
+
2355
+ """
2356
+ filter_node = filter_node_factory(
2357
+ FFMpegFilterDef(name="mix", typings_input="[StreamType.video] * int(inputs)", typings_output=("video",)),
2358
+ *streams,
2359
+ **{
2360
+ "inputs": inputs,
2361
+ "weights": weights,
2362
+ "scale": scale,
2363
+ "planes": planes,
2364
+ "duration": duration,
2365
+ "enable": enable,
2366
+ }
2367
+ | (extra_options or {}),
2368
+ )
2369
+ return filter_node.video(0)
2370
+
2371
+
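`mix` is one of the variadic filters in this module: the number of inputs is inferred from `len(streams)` via `Auto`, so normally only the weights and end-of-stream policy need to be set. Sketch (same assumptions, hypothetical clips):

    import typed_ffmpeg as ffmpeg
    from typed_ffmpeg.filters import mix

    takes = [ffmpeg.input(f"take{i}.mp4").video for i in range(3)]  # hypothetical clips
    averaged = mix(*takes, weights="1 1 1", duration="shortest")
    averaged.output(filename="averaged.mp4").run()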
2372
+ def morpho(
2373
+ _default: VideoStream,
2374
+ _structure: VideoStream,
2375
+ *,
2376
+ mode: Int
2377
+ | Literal["erode", "dilate", "open", "close", "gradient", "tophat", "blackhat"]
2378
+ | Default = Default("erode"),
2379
+ planes: Int = Default(7),
2380
+ structure: Int | Literal["first", "all"] | Default = Default("all"),
2381
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
2382
+ shortest: Boolean = Default(False),
2383
+ repeatlast: Boolean = Default(True),
2384
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
2385
+ enable: String = Default(None),
2386
+ extra_options: dict[str, Any] = None,
2387
+ ) -> VideoStream:
2388
+ """
2389
+
2390
+ Apply Morphological filter.
2391
+
2392
+ Args:
2393
+ mode: set morphological transform (from 0 to 6) (default erode)
2394
+ planes: set planes to filter (from 0 to 15) (default 7)
2395
+ structure: when to process structures (from 0 to 1) (default all)
2396
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
2397
+ shortest: force termination when the shortest input terminates (default false)
2398
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
2399
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
2400
+ enable: timeline editing
2401
+
2402
+ Returns:
2403
+ default: the video stream
2404
+
2405
+ References:
2406
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#morpho)
2407
+
2408
+ """
2409
+ filter_node = filter_node_factory(
2410
+ FFMpegFilterDef(name="morpho", typings_input=("video", "video"), typings_output=("video",)),
2411
+ _default,
2412
+ _structure,
2413
+ **{
2414
+ "mode": mode,
2415
+ "planes": planes,
2416
+ "structure": structure,
2417
+ "eof_action": eof_action,
2418
+ "shortest": shortest,
2419
+ "repeatlast": repeatlast,
2420
+ "ts_sync_mode": ts_sync_mode,
2421
+ "enable": enable,
2422
+ }
2423
+ | (extra_options or {}),
2424
+ )
2425
+ return filter_node.video(0)
2426
+
2427
+
2428
+ def msad(
2429
+ _main: VideoStream,
2430
+ _reference: VideoStream,
2431
+ *,
2432
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
2433
+ shortest: Boolean = Default(False),
2434
+ repeatlast: Boolean = Default(True),
2435
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
2436
+ enable: String = Default(None),
2437
+ extra_options: dict[str, Any] = None,
2438
+ ) -> VideoStream:
2439
+ """
2440
+
2441
+ Calculate the MSAD between two video streams.
2442
+
2443
+ Args:
2444
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
2445
+ shortest: force termination when the shortest input terminates (default false)
2446
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
2447
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
2448
+ enable: timeline editing
2449
+
2450
+ Returns:
2451
+ default: the video stream
2452
+
2453
+ References:
2454
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#msad)
2455
+
2456
+ """
2457
+ filter_node = filter_node_factory(
2458
+ FFMpegFilterDef(name="msad", typings_input=("video", "video"), typings_output=("video",)),
2459
+ _main,
2460
+ _reference,
2461
+ **{
2462
+ "eof_action": eof_action,
2463
+ "shortest": shortest,
2464
+ "repeatlast": repeatlast,
2465
+ "ts_sync_mode": ts_sync_mode,
2466
+ "enable": enable,
2467
+ }
2468
+ | (extra_options or {}),
2469
+ )
2470
+ return filter_node.video(0)
2471
+
2472
+
2473
+ def multiply(
2474
+ _source: VideoStream,
2475
+ _factor: VideoStream,
2476
+ *,
2477
+ scale: Float = Default(1.0),
2478
+ offset: Float = Default(0.5),
2479
+ planes: Flags = Default("F"),
2480
+ enable: String = Default(None),
2481
+ extra_options: dict[str, Any] = None,
2482
+ ) -> VideoStream:
2483
+ """
2484
+
2485
+ Multiply first video stream with second video stream.
2486
+
2487
+ Args:
2488
+ scale: set scale (from 0 to 9) (default 1)
2489
+ offset: set offset (from -1 to 1) (default 0.5)
2490
+ planes: set planes (default F)
2491
+ enable: timeline editing
2492
+
2493
+ Returns:
2494
+ default: the video stream
2495
+
2496
+ References:
2497
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#multiply)
2498
+
2499
+ """
2500
+ filter_node = filter_node_factory(
2501
+ FFMpegFilterDef(name="multiply", typings_input=("video", "video"), typings_output=("video",)),
2502
+ _source,
2503
+ _factor,
2504
+ **{
2505
+ "scale": scale,
2506
+ "offset": offset,
2507
+ "planes": planes,
2508
+ "enable": enable,
2509
+ }
2510
+ | (extra_options or {}),
2511
+ )
2512
+ return filter_node.video(0)
2513
+
2514
+
2515
+ def overlay(
2516
+ _main: VideoStream,
2517
+ _overlay: VideoStream,
2518
+ *,
2519
+ x: String = Default("0"),
2520
+ y: String = Default("0"),
2521
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
2522
+ eval: Int | Literal["init", "frame"] | Default = Default("frame"),
2523
+ shortest: Boolean = Default(False),
2524
+ format: Int
2525
+ | Literal["yuv420", "yuv420p10", "yuv422", "yuv422p10", "yuv444", "yuv444p10", "rgb", "gbrp", "auto"]
2526
+ | Default = Default("yuv420"),
2527
+ repeatlast: Boolean = Default(True),
2528
+ alpha: Int | Literal["straight", "premultiplied"] | Default = Default("straight"),
2529
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
2530
+ enable: String = Default(None),
2531
+ extra_options: dict[str, Any] = None,
2532
+ ) -> VideoStream:
2533
+ """
2534
+
2535
+ Overlay a video source on top of the input.
2536
+
2537
+ Args:
2538
+ x: set the x expression (default "0")
2539
+ y: set the y expression (default "0")
2540
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
2541
+ eval: specify when to evaluate expressions (from 0 to 1) (default frame)
2542
+ shortest: force termination when the shortest input terminates (default false)
2543
+ format: set output format (from 0 to 8) (default yuv420)
2544
+ repeatlast: repeat overlay of the last overlay frame (default true)
2545
+ alpha: alpha format (from 0 to 1) (default straight)
2546
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
2547
+ enable: timeline editing
2548
+
2549
+ Returns:
2550
+ default: the video stream
2551
+
2552
+ References:
2553
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#overlay)
2554
+
2555
+ """
2556
+ filter_node = filter_node_factory(
2557
+ FFMpegFilterDef(name="overlay", typings_input=("video", "video"), typings_output=("video",)),
2558
+ _main,
2559
+ _overlay,
2560
+ **{
2561
+ "x": x,
2562
+ "y": y,
2563
+ "eof_action": eof_action,
2564
+ "eval": eval,
2565
+ "shortest": shortest,
2566
+ "format": format,
2567
+ "repeatlast": repeatlast,
2568
+ "alpha": alpha,
2569
+ "ts_sync_mode": ts_sync_mode,
2570
+ "enable": enable,
2571
+ }
2572
+ | (extra_options or {}),
2573
+ )
2574
+ return filter_node.video(0)
2575
+
2576
+
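A sketch for `overlay`: `x`/`y` are FFmpeg expressions in which `W`/`H` are the main input's dimensions and `w`/`h` the overlaid input's, so the watermark below lands 10 px from the bottom-right corner. Same I/O-helper assumptions; file names hypothetical:

    import typed_ffmpeg as ffmpeg
    from typed_ffmpeg.filters import overlay

    main = ffmpeg.input("main.mp4").video  # hypothetical
    logo = ffmpeg.input("logo.png").video  # hypothetical
    branded = overlay(main, logo, x="W-w-10", y="H-h-10", format="auto")
    branded.output(filename="branded.mp4").run()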
2577
+ def paletteuse(
2578
+ _default: VideoStream,
2579
+ _palette: VideoStream,
2580
+ *,
2581
+ dither: Int
2582
+ | Literal["bayer", "heckbert", "floyd_steinberg", "sierra2", "sierra2_4a", "sierra3", "burkes", "atkinson"]
2583
+ | Default = Default("sierra2_4a"),
2584
+ bayer_scale: Int = Default(2),
2585
+ diff_mode: Int | Literal["rectangle"] | Default = Default(0),
2586
+ new: Boolean = Default(False),
2587
+ alpha_threshold: Int = Default(128),
2588
+ debug_kdtree: String = Default(None),
2589
+ extra_options: dict[str, Any] = None,
2590
+ ) -> VideoStream:
2591
+ """
2592
+
2593
+ Use a palette to downsample an input video stream.
2594
+
2595
+ Args:
2596
+ dither: select dithering mode (from 0 to 8) (default sierra2_4a)
2597
+ bayer_scale: set scale for bayer dithering (from 0 to 5) (default 2)
2598
+ diff_mode: set frame difference mode (from 0 to 1) (default 0)
2599
+ new: take new palette for each output frame (default false)
2600
+ alpha_threshold: set the alpha threshold for transparency (from 0 to 255) (default 128)
2601
+ debug_kdtree: save Graphviz graph of the kdtree in specified file
2602
+
2603
+ Returns:
2604
+ default: the video stream
2605
+
2606
+ References:
2607
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#paletteuse)
2608
+
2609
+ """
2610
+ filter_node = filter_node_factory(
2611
+ FFMpegFilterDef(name="paletteuse", typings_input=("video", "video"), typings_output=("video",)),
2612
+ _default,
2613
+ _palette,
2614
+ **{
2615
+ "dither": dither,
2616
+ "bayer_scale": bayer_scale,
2617
+ "diff_mode": diff_mode,
2618
+ "new": new,
2619
+ "alpha_threshold": alpha_threshold,
2620
+ "debug_kdtree": debug_kdtree,
2621
+ }
2622
+ | (extra_options or {}),
2623
+ )
2624
+ return filter_node.video(0)
2625
+
2626
+
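`paletteuse` expects the clip first and a 256-colour palette frame second; the palette would typically come from FFmpeg's single-input `palettegen` filter, which lives outside this module, so a pre-rendered palette image is assumed here:

    import typed_ffmpeg as ffmpeg
    from typed_ffmpeg.filters import paletteuse

    clip = ffmpeg.input("clip.mp4").video        # hypothetical
    palette = ffmpeg.input("palette.png").video  # hypothetical palettegen output
    gif = paletteuse(clip, palette, dither="bayer", bayer_scale=3)
    gif.output(filename="out.gif").run()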
2627
+ def premultiply(
2628
+ *streams: VideoStream,
2629
+ planes: Int = Default(15),
2630
+ inplace: Boolean = Default(False),
2631
+ enable: String = Default(None),
2632
+ extra_options: dict[str, Any] = None,
2633
+ ) -> VideoStream:
2634
+ """
2635
+
2636
+ PreMultiply first stream with first plane of second stream.
2637
+
2638
+ Args:
2639
+ planes: set planes (from 0 to 15) (default 15)
2640
+ inplace: enable inplace mode (default false)
2641
+ enable: timeline editing
2642
+
2643
+ Returns:
2644
+ default: the video stream
2645
+
2646
+ References:
2647
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#premultiply)
2648
+
2649
+ """
2650
+ filter_node = filter_node_factory(
2651
+ FFMpegFilterDef(
2652
+ name="premultiply",
2653
+ typings_input="[StreamType.video] + [StreamType.video] if inplace else []",
2654
+ typings_output=("video",),
2655
+ ),
2656
+ *streams,
2657
+ **{
2658
+ "planes": planes,
2659
+ "inplace": inplace,
2660
+ "enable": enable,
2661
+ }
2662
+ | (extra_options or {}),
2663
+ )
2664
+ return filter_node.video(0)
2665
+
2666
+
2667
+ def psnr(
2668
+ _main: VideoStream,
2669
+ _reference: VideoStream,
2670
+ *,
2671
+ stats_file: String = Default(None),
2672
+ stats_version: Int = Default(1),
2673
+ output_max: Boolean = Default(False),
2674
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
2675
+ shortest: Boolean = Default(False),
2676
+ repeatlast: Boolean = Default(True),
2677
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
2678
+ enable: String = Default(None),
2679
+ extra_options: dict[str, Any] = None,
2680
+ ) -> VideoStream:
2681
+ """
2682
+
2683
+ Calculate the PSNR between two video streams.
2684
+
2685
+ Args:
2686
+ stats_file: Set file where to store per-frame difference information
2687
+ stats_version: Set the format version for the stats file. (from 1 to 2) (default 1)
2688
+ output_max: Add raw stats (max values) to the output log. (default false)
2689
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
2690
+ shortest: force termination when the shortest input terminates (default false)
2691
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
2692
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
2693
+ enable: timeline editing
2694
+
2695
+ Returns:
2696
+ default: the video stream
2697
+
2698
+ References:
2699
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#psnr)
2700
+
2701
+ """
2702
+ filter_node = filter_node_factory(
2703
+ FFMpegFilterDef(name="psnr", typings_input=("video", "video"), typings_output=("video",)),
2704
+ _main,
2705
+ _reference,
2706
+ **{
2707
+ "stats_file": stats_file,
2708
+ "stats_version": stats_version,
2709
+ "output_max": output_max,
2710
+ "eof_action": eof_action,
2711
+ "shortest": shortest,
2712
+ "repeatlast": repeatlast,
2713
+ "ts_sync_mode": ts_sync_mode,
2714
+ "enable": enable,
2715
+ }
2716
+ | (extra_options or {}),
2717
+ )
2718
+ return filter_node.video(0)
2719
+
2720
+
2721
+ def remap(
2722
+ _source: VideoStream,
2723
+ _xmap: VideoStream,
2724
+ _ymap: VideoStream,
2725
+ *,
2726
+ format: Int | Literal["color", "gray"] | Default = Default("color"),
2727
+ fill: Color = Default("black"),
2728
+ extra_options: dict[str, Any] = None,
2729
+ ) -> VideoStream:
2730
+ """
2731
+
2732
+ Remap pixels.
2733
+
2734
+ Args:
2735
+ format: set output format (from 0 to 1) (default color)
2736
+ fill: set the color of the unmapped pixels (default "black")
2737
+
2738
+ Returns:
2739
+ default: the video stream
2740
+
2741
+ References:
2742
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#remap)
2743
+
2744
+ """
2745
+ filter_node = filter_node_factory(
2746
+ FFMpegFilterDef(name="remap", typings_input=("video", "video", "video"), typings_output=("video",)),
2747
+ _source,
2748
+ _xmap,
2749
+ _ymap,
2750
+ **{
2751
+ "format": format,
2752
+ "fill": fill,
2753
+ }
2754
+ | (extra_options or {}),
2755
+ )
2756
+ return filter_node.video(0)
2757
+
2758
+
2759
+ def sidechaincompress(
2760
+ _main: AudioStream,
2761
+ _sidechain: AudioStream,
2762
+ *,
2763
+ level_in: Double = Default(1.0),
2764
+ mode: Int | Literal["downward", "upward"] | Default = Default("downward"),
2765
+ threshold: Double = Default(0.125),
2766
+ ratio: Double = Default(2.0),
2767
+ attack: Double = Default(20.0),
2768
+ release: Double = Default(250.0),
2769
+ makeup: Double = Default(1.0),
2770
+ knee: Double = Default(2.82843),
2771
+ link: Int | Literal["average", "maximum"] | Default = Default("average"),
2772
+ detection: Int | Literal["peak", "rms"] | Default = Default("rms"),
2773
+ level_sc: Double = Default(1.0),
2774
+ mix: Double = Default(1.0),
2775
+ extra_options: dict[str, Any] = None,
2776
+ ) -> AudioStream:
2777
+ """
2778
+
2779
+ Sidechain compressor.
2780
+
2781
+ Args:
2782
+ level_in: set input gain (from 0.015625 to 64) (default 1)
2783
+ mode: set mode (from 0 to 1) (default downward)
2784
+ threshold: set threshold (from 0.000976563 to 1) (default 0.125)
2785
+ ratio: set ratio (from 1 to 20) (default 2)
2786
+ attack: set attack (from 0.01 to 2000) (default 20)
2787
+ release: set release (from 0.01 to 9000) (default 250)
2788
+ makeup: set make up gain (from 1 to 64) (default 1)
2789
+ knee: set knee (from 1 to 8) (default 2.82843)
2790
+ link: set link type (from 0 to 1) (default average)
2791
+ detection: set detection (from 0 to 1) (default rms)
2792
+ level_sc: set sidechain gain (from 0.015625 to 64) (default 1)
2793
+ mix: set mix (from 0 to 1) (default 1)
2794
+
2795
+ Returns:
2796
+ default: the audio stream
2797
+
2798
+ References:
2799
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#sidechaincompress)
2800
+
2801
+ """
2802
+ filter_node = filter_node_factory(
2803
+ FFMpegFilterDef(name="sidechaincompress", typings_input=("audio", "audio"), typings_output=("audio",)),
2804
+ _main,
2805
+ _sidechain,
2806
+ **{
2807
+ "level_in": level_in,
2808
+ "mode": mode,
2809
+ "threshold": threshold,
2810
+ "ratio": ratio,
2811
+ "attack": attack,
2812
+ "release": release,
2813
+ "makeup": makeup,
2814
+ "knee": knee,
2815
+ "link": link,
2816
+ "detection": detection,
2817
+ "level_sc": level_sc,
2818
+ "mix": mix,
2819
+ }
2820
+ | (extra_options or {}),
2821
+ )
2822
+ return filter_node.audio(0)
2823
+
2824
+
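A sketch for `sidechaincompress` that ducks a music bed whenever a voice track is present: the stream to be compressed goes first, the sidechain (trigger) second. Same assumptions as the earlier sketches:

    import typed_ffmpeg as ffmpeg
    from typed_ffmpeg.filters import sidechaincompress

    music = ffmpeg.input("music.wav").audio  # hypothetical bed
    voice = ffmpeg.input("voice.wav").audio  # hypothetical trigger
    ducked = sidechaincompress(music, voice, threshold=0.05, ratio=8,
                               attack=5, release=300, makeup=2)
    ducked.output(filename="ducked.wav").run()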
2825
+ def sidechaingate(
2826
+ _main: AudioStream,
2827
+ _sidechain: AudioStream,
2828
+ *,
2829
+ level_in: Double = Default(1.0),
2830
+ mode: Int | Literal["downward", "upward"] | Default = Default("downward"),
2831
+ range: Double = Default(0.06125),
2832
+ threshold: Double = Default(0.125),
2833
+ ratio: Double = Default(2.0),
2834
+ attack: Double = Default(20.0),
2835
+ release: Double = Default(250.0),
2836
+ makeup: Double = Default(1.0),
2837
+ knee: Double = Default(2.82843),
2838
+ detection: Int | Literal["peak", "rms"] | Default = Default("rms"),
2839
+ link: Int | Literal["average", "maximum"] | Default = Default("average"),
2840
+ level_sc: Double = Default(1.0),
2841
+ enable: String = Default(None),
2842
+ extra_options: dict[str, Any] = None,
2843
+ ) -> AudioStream:
2844
+ """
2845
+
2846
+ Audio sidechain gate.
2847
+
2848
+ Args:
2849
+ level_in: set input level (from 0.015625 to 64) (default 1)
2850
+ mode: set mode (from 0 to 1) (default downward)
2851
+ range: set max gain reduction (from 0 to 1) (default 0.06125)
2852
+ threshold: set threshold (from 0 to 1) (default 0.125)
2853
+ ratio: set ratio (from 1 to 9000) (default 2)
2854
+ attack: set attack (from 0.01 to 9000) (default 20)
2855
+ release: set release (from 0.01 to 9000) (default 250)
2856
+ makeup: set makeup gain (from 1 to 64) (default 1)
2857
+ knee: set knee (from 1 to 8) (default 2.82843)
2858
+ detection: set detection (from 0 to 1) (default rms)
2859
+ link: set link (from 0 to 1) (default average)
2860
+ level_sc: set sidechain gain (from 0.015625 to 64) (default 1)
2861
+ enable: timeline editing
2862
+
2863
+ Returns:
2864
+ default: the audio stream
2865
+
2866
+ References:
2867
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#sidechaingate)
2868
+
2869
+ """
2870
+ filter_node = filter_node_factory(
2871
+ FFMpegFilterDef(name="sidechaingate", typings_input=("audio", "audio"), typings_output=("audio",)),
2872
+ _main,
2873
+ _sidechain,
2874
+ **{
2875
+ "level_in": level_in,
2876
+ "mode": mode,
2877
+ "range": range,
2878
+ "threshold": threshold,
2879
+ "ratio": ratio,
2880
+ "attack": attack,
2881
+ "release": release,
2882
+ "makeup": makeup,
2883
+ "knee": knee,
2884
+ "detection": detection,
2885
+ "link": link,
2886
+ "level_sc": level_sc,
2887
+ "enable": enable,
2888
+ }
2889
+ | (extra_options or {}),
2890
+ )
2891
+ return filter_node.audio(0)
2892
+
2893
+
2894
+ def signature(
2895
+ *streams: VideoStream,
2896
+ detectmode: Int | Literal["off", "full", "fast"] | Default = Default("off"),
2897
+ nb_inputs: Int = Auto("len(streams)"),
2898
+ filename: String = Default(""),
2899
+ format: Int | Literal["binary", "xml"] | Default = Default("binary"),
2900
+ th_d: Int = Default(9000),
2901
+ th_dc: Int = Default(60000),
2902
+ th_xh: Int = Default(116),
2903
+ th_di: Int = Default(0),
2904
+ th_it: Double = Default(0.5),
2905
+ extra_options: dict[str, Any] = None,
2906
+ ) -> VideoStream:
2907
+ """
2908
+
2909
+ Calculate the MPEG-7 video signature.
2910
+
2911
+ Args:
2912
+ detectmode: set the detectmode (from 0 to 2) (default off)
2913
+ nb_inputs: number of inputs (from 1 to INT_MAX) (default 1)
2914
+ filename: filename for output files (default "")
2915
+ format: set output format (from 0 to 1) (default binary)
2916
+ th_d: threshold to detect one word as similar (from 1 to INT_MAX) (default 9000)
2917
+ th_dc: threshold to detect all words as similar (from 1 to INT_MAX) (default 60000)
2918
+ th_xh: threshold to detect frames as similar (from 1 to INT_MAX) (default 116)
2919
+ th_di: minimum length of matching sequence in frames (from 0 to INT_MAX) (default 0)
2920
+ th_it: threshold for relation of good to all frames (from 0 to 1) (default 0.5)
2921
+
2922
+ Returns:
2923
+ default: the video stream
2924
+
2925
+ References:
2926
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#signature)
2927
+
2928
+ """
2929
+ filter_node = filter_node_factory(
2930
+ FFMpegFilterDef(
2931
+ name="signature", typings_input="[StreamType.video] * int(nb_inputs)", typings_output=("video",)
2932
+ ),
2933
+ *streams,
2934
+ **{
2935
+ "detectmode": detectmode,
2936
+ "nb_inputs": nb_inputs,
2937
+ "filename": filename,
2938
+ "format": format,
2939
+ "th_d": th_d,
2940
+ "th_dc": th_dc,
2941
+ "th_xh": th_xh,
2942
+ "th_di": th_di,
2943
+ "th_it": th_it,
2944
+ }
2945
+ | (extra_options or {}),
2946
+ )
2947
+ return filter_node.video(0)
2948
+
2949
+
2950
+ def spectrumsynth(
2951
+ _magnitude: VideoStream,
2952
+ _phase: VideoStream,
2953
+ *,
2954
+ sample_rate: Int = Default(44100),
2955
+ channels: Int = Default(1),
2956
+ scale: Int | Literal["lin", "log"] | Default = Default("log"),
2957
+ slide: Int | Literal["replace", "scroll", "fullframe", "rscroll"] | Default = Default("fullframe"),
2958
+ win_func: Int
2959
+ | Literal[
2960
+ "rect",
2961
+ "bartlett",
2962
+ "hann",
2963
+ "hanning",
2964
+ "hamming",
2965
+ "blackman",
2966
+ "welch",
2967
+ "flattop",
2968
+ "bharris",
2969
+ "bnuttall",
2970
+ "bhann",
2971
+ "sine",
2972
+ "nuttall",
2973
+ "lanczos",
2974
+ "gauss",
2975
+ "tukey",
2976
+ "dolph",
2977
+ "cauchy",
2978
+ "parzen",
2979
+ "poisson",
2980
+ "bohman",
2981
+ "kaiser",
2982
+ ]
2983
+ | Default = Default("rect"),
2984
+ overlap: Float = Default(1.0),
2985
+ orientation: Int | Literal["vertical", "horizontal"] | Default = Default("vertical"),
2986
+ extra_options: dict[str, Any] = None,
2987
+ ) -> AudioStream:
2988
+ """
2989
+
2990
+ Convert input spectrum videos to audio output.
2991
+
2992
+ Args:
2993
+ sample_rate: set sample rate (from 15 to INT_MAX) (default 44100)
2994
+ channels: set channels (from 1 to 8) (default 1)
2995
+ scale: set input amplitude scale (from 0 to 1) (default log)
2996
+ slide: set input sliding mode (from 0 to 3) (default fullframe)
2997
+ win_func: set window function (from 0 to 20) (default rect)
2998
+ overlap: set window overlap (from 0 to 1) (default 1)
2999
+ orientation: set orientation (from 0 to 1) (default vertical)
3000
+
3001
+ Returns:
3002
+ default: the audio stream
3003
+
3004
+ References:
3005
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#spectrumsynth)
3006
+
3007
+ """
3008
+ filter_node = filter_node_factory(
3009
+ FFMpegFilterDef(name="spectrumsynth", typings_input=("video", "video"), typings_output=("audio",)),
3010
+ _magnitude,
3011
+ _phase,
3012
+ **{
3013
+ "sample_rate": sample_rate,
3014
+ "channels": channels,
3015
+ "scale": scale,
3016
+ "slide": slide,
3017
+ "win_func": win_func,
3018
+ "overlap": overlap,
3019
+ "orientation": orientation,
3020
+ }
3021
+ | (extra_options or {}),
3022
+ )
3023
+ return filter_node.audio(0)
3024
+
3025
+
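`spectrumsynth` is unusual in this module in that it consumes two video streams (magnitude and phase spectrograms, e.g. from `showspectrum`) and yields an audio stream. A sketch with settings in the spirit of the FFmpeg documentation's example; the spectrogram files and their parameters are hypothetical and must match how the spectrograms were produced:

    import typed_ffmpeg as ffmpeg
    from typed_ffmpeg.filters import spectrumsynth

    magnitude = ffmpeg.input("mag.nut").video  # hypothetical showspectrum output
    phase = ffmpeg.input("phase.nut").video    # hypothetical
    restored = spectrumsynth(magnitude, phase, sample_rate=44100, channels=2,
                             scale="log", slide="fullframe",
                             win_func="hann", overlap=0.875)
    restored.output(filename="restored.wav").run()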
3026
+ def ssim(
3027
+ _main: VideoStream,
3028
+ _reference: VideoStream,
3029
+ *,
3030
+ stats_file: String = Default(None),
3031
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
3032
+ shortest: Boolean = Default(False),
3033
+ repeatlast: Boolean = Default(True),
3034
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
3035
+ enable: String = Default(None),
3036
+ extra_options: dict[str, Any] = None,
3037
+ ) -> VideoStream:
3038
+ """
3039
+
3040
+ Calculate the SSIM between two video streams.
3041
+
3042
+ Args:
3043
+ stats_file: Set file where to store per-frame difference information
3044
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
3045
+ shortest: force termination when the shortest input terminates (default false)
3046
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
3047
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
3048
+ enable: timeline editing
3049
+
3050
+ Returns:
3051
+ default: the video stream
3052
+
3053
+ References:
3054
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#ssim)
3055
+
3056
+ """
3057
+ filter_node = filter_node_factory(
3058
+ FFMpegFilterDef(name="ssim", typings_input=("video", "video"), typings_output=("video",)),
3059
+ _main,
3060
+ _reference,
3061
+ **{
3062
+ "stats_file": stats_file,
3063
+ "eof_action": eof_action,
3064
+ "shortest": shortest,
3065
+ "repeatlast": repeatlast,
3066
+ "ts_sync_mode": ts_sync_mode,
3067
+ "enable": enable,
3068
+ }
3069
+ | (extra_options or {}),
3070
+ )
3071
+ return filter_node.video(0)
3072
+
3073
+
3074
+ def streamselect(
3075
+ *streams: VideoStream,
3076
+ inputs: Int = Auto("len(streams)"),
3077
+ map: String = Default(None),
3078
+ extra_options: dict[str, Any] = None,
3079
+ ) -> FilterNode:
3080
+ """
3081
+
3082
+ Select video streams.
3083
+
3084
+ Args:
3085
+ inputs: number of input streams (from 2 to INT_MAX) (default 2)
3086
+ map: input indexes to remap to outputs
3087
+
3088
+ Returns:
3089
+ filter_node: the filter node
3090
+
3091
+
3092
+ References:
3093
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#streamselect_002c-astreamselect)
3094
+
3095
+ """
3096
+ filter_node = filter_node_factory(
3097
+ FFMpegFilterDef(
3098
+ name="streamselect",
3099
+ typings_input="[StreamType.video] * int(inputs)",
3100
+ typings_output="[StreamType.video] * len(re.findall(r'\\d+', str(map)))",
3101
+ ),
3102
+ *streams,
3103
+ **{
3104
+ "inputs": inputs,
3105
+ "map": map,
3106
+ }
3107
+ | (extra_options or {}),
3108
+ )
3109
+
3110
+ return filter_node
3111
+
3112
+
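Unlike most functions in this module, `streamselect` returns the raw `FilterNode`, because the number of outputs depends on the `map` string (one output per index listed). A construction-only sketch that swaps two inputs; each selected output would still need its own sink before running:

    import typed_ffmpeg as ffmpeg
    from typed_ffmpeg.filters import streamselect

    a = ffmpeg.input("a.mp4").video  # hypothetical
    b = ffmpeg.input("b.mp4").video  # hypothetical
    node = streamselect(a, b, map="1 0")
    first = node.video(0)   # now carries input b
    second = node.video(1)  # now carries input a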
3113
+ def threshold(
3114
+ _default: VideoStream,
3115
+ _threshold: VideoStream,
3116
+ _min: VideoStream,
3117
+ _max: VideoStream,
3118
+ *,
3119
+ planes: Int = Default(15),
3120
+ enable: String = Default(None),
3121
+ extra_options: dict[str, Any] = None,
3122
+ ) -> VideoStream:
3123
+ """
3124
+
3125
+ Threshold first video stream using other video streams.
3126
+
3127
+ Args:
3128
+ planes: set planes to filter (from 0 to 15) (default 15)
3129
+ enable: timeline editing
3130
+
3131
+ Returns:
3132
+ default: the video stream
3133
+
3134
+ References:
3135
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#threshold)
3136
+
3137
+ """
3138
+ filter_node = filter_node_factory(
3139
+ FFMpegFilterDef(
3140
+ name="threshold", typings_input=("video", "video", "video", "video"), typings_output=("video",)
3141
+ ),
3142
+ _default,
3143
+ _threshold,
3144
+ _min,
3145
+ _max,
3146
+ **{
3147
+ "planes": planes,
3148
+ "enable": enable,
3149
+ }
3150
+ | (extra_options or {}),
3151
+ )
3152
+ return filter_node.video(0)
3153
+
3154
+
3155
+ def unpremultiply(
3156
+ *streams: VideoStream,
3157
+ planes: Int = Default(15),
3158
+ inplace: Boolean = Default(False),
3159
+ enable: String = Default(None),
3160
+ extra_options: dict[str, Any] = None,
3161
+ ) -> VideoStream:
3162
+ """
3163
+
3164
+ UnPreMultiply first stream with first plane of second stream.
3165
+
3166
+ Args:
3167
+ planes: set planes (from 0 to 15) (default 15)
3168
+ inplace: enable inplace mode (default false)
3169
+ enable: timeline editing
3170
+
3171
+ Returns:
3172
+ default: the video stream
3173
+
3174
+ References:
3175
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#unpremultiply)
3176
+
3177
+ """
3178
+ filter_node = filter_node_factory(
3179
+ FFMpegFilterDef(
3180
+ name="unpremultiply",
3181
+ typings_input="[StreamType.video] + ([StreamType.video] if inplace else [])",
3182
+ typings_output=("video",),
3183
+ ),
3184
+ *streams,
3185
+ **{
3186
+ "planes": planes,
3187
+ "inplace": inplace,
3188
+ "enable": enable,
3189
+ }
3190
+ | (extra_options or {}),
3191
+ )
3192
+ return filter_node.video(0)
3193
+
3194
+
3195
+ def varblur(
3196
+ _default: VideoStream,
3197
+ _radius: VideoStream,
3198
+ *,
3199
+ min_r: Int = Default(0),
3200
+ max_r: Int = Default(8),
3201
+ planes: Int = Default(15),
3202
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
3203
+ shortest: Boolean = Default(False),
3204
+ repeatlast: Boolean = Default(True),
3205
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
3206
+ enable: String = Default(None),
3207
+ extra_options: dict[str, Any] = None,
3208
+ ) -> VideoStream:
3209
+ """
3210
+
3211
+ Apply Variable Blur filter.
3212
+
3213
+ Args:
3214
+ min_r: set min blur radius (from 0 to 254) (default 0)
3215
+ max_r: set max blur radius (from 1 to 255) (default 8)
3216
+ planes: set planes to filter (from 0 to 15) (default 15)
3217
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
3218
+ shortest: force termination when the shortest input terminates (default false)
3219
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
3220
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
3221
+ enable: timeline editing
3222
+
3223
+ Returns:
3224
+ default: the video stream
3225
+
3226
+ References:
3227
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#varblur)
3228
+
3229
+ """
3230
+ filter_node = filter_node_factory(
3231
+ FFMpegFilterDef(name="varblur", typings_input=("video", "video"), typings_output=("video",)),
3232
+ _default,
3233
+ _radius,
3234
+ **{
3235
+ "min_r": min_r,
3236
+ "max_r": max_r,
3237
+ "planes": planes,
3238
+ "eof_action": eof_action,
3239
+ "shortest": shortest,
3240
+ "repeatlast": repeatlast,
3241
+ "ts_sync_mode": ts_sync_mode,
3242
+ "enable": enable,
3243
+ }
3244
+ | (extra_options or {}),
3245
+ )
3246
+ return filter_node.video(0)
3247
+
3248
+
3249
+ def vif(
3250
+ _main: VideoStream,
3251
+ _reference: VideoStream,
3252
+ *,
3253
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
3254
+ shortest: Boolean = Default(False),
3255
+ repeatlast: Boolean = Default(True),
3256
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
3257
+ enable: String = Default(None),
3258
+ extra_options: dict[str, Any] = None,
3259
+ ) -> VideoStream:
3260
+ """
3261
+
3262
+ Calculate the VIF between two video streams.
3263
+
3264
+ Args:
3265
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
3266
+ shortest: force termination when the shortest input terminates (default false)
3267
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
3268
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
3269
+ enable: timeline editing
3270
+
3271
+ Returns:
3272
+ default: the video stream
3273
+
3274
+ References:
3275
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#vif)
3276
+
3277
+ """
3278
+ filter_node = filter_node_factory(
3279
+ FFMpegFilterDef(name="vif", typings_input=("video", "video"), typings_output=("video",)),
3280
+ _main,
3281
+ _reference,
3282
+ **{
3283
+ "eof_action": eof_action,
3284
+ "shortest": shortest,
3285
+ "repeatlast": repeatlast,
3286
+ "ts_sync_mode": ts_sync_mode,
3287
+ "enable": enable,
3288
+ }
3289
+ | (extra_options or {}),
3290
+ )
3291
+ return filter_node.video(0)
3292
+
3293
+
3294
+ def vstack(
3295
+ *streams: VideoStream,
3296
+ inputs: Int = Auto("len(streams)"),
3297
+ shortest: Boolean = Default(False),
3298
+ extra_options: dict[str, Any] = None,
3299
+ ) -> VideoStream:
3300
+ """
3301
+
3302
+ Stack video inputs vertically.
3303
+
3304
+ Args:
3305
+ inputs: set number of inputs (from 2 to INT_MAX) (default 2)
3306
+ shortest: force termination when the shortest input terminates (default false)
3307
+
3308
+ Returns:
3309
+ default: the video stream
3310
+
3311
+ References:
3312
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#vstack)
3313
+
3314
+ """
3315
+ filter_node = filter_node_factory(
3316
+ FFMpegFilterDef(name="vstack", typings_input="[StreamType.video] * int(inputs)", typings_output=("video",)),
3317
+ *streams,
3318
+ **{
3319
+ "inputs": inputs,
3320
+ "shortest": shortest,
3321
+ }
3322
+ | (extra_options or {}),
3323
+ )
3324
+ return filter_node.video(0)
3325
+
3326
+
3327
+ def xcorrelate(
3328
+ _primary: VideoStream,
3329
+ _secondary: VideoStream,
3330
+ *,
3331
+ planes: Int = Default(7),
3332
+ secondary: Int | Literal["first", "all"] | Default = Default("all"),
3333
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
3334
+ shortest: Boolean = Default(False),
3335
+ repeatlast: Boolean = Default(True),
3336
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
3337
+ enable: String = Default(None),
3338
+ extra_options: dict[str, Any] = None,
3339
+ ) -> VideoStream:
3340
+ """
3341
+
3342
+ Cross-correlate first video stream with second video stream.
3343
+
3344
+ Args:
3345
+ planes: set planes to cross-correlate (from 0 to 15) (default 7)
3346
+ secondary: when to process secondary frame (from 0 to 1) (default all)
3347
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
3348
+ shortest: force termination when the shortest input terminates (default false)
3349
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
3350
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
3351
+ enable: timeline editing
3352
+
3353
+ Returns:
3354
+ default: the video stream
3355
+
3356
+ References:
3357
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#xcorrelate)
3358
+
3359
+ """
3360
+ filter_node = filter_node_factory(
3361
+ FFMpegFilterDef(name="xcorrelate", typings_input=("video", "video"), typings_output=("video",)),
3362
+ _primary,
3363
+ _secondary,
3364
+ **{
3365
+ "planes": planes,
3366
+ "secondary": secondary,
3367
+ "eof_action": eof_action,
3368
+ "shortest": shortest,
3369
+ "repeatlast": repeatlast,
3370
+ "ts_sync_mode": ts_sync_mode,
3371
+ "enable": enable,
3372
+ }
3373
+ | (extra_options or {}),
3374
+ )
3375
+ return filter_node.video(0)
3376
+
3377
+
3378
+ def xfade(
3379
+ _main: VideoStream,
3380
+ _xfade: VideoStream,
3381
+ *,
3382
+ transition: Int
3383
+ | Literal[
3384
+ "custom",
3385
+ "fade",
3386
+ "wipeleft",
3387
+ "wiperight",
3388
+ "wipeup",
3389
+ "wipedown",
3390
+ "slideleft",
3391
+ "slideright",
3392
+ "slideup",
3393
+ "slidedown",
3394
+ "circlecrop",
3395
+ "rectcrop",
3396
+ "distance",
3397
+ "fadeblack",
3398
+ "fadewhite",
3399
+ "radial",
3400
+ "smoothleft",
3401
+ "smoothright",
3402
+ "smoothup",
3403
+ "smoothdown",
3404
+ "circleopen",
3405
+ "circleclose",
3406
+ "vertopen",
3407
+ "vertclose",
3408
+ "horzopen",
3409
+ "horzclose",
3410
+ "dissolve",
3411
+ "pixelize",
3412
+ "diagtl",
3413
+ "diagtr",
3414
+ "diagbl",
3415
+ "diagbr",
3416
+ "hlslice",
3417
+ "hrslice",
3418
+ "vuslice",
3419
+ "vdslice",
3420
+ "hblur",
3421
+ "fadegrays",
3422
+ "wipetl",
3423
+ "wipetr",
3424
+ "wipebl",
3425
+ "wipebr",
3426
+ "squeezeh",
3427
+ "squeezev",
3428
+ "zoomin",
3429
+ "fadefast",
3430
+ "fadeslow",
3431
+ "hlwind",
3432
+ "hrwind",
3433
+ "vuwind",
3434
+ "vdwind",
3435
+ "coverleft",
3436
+ "coverright",
3437
+ "coverup",
3438
+ "coverdown",
3439
+ "revealleft",
3440
+ "revealright",
3441
+ "revealup",
3442
+ "revealdown",
3443
+ ]
3444
+ | Default = Default("fade"),
3445
+ duration: Duration = Default(1.0),
3446
+ offset: Duration = Default(0.0),
3447
+ expr: String = Default(None),
3448
+ extra_options: dict[str, Any] = None,
3449
+ ) -> VideoStream:
3450
+ """
3451
+
3452
+ Cross fade one video with another video.
3453
+
3454
+ Args:
3455
+ transition: set cross fade transition (from -1 to 57) (default fade)
3456
+ duration: set cross fade duration (default 1)
3457
+ offset: set cross fade start relative to first input stream (default 0)
3458
+ expr: set expression for custom transition
3459
+
3460
+ Returns:
3461
+ default: the video stream
3462
+
3463
+ References:
3464
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#xfade)
3465
+
3466
+ """
3467
+ filter_node = filter_node_factory(
3468
+ FFMpegFilterDef(name="xfade", typings_input=("video", "video"), typings_output=("video",)),
3469
+ _main,
3470
+ _xfade,
3471
+ **{
3472
+ "transition": transition,
3473
+ "duration": duration,
3474
+ "offset": offset,
3475
+ "expr": expr,
3476
+ }
3477
+ | (extra_options or {}),
3478
+ )
3479
+ return filter_node.video(0)
3480
+
3481
+
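A sketch for `xfade`: `offset` is the point in the first input at which the transition starts and `duration` its length, so for a first clip of roughly 10 seconds the values below leave a 1-second wipe at its tail. Same assumptions as the earlier sketches:

    import typed_ffmpeg as ffmpeg
    from typed_ffmpeg.filters import xfade

    first = ffmpeg.input("a.mp4").video   # hypothetical, assumed ~10 s long
    second = ffmpeg.input("b.mp4").video  # hypothetical
    joined = xfade(first, second, transition="wipeleft", duration=1, offset=9)
    joined.output(filename="joined.mp4").run()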
3482
+ def xmedian(
3483
+ *streams: VideoStream,
3484
+ inputs: Int = Auto("len(streams)"),
3485
+ planes: Int = Default(15),
3486
+ percentile: Float = Default(0.5),
3487
+ eof_action: Int | Literal["repeat", "endall", "pass"] | Default = Default("repeat"),
3488
+ shortest: Boolean = Default(False),
3489
+ repeatlast: Boolean = Default(True),
3490
+ ts_sync_mode: Int | Literal["default", "nearest"] | Default = Default("default"),
3491
+ enable: String = Default(None),
3492
+ extra_options: dict[str, Any] = None,
3493
+ ) -> VideoStream:
3494
+ """
3495
+
3496
+ Pick median pixels from several video inputs.
3497
+
3498
+ Args:
3499
+ inputs: set number of inputs (from 3 to 255) (default 3)
3500
+ planes: set planes to filter (from 0 to 15) (default 15)
3501
+ percentile: set percentile (from 0 to 1) (default 0.5)
3502
+ eof_action: Action to take when encountering EOF from secondary input (from 0 to 2) (default repeat)
3503
+ shortest: force termination when the shortest input terminates (default false)
3504
+ repeatlast: extend last frame of secondary streams beyond EOF (default true)
3505
+ ts_sync_mode: How strictly to sync streams based on secondary input timestamps (from 0 to 1) (default default)
3506
+ enable: timeline editing
3507
+
3508
+ Returns:
3509
+ default: the video stream
3510
+
3511
+ References:
3512
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#xmedian)
3513
+
3514
+ """
3515
+ filter_node = filter_node_factory(
3516
+ FFMpegFilterDef(name="xmedian", typings_input="[StreamType.video] * int(inputs)", typings_output=("video",)),
3517
+ *streams,
3518
+ **{
3519
+ "inputs": inputs,
3520
+ "planes": planes,
3521
+ "percentile": percentile,
3522
+ "eof_action": eof_action,
3523
+ "shortest": shortest,
3524
+ "repeatlast": repeatlast,
3525
+ "ts_sync_mode": ts_sync_mode,
3526
+ "enable": enable,
3527
+ }
3528
+ | (extra_options or {}),
3529
+ )
3530
+ return filter_node.video(0)
3531
+
3532
+
3533
+ def xstack(
3534
+ *streams: VideoStream,
3535
+ inputs: Int = Auto("len(streams)"),
3536
+ layout: String = Default(None),
3537
+ grid: Image_size = Default(None),
3538
+ shortest: Boolean = Default(False),
3539
+ fill: String = Default("none"),
3540
+ extra_options: dict[str, Any] = None,
3541
+ ) -> VideoStream:
3542
+ """
3543
+
3544
+ Stack video inputs into custom layout.
3545
+
3546
+ Args:
3547
+ inputs: set number of inputs (from 2 to INT_MAX) (default 2)
3548
+ layout: set custom layout
3549
+ grid: set fixed size grid layout
3550
+ shortest: force termination when the shortest input terminates (default false)
3551
+ fill: set the color for unused pixels (default "none")
3552
+
3553
+ Returns:
3554
+ default: the video stream
3555
+
3556
+ References:
3557
+ [FFmpeg Documentation](https://ffmpeg.org/ffmpeg-filters.html#xstack)
3558
+
3559
+ """
3560
+ filter_node = filter_node_factory(
3561
+ FFMpegFilterDef(name="xstack", typings_input="[StreamType.video] * int(inputs)", typings_output=("video",)),
3562
+ *streams,
3563
+ **{
3564
+ "inputs": inputs,
3565
+ "layout": layout,
3566
+ "grid": grid,
3567
+ "shortest": shortest,
3568
+ "fill": fill,
3569
+ }
3570
+ | (extra_options or {}),
3571
+ )
3572
+ return filter_node.video(0)
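Finally, a sketch for `xstack` building a 2x2 mosaic; `grid` avoids writing a manual `layout` string, and `inputs` is again inferred from the number of streams passed. Same assumptions about the I/O helpers; the camera files are hypothetical:

    import typed_ffmpeg as ffmpeg
    from typed_ffmpeg.filters import xstack

    cams = [ffmpeg.input(f"cam{i}.mp4").video for i in range(4)]  # hypothetical feeds
    wall = xstack(*cams, grid="2x2", fill="black")
    wall.output(filename="wall.mp4").run()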