typed-ffmpeg-compatible 2.7.3__py3-none-any.whl → 3.0.0a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63) hide show
  1. typed_ffmpeg/__init__.py +2 -1
  2. typed_ffmpeg/common/cache/.gitignore +3 -0
  3. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/acrossover.json +6 -0
  4. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/afir.json +9 -0
  5. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/aiir.json +6 -0
  6. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/ainterleave.json +9 -0
  7. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/amerge.json +9 -0
  8. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/amix.json +9 -0
  9. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/amovie.json +6 -0
  10. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/anequalizer.json +6 -0
  11. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/aphasemeter.json +6 -0
  12. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/asegment.json +6 -0
  13. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/aselect.json +6 -0
  14. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/asplit.json +6 -0
  15. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/astreamselect.json +9 -0
  16. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/bm3d.json +6 -0
  17. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/channelsplit.json +6 -0
  18. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/concat.json +9 -0
  19. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/decimate.json +6 -0
  20. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/ebur128.json +6 -0
  21. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/extractplanes.json +6 -0
  22. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/fieldmatch.json +6 -0
  23. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/guided.json +6 -0
  24. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/headphone.json +6 -0
  25. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/hstack.json +9 -0
  26. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/interleave.json +9 -0
  27. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/join.json +9 -0
  28. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/libplacebo.json +9 -0
  29. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/limitdiff.json +6 -0
  30. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/mergeplanes.json +6 -0
  31. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/mix.json +9 -0
  32. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/movie.json +6 -0
  33. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/premultiply.json +6 -0
  34. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/segment.json +6 -0
  35. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/select.json +6 -0
  36. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/signature.json +9 -0
  37. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/split.json +6 -0
  38. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/streamselect.json +9 -0
  39. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/unpremultiply.json +6 -0
  40. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/vstack.json +9 -0
  41. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/xmedian.json +9 -0
  42. typed_ffmpeg/common/cache/FFMpegFilterManuallyDefined/xstack.json +9 -0
  43. typed_ffmpeg/common/cache/list/filters.json +90747 -0
  44. typed_ffmpeg/common/cache.py +66 -0
  45. typed_ffmpeg/common/schema.py +9 -5
  46. typed_ffmpeg/common/serialize.py +12 -8
  47. typed_ffmpeg/compile/__init__.py +0 -0
  48. typed_ffmpeg/compile/compile_cli.py +407 -0
  49. typed_ffmpeg/compile/compile_json.py +38 -0
  50. typed_ffmpeg/compile/compile_python.py +319 -0
  51. typed_ffmpeg/{dag → compile}/context.py +26 -45
  52. typed_ffmpeg/{dag → compile}/validate.py +2 -2
  53. typed_ffmpeg/dag/global_runnable/runnable.py +6 -6
  54. typed_ffmpeg/dag/nodes.py +1 -227
  55. typed_ffmpeg/dag/schema.py +2 -18
  56. typed_ffmpeg/utils/view.py +1 -1
  57. {typed_ffmpeg_compatible-2.7.3.dist-info → typed_ffmpeg_compatible-3.0.0a0.dist-info}/METADATA +1 -1
  58. typed_ffmpeg_compatible-3.0.0a0.dist-info/RECORD +94 -0
  59. typed_ffmpeg/dag/compile.py +0 -86
  60. typed_ffmpeg_compatible-2.7.3.dist-info/RECORD +0 -48
  61. {typed_ffmpeg_compatible-2.7.3.dist-info → typed_ffmpeg_compatible-3.0.0a0.dist-info}/LICENSE +0 -0
  62. {typed_ffmpeg_compatible-2.7.3.dist-info → typed_ffmpeg_compatible-3.0.0a0.dist-info}/WHEEL +0 -0
  63. {typed_ffmpeg_compatible-2.7.3.dist-info → typed_ffmpeg_compatible-3.0.0a0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,319 @@
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Mapping
4
+ from typing import Any
5
+
6
+ from ffmpeg.streams.audio import AudioStream
7
+ from ffmpeg.streams.av import AVStream
8
+ from ffmpeg.streams.video import VideoStream
9
+
10
+ from ..common.cache import load
11
+ from ..common.schema import FFMpegFilter
12
+ from ..dag.nodes import (
13
+ FilterableStream,
14
+ FilterNode,
15
+ GlobalNode,
16
+ GlobalStream,
17
+ InputNode,
18
+ OutputNode,
19
+ OutputStream,
20
+ )
21
+ from ..dag.schema import Node, Stream
22
+ from .context import DAGContext
23
+ from .validate import validate
24
+
25
+
26
def filter_stream_typed_index(
    matched_stream: FilterableStream, context: DAGContext
) -> int:
    """
    Get the index of the matched stream among the same-typed outgoing streams of its node.

    Only outgoing streams of the same class as ``matched_stream`` are considered.
    For example, if the node has two outgoing streams, the first a video stream
    and the second an audio stream, and the matched stream is the video stream,
    the returned index is 0.

    Args:
        matched_stream: The stream to locate.
        context: The DAG context used to look up the node's outgoing streams.

    Returns:
        The index of the matched stream within the same-typed outgoing streams.
    """
    matched_outgoing_streams = [
        k
        for k in context.get_outgoing_streams(matched_stream.node)
        if isinstance(k, matched_stream.__class__)
    ]
    assert matched_stream in matched_outgoing_streams
    assert all(k.index is not None for k in matched_outgoing_streams)
    # BUGFIX: `s.index or 9999` would send a legitimate index of 0 to the end
    # of the ordering (0 is falsy). Compare against None explicitly so that
    # index 0 sorts first; the 9999 sentinel only covers the (asserted-absent)
    # None case and keeps the sort total.
    matched_outgoing_streams = sorted(
        matched_outgoing_streams,
        key=lambda s: s.index if s.index is not None else 9999,
    )

    return matched_outgoing_streams.index(matched_stream)
55
+
56
+
57
def get_input_var_name(
    stream: Stream, context: DAGContext, filter_data_dict: dict[str, FFMpegFilter]
) -> str:
    """
    Get the Python expression that references the given stream as an input.

    The expression is built on top of the owning node's output variable name.
    For example, the video leg of an input node named ``input_0`` renders as
    ``input_0.video``.

    Args:
        stream: The stream to build the reference expression for.
        context: The DAG context (provides node IDs and outgoing streams).
        filter_data_dict: FFmpeg filter definitions keyed by filter name, used
            to decide how a filter's outputs are addressed.

    Returns:
        The Python expression referencing the stream.

    Raises:
        ValueError: If the stream type is not one of the handled cases.
    """
    match stream:
        case AVStream():
            # An AV stream stands for the whole node output, so it must not
            # carry a per-stream index.
            assert stream.index is None
            return get_output_var_name(stream.node, context)
        case VideoStream():
            match stream.node:
                case InputNode():
                    return f"{get_output_var_name(stream.node, context)}.video"
                case FilterNode():
                    # Dynamic-output filters are addressed by typed position,
                    # multi-output filters by raw index, single-output filters
                    # by the node variable itself.
                    if filter_data_dict[stream.node.name].is_dynamic_output:
                        return f"{get_output_var_name(stream.node, context)}.video({filter_stream_typed_index(stream, context)})"
                    elif (
                        len(filter_data_dict[stream.node.name].stream_typings_output)
                        > 1
                    ):
                        return f"{get_output_var_name(stream.node, context)}[{stream.index}]"
                    else:
                        return f"{get_output_var_name(stream.node, context)}"
                # NOTE(review): no fallback case here — a VideoStream owned by
                # any other node type falls through and returns None. Presumably
                # unreachable for validated graphs; confirm.
        case AudioStream():
            match stream.node:
                case InputNode():
                    return f"{get_output_var_name(stream.node, context)}.audio"
                case FilterNode():
                    # Same addressing rules as for video streams above.
                    if filter_data_dict[stream.node.name].is_dynamic_output:
                        return f"{get_output_var_name(stream.node, context)}.audio({filter_stream_typed_index(stream, context)})"
                    elif (
                        len(filter_data_dict[stream.node.name].stream_typings_output)
                        > 1
                    ):
                        return f"{get_output_var_name(stream.node, context)}[{stream.index}]"
                    else:
                        return f"{get_output_var_name(stream.node, context)}"
                # NOTE(review): same silent fall-through as the video branch.
        case OutputStream():
            return f"{get_output_var_name(stream.node, context)}"
        case GlobalStream():
            return f"{get_output_var_name(stream.node, context)}"
        case _:
            raise ValueError(f"Unknown node type: {type(stream.node)}")
111
+
112
+
113
def get_output_var_name(node: Node, context: DAGContext) -> str:
    """
    Get the output variable name for the node.

    The name is a type-specific prefix joined with the node's unique ID from
    the context. For example, an input node with ID 0 is named ``input_0``.

    Args:
        node: The node to name.
        context: The DAG context providing per-node integer IDs.

    Returns:
        The output variable name for the node.

    Raises:
        ValueError: If the node is not one of the known node types.
    """
    # Dispatch table checked in order; mirrors the original match-case arms.
    prefix_by_type = (
        (InputNode, "input"),
        (FilterNode, "node"),
        (OutputNode, "output"),
        (GlobalNode, "global"),
    )
    for node_type, prefix in prefix_by_type:
        if isinstance(node, node_type):
            return f"{prefix}_{context.node_ids[node]}"
    raise ValueError(f"Unknown node type: {type(node)}")
138
+
139
+
140
def compile_kwargs(kwargs: Mapping[str, Any]) -> str:
    """
    Render keyword arguments as Python call-argument source text.

    Each entry is formatted as ``key=repr(value)`` and the entries are joined
    with ``", "``. For example, ``{"a": 1, "b": 2}`` becomes ``"a=1, b=2"``.

    Args:
        kwargs: The keyword arguments to render.

    Returns:
        The rendered argument string (empty for an empty mapping).
    """
    rendered = [f"{key}={value!r}" for key, value in kwargs.items()]
    return ", ".join(rendered)
154
+
155
+
156
def compile_fluent(code: list[str]) -> list[str]:
    """
    Inline single-use assignments to produce fluent-style code.

    Each element of ``code`` must be an assignment of the form ``var = expr``.
    When a variable is referenced exactly once across the *later* expressions,
    its assignment is removed and the single reference is replaced by the
    expression, chaining the calls. Variables used zero or multiple times keep
    their assignments.

    Args:
        code: Assignment statements of the form ``"var = expr"``.

    Returns:
        The compiled statements with single-use assignments inlined.
    """
    import re

    buffer = [k.split("=", 1)[:2] for k in code]

    # If the var is used in the following expressions exactly once, remove the
    # assignment and substitute the expression for the var; otherwise keep it.
    processed_index = 0
    while processed_index < len(buffer):
        var, expr = buffer[processed_index]
        var = var.strip()
        expr = expr.strip()

        # BUGFIX: match whole identifiers only. A plain substring count/replace
        # would treat e.g. "node_1" as occurring inside "node_10" and corrupt
        # the generated code.
        var_pattern = re.compile(rf"\b{re.escape(var)}\b")
        matched_times = sum(
            len(var_pattern.findall(_expr))
            for _var, _expr in buffer[processed_index + 1 :]
        )
        if matched_times != 1:
            processed_index += 1
            continue

        for i, (_var, _expr) in enumerate(buffer[processed_index + 1 :]):
            if var_pattern.search(_expr):
                # Use a callable replacement so backslashes in `expr` (e.g.
                # inside repr'd strings) are inserted literally.
                buffer[processed_index + 1 + i] = [
                    _var,
                    var_pattern.sub(lambda _m: expr, _expr),
                ]

        del buffer[processed_index]

    return [f"{k.strip()} = {v.strip()}" for k, v in buffer]
192
+
193
+
194
def compile(stream: Stream, auto_fix: bool = True, fluent: bool = True) -> str:
    """
    Compile a stream graph into equivalent typed-ffmpeg Python source code.

    The graph is validated, then emitted node-by-node in ID order: input
    nodes first, then filter nodes, output nodes, and at most one global
    node. The result is a script that rebuilds the graph and binds the final
    stream to a variable named ``result``.

    Args:
        stream: The stream to compile.
        auto_fix: Whether to auto-fix the stream during validation.
        fluent: Whether to inline single-use assignments into fluent chains.

    Returns:
        The compiled Python source, starting with ``import ffmpeg``.
    """
    stream = validate(stream, auto_fix=auto_fix)
    node = stream.node
    context = DAGContext.build(node)

    # Generated statements, one assignment per line.
    code: list[str] = []

    input_nodes = sorted(
        (node for node in context.nodes if isinstance(node, InputNode)),
        key=lambda k: context.node_ids[k],
    )

    for node in input_nodes:
        # NOTE: technically, the expression returns a stream, but since input node can reuse the same stream multiple times, we need to assign the stream to the node
        code.append(
            f"{get_output_var_name(node, context)} = ffmpeg.input('{node.filename}', {compile_kwargs(node.kwargs)})"
        )

    # Filter definitions drive how each filter's inputs/outputs are addressed.
    filter_data = load(list[FFMpegFilter], "filters")
    filter_data_dict = {f.name: f for f in filter_data}
    filter_nodes = sorted(
        (node for node in context.nodes if isinstance(node, FilterNode)),
        key=lambda k: context.node_ids[k],
    )

    for node in filter_nodes:
        filter_def = filter_data_dict[node.name]

        # Single fixed input: emit as a method call on the input stream.
        # Otherwise: emit as ffmpeg.filters.<name>(stream, stream, ...).
        if (
            not filter_def.is_dynamic_input
            and len(filter_def.stream_typings_input) == 1
        ):
            expression = f"{get_input_var_name(node.inputs[0], context, filter_data_dict)}.{node.name}({compile_kwargs(node.kwargs)})"
        else:
            in_streams_names = ", ".join(
                get_input_var_name(stream, context, filter_data_dict)
                for stream in node.inputs
            )
            expression = f"ffmpeg.filters.{node.name}({in_streams_names}, {compile_kwargs(node.kwargs)})"

        code.append(f"{get_output_var_name(node, context)} = {expression}")

    output_nodes = sorted(
        (node for node in context.nodes if isinstance(node, OutputNode)),
        key=lambda k: context.node_ids[k],
    )

    for node in output_nodes:
        in_streams_names = ", ".join(
            get_input_var_name(stream, context, filter_data_dict)
            for stream in node.inputs
        )

        # Single input: fluent `.output(...)`; multiple: `ffmpeg.output(...)`.
        if len(node.inputs) == 1:
            code.append(
                f"{get_output_var_name(node, context)} = {get_input_var_name(node.inputs[0], context, filter_data_dict)}.output(filename='{node.filename}', {compile_kwargs(node.kwargs)})"
            )
        else:
            code.append(
                f"{get_output_var_name(node, context)} = ffmpeg.output({in_streams_names}, filename='{node.filename}', {compile_kwargs(node.kwargs)})"
            )

    global_nodes = sorted(
        (node for node in context.nodes if isinstance(node, GlobalNode)),
        key=lambda k: context.node_ids[k],
    )

    assert len(global_nodes) <= 1, "Only one global node is supported"

    if global_nodes:
        node = global_nodes[0]

        if len(node.inputs) > 1:
            in_streams_names = ", ".join(
                get_input_var_name(s, context, filter_data_dict) for s in node.inputs
            )
            code.append(
                f"{get_output_var_name(node, context)} = ffmpeg.merge_outputs({in_streams_names}).global_args({compile_kwargs(node.kwargs)})"
            )
        else:
            code.append(
                f"{get_output_var_name(node, context)} = {get_input_var_name(node.inputs[0], context, filter_data_dict)}.global_args({compile_kwargs(node.kwargs)})"
            )

    # NOTE(review): `node` here is whichever node the loops above touched
    # last (the global node if present, otherwise the last output node) —
    # presumably the terminal node of a validated graph; confirm for graphs
    # with multiple outputs and no global node.
    code.append(f"result = {get_output_var_name(node, context)}")
    # Drop trailing ", )" left by empty kwargs renderings.
    code = [k.replace(", )", ")") for k in code]

    if fluent:
        code = compile_fluent(code)

    return "\n".join(["import ffmpeg", *code])
299
+
300
+
301
def parse(code: str) -> Stream:
    """
    Parse generated typed-ffmpeg Python code back into a stream graph.

    Executes the code and reads the stream bound to the variable ``result``,
    then validates it. This is the inverse of :func:`compile`.

    Args:
        code: The Python source to execute; it must assign a ``Stream`` to a
            variable named ``result``.

    Returns:
        The validated stream parsed from the code.
    """
    # SECURITY: exec() runs arbitrary Python with full interpreter privileges.
    # Only call this on trusted, internally-generated code — never on
    # untrusted user input.
    local_vars: dict[str, Any] = {}
    exec(code, {}, local_vars)
    result = local_vars["result"]

    assert isinstance(result, Stream)
    return validate(result, auto_fix=True)
@@ -10,18 +10,19 @@ during graph validation and command-line compilation.
10
10
  from __future__ import annotations
11
11
 
12
12
  from collections import defaultdict
13
+ from collections.abc import Iterable
13
14
  from dataclasses import dataclass
14
15
  from functools import cached_property
15
- from typing import Any, TypeVar
16
+ from typing import TypeVar
16
17
 
18
+ from ..dag.nodes import FilterNode, InputNode
19
+ from ..dag.schema import Node, Stream
17
20
  from ..utils.typing import override
18
- from .nodes import FilterNode, InputNode
19
- from .schema import Node, Stream
20
21
 
21
22
  T = TypeVar("T")
22
23
 
23
24
 
24
- def _remove_duplicates(seq: list[T]) -> list[T]:
25
+ def _remove_duplicates(seq: Iterable[T]) -> list[T]:
25
26
  """
26
27
  Remove duplicates from a list while preserving the original order.
27
28
 
@@ -36,7 +37,7 @@ def _remove_duplicates(seq: list[T]) -> list[T]:
36
37
  A new list with duplicates removed, preserving the original order
37
38
  """
38
39
  seen = set()
39
- output = []
40
+ output: list[T] = []
40
41
 
41
42
  for x in seq:
42
43
  if x not in seen:
@@ -62,7 +63,8 @@ def _collect(node: Node) -> tuple[list[Node], list[Stream]]:
62
63
  - A list of all nodes in the upstream path (including the starting node)
63
64
  - A list of all streams connecting these nodes
64
65
  """
65
- nodes, streams = [node], [*node.inputs]
66
+ nodes: list[Node] = [node]
67
+ streams: list[Stream] = list(node.inputs)
66
68
 
67
69
  for stream in node.inputs:
68
70
  _nodes, _streams = _collect(stream.node)
@@ -204,6 +206,24 @@ class DAGContext:
204
206
 
205
207
  return outgoing_streams
206
208
 
209
    @cached_property
    def node_ids(self) -> dict[Node, int]:
        """
        Get a mapping of nodes to their unique integer IDs.

        IDs are assigned per node class: nodes are visited in order of their
        ``max_depth``, and each class keeps its own counter, so e.g. the first
        InputNode encountered gets 0, the second gets 1, independently of
        FilterNode numbering.

        Returns:
            A dictionary mapping nodes to their unique integer IDs
        """
        # Per-class counters; defaultdict starts each class at 0.
        node_index: dict[type[Node], int] = defaultdict(int)
        node_ids: dict[Node, int] = {}

        for node in sorted(self.nodes, key=lambda node: node.max_depth):
            node_ids[node] = node_index[node.__class__]
            node_index[node.__class__] += 1

        return node_ids
226
+
207
227
  @cached_property
208
228
  def node_labels(self) -> dict[Node, str]:
209
229
  """
@@ -274,9 +294,6 @@ class DAGContext:
274
294
  AssertionError: If the node is not an InputNode or FilterNode
275
295
  """
276
296
 
277
- assert isinstance(node, (InputNode, FilterNode)), (
278
- "Only input and filter nodes have labels"
279
- )
280
297
  return self.node_labels[node]
281
298
 
282
299
  @override
@@ -296,39 +313,3 @@ class DAGContext:
296
313
  A list of streams that originate from this node
297
314
  """
298
315
  return self.outgoing_streams[node]
299
-
300
- def render(self, obj: Any) -> Any:
301
- """
302
- Recursively convert graph objects to a human-readable representation.
303
-
304
- This method processes arbitrary objects, with special handling for graph
305
- elements like nodes and streams. It converts them to a readable string format
306
- that includes node labels. It recursively handles nested structures like
307
- lists, tuples, and dictionaries.
308
-
309
- This is primarily used for debugging, logging, and visualization purposes.
310
-
311
- Args:
312
- obj: The object to render, which may be a Node, Stream, or a container
313
- with these objects nested inside
314
-
315
- Returns:
316
- The rendered representation of the object:
317
- - For nodes: "Node(repr#label)"
318
- - For streams: "Stream(node_repr#label#index)"
319
- - For containers: recursively rendered contents
320
- - For other objects: the original object unchanged
321
- """
322
-
323
- if isinstance(obj, (list, tuple)):
324
- return [self.render(o) for o in obj]
325
- elif isinstance(obj, dict):
326
- return {self.render(k): self.render(v) for k, v in obj.items()}
327
-
328
- if isinstance(obj, Node):
329
- return f"Node({obj.repr()}#{self.node_labels[obj]})"
330
-
331
- if isinstance(obj, Stream):
332
- return f"Stream({self.render(obj.node)}#{obj.index})"
333
-
334
- return obj
@@ -12,12 +12,12 @@ from __future__ import annotations
12
12
 
13
13
  from dataclasses import replace
14
14
 
15
+ from ..dag.nodes import FilterNode, InputNode
16
+ from ..dag.schema import Node, Stream
15
17
  from ..exceptions import FFMpegValueError
16
18
  from ..streams.audio import AudioStream
17
19
  from ..streams.video import VideoStream
18
20
  from .context import DAGContext
19
- from .nodes import FilterNode, InputNode
20
- from .schema import Node, Stream
21
21
 
22
22
 
23
23
  def remove_split(
@@ -81,7 +81,7 @@ class GlobalRunable(GlobalArgs):
81
81
  def compile(
82
82
  self,
83
83
  cmd: str | list[str] = "ffmpeg",
84
- overwrite_output: bool = None,
84
+ overwrite_output: bool | None = None,
85
85
  auto_fix: bool = True,
86
86
  ) -> list[str]:
87
87
  """
@@ -111,7 +111,7 @@ class GlobalRunable(GlobalArgs):
111
111
  # Result: ['ffmpeg', '-i', 'input.mp4', 'output.mp4']
112
112
  ```
113
113
  """
114
- from ..compile import compile
114
+ from ...compile.compile_cli import compile_as_list
115
115
 
116
116
  if isinstance(cmd, str):
117
117
  cmd = [cmd]
@@ -121,12 +121,12 @@ class GlobalRunable(GlobalArgs):
121
121
  elif overwrite_output is False:
122
122
  return self.global_args(n=True).compile(cmd, auto_fix=auto_fix)
123
123
 
124
- return cmd + compile(self._global_node().stream(), auto_fix=auto_fix)
124
+ return cmd + compile_as_list(self._global_node().stream(), auto_fix=auto_fix)
125
125
 
126
126
  def compile_line(
127
127
  self,
128
128
  cmd: str | list[str] = "ffmpeg",
129
- overwrite_output: bool = None,
129
+ overwrite_output: bool | None = None,
130
130
  auto_fix: bool = True,
131
131
  ) -> str:
132
132
  """
@@ -165,7 +165,7 @@ class GlobalRunable(GlobalArgs):
165
165
  pipe_stdout: bool = False,
166
166
  pipe_stderr: bool = False,
167
167
  quiet: bool = False,
168
- overwrite_output: bool = None,
168
+ overwrite_output: bool | None = None,
169
169
  auto_fix: bool = True,
170
170
  ) -> subprocess.Popen[bytes]:
171
171
  """
@@ -222,7 +222,7 @@ class GlobalRunable(GlobalArgs):
222
222
  capture_stderr: bool = False,
223
223
  input: bytes | None = None,
224
224
  quiet: bool = False,
225
- overwrite_output: bool = None,
225
+ overwrite_output: bool | None = None,
226
226
  auto_fix: bool = True,
227
227
  ) -> tuple[bytes, bytes]:
228
228
  """