auto-editor 25.3.1__py3-none-any.whl → 26.0.1__py3-none-any.whl

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
@@ -1,26 +1,21 @@
 from __future__ import annotations
 
-import os.path
 from dataclasses import dataclass
-from subprocess import DEVNULL, PIPE
-from sys import platform
 from typing import TYPE_CHECKING
 
 import av
 import numpy as np
 
-from auto_editor.output import video_quality
+from auto_editor.output import parse_bitrate
 from auto_editor.timeline import TlImage, TlRect, TlVideo
-from auto_editor.utils.encoder import encoders
-from auto_editor.utils.types import color
 
 if TYPE_CHECKING:
     from collections.abc import Iterator
+    from typing import Any
 
-    from auto_editor.ffwrapper import FFmpeg, FileInfo
+    from auto_editor.ffwrapper import FileInfo
     from auto_editor.timeline import v3
     from auto_editor.utils.bar import Bar
-    from auto_editor.utils.container import Container
    from auto_editor.utils.log import Log
     from auto_editor.utils.types import Args
 
@@ -90,8 +85,10 @@ def make_image_cache(tl: v3) -> dict[tuple[FileInfo, int], np.ndarray]:
 
 
 def render_av(
-    ffmpeg: FFmpeg, tl: v3, args: Args, bar: Bar, ctr: Container, log: Log
-) -> tuple[str, bool]:
+    output: av.container.OutputContainer, tl: v3, args: Args, bar: Bar, log: Log
+) -> Any:
+    from_ndarray = av.VideoFrame.from_ndarray
+
     src = tl.src
     cns: dict[FileInfo, av.container.InputContainer] = {}
     decoders: dict[FileInfo, Iterator[av.VideoFrame]] = {}
@@ -99,8 +96,8 @@ def render_av(
     tous: dict[FileInfo, int] = {}
 
     target_pix_fmt = "yuv420p"  # Reasonable default
+    target_fps = tl.tb  # Always constant
     img_cache = make_image_cache(tl)
-    temp = log.temp
 
     first_src: FileInfo | None = None
     for src in tl.sources:
@@ -133,14 +130,27 @@ def render_av(
     log.debug(f"Tous: {tous}")
     log.debug(f"Clips: {tl.v}")
 
-    target_pix_fmt = target_pix_fmt if target_pix_fmt in allowed_pix_fmt else "yuv420p"
-    log.debug(f"Target pix_fmt: {target_pix_fmt}")
-
-    apply_video_later = True
-    if args.video_codec in encoders:
-        apply_video_later = set(encoders[args.video_codec]).isdisjoint(allowed_pix_fmt)
+    if args.video_codec == "gif":
+        _c = av.Codec("gif", "w")
+        if _c.video_formats is not None and target_pix_fmt in (
+            f.name for f in _c.video_formats
+        ):
+            target_pix_fmt = target_pix_fmt
+        else:
+            target_pix_fmt = "rgb8"
+        del _c
+    else:
+        target_pix_fmt = (
+            target_pix_fmt if target_pix_fmt in allowed_pix_fmt else "yuv420p"
+        )
 
-    log.debug(f"apply video quality settings now: {not apply_video_later}")
+    ops = {"mov_flags": "faststart"}
+    output_stream = output.add_stream(args.video_codec, rate=target_fps, options=ops)
+    yield output_stream
+    if not isinstance(output_stream, av.VideoStream):
+        log.error(f"Not a known video codec: {args.video_codec}")
+    if src.videos and src.videos[0].lang is not None:
+        output_stream.metadata["language"] = src.videos[0].lang
 
     if args.scale == 1.0:
         target_width, target_height = tl.res
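Taken together, these hunks rewrite `render_av` from piping raw frames into an FFmpeg subprocess to driving PyAV directly: the function is now a generator whose first `yield` (visible above) hands the caller the `av.VideoStream` it added to the output container, and whose remaining yields, shown in the last hunk of this file, are ready-to-encode `av.VideoFrame` objects. A minimal sketch of a compatible driver loop, assuming standard PyAV encode/mux calls; the real call site is elsewhere in auto-editor and is not part of this diff, and `tl`, `args`, `bar`, and `log` stand in for the real objects:

```python
import av

# Hypothetical driver for the generator-based render_av shown above.
output = av.open("out.mp4", "w")  # av.container.OutputContainer

frames = render_av(output, tl, args, bar, log)
output_stream = next(frames)  # first yield: the configured av.VideoStream

for frame in frames:  # later yields: av.VideoFrame objects
    for packet in output_stream.encode(frame):
        output.mux(packet)

for packet in output_stream.encode(None):  # flush the encoder
    output.mux(packet)
output.close()
```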
@@ -157,44 +167,33 @@ def render_av(
             scale_graph.add("buffersink"),
         )
 
-    spedup = os.path.join(temp, "spedup0.mp4")
-
-    cmd = [
-        "-hide_banner",
-        "-y",
-        "-f",
-        "rawvideo",
-        "-c:v",
-        "rawvideo",
-        "-pix_fmt",
-        target_pix_fmt,
-        "-s",
-        f"{target_width}*{target_height}",
-        "-framerate",
-        f"{tl.tb}",
-        "-i",
-        "-",
-        "-pix_fmt",
-        target_pix_fmt,
-    ]
-
-    if platform == "darwin":
-        # Fix videotoolbox issue with legacy macs
-        cmd += ["-allow_sw", "1"]
-
-    if apply_video_later:
-        cmd += ["-c:v", "mpeg4", "-qscale:v", "1"]
+    output_stream.width = target_width
+    output_stream.height = target_height
+    output_stream.pix_fmt = target_pix_fmt
+    output_stream.framerate = target_fps
+
+    color_range = src.videos[0].color_range
+    colorspace = src.videos[0].color_space
+    color_prim = src.videos[0].color_primaries
+    color_trc = src.videos[0].color_transfer
+
+    if color_range == 1 or color_range == 2:
+        output_stream.color_range = color_range
+    if colorspace in (0, 1) or (colorspace >= 3 and colorspace < 16):
+        output_stream.colorspace = colorspace
+    if color_prim == 1 or (color_prim >= 4 and color_prim < 17):
+        output_stream.color_primaries = color_prim
+    if color_trc == 1 or (color_trc >= 4 and color_trc < 22):
+        output_stream.color_trc = color_trc
+
+    if args.video_bitrate != "auto":
+        output_stream.bit_rate = parse_bitrate(args.video_bitrate, log)
+        log.debug(f"video bitrate: {output_stream.bit_rate}")
     else:
-        cmd += video_quality(args)
+        log.debug(f"[auto] video bitrate: {output_stream.bit_rate}")
 
-    # Setting SAR requires re-encoding so we do it here.
     if src is not None and src.videos and (sar := src.videos[0].sar) is not None:
-        cmd.extend(["-vf", f"setsar={sar}"])
-
-    cmd.append(spedup)
-
-    process2 = ffmpeg.Popen(cmd, stdin=PIPE, stdout=DEVNULL, stderr=DEVNULL)
-    assert process2.stdin is not None
+        output_stream.sample_aspect_ratio = sar
 
     # First few frames can have an abnormal keyframe count, so never seek there.
     seek = 10
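The numeric guards above copy `color_range`, `colorspace`, `color_primaries`, and `color_trc` to the output only when the source values are meaningful under ITU-T H.273 (for example, `color_range` 1 is limited/"tv" and 2 is full/"pc"), skipping unspecified or reserved codes rather than writing them into the file. `parse_bitrate` itself comes from `auto_editor.output` and is not shown in this diff; a rough, hypothetical sketch of what such a helper plausibly does with values like `"10M"` (the real function's behavior and error handling may differ):

```python
def parse_bitrate_sketch(text: str) -> int:
    # Hypothetical stand-in for auto_editor.output.parse_bitrate.
    # Accepts plain integers plus the common "k"/"M" suffixes.
    suffix = text[-1].lower() if text else ""
    if suffix == "k":
        return int(float(text[:-1]) * 1_000)
    if suffix == "m":
        return int(float(text[:-1]) * 1_000_000)
    return int(text)

assert parse_bitrate_sketch("10M") == 10_000_000
assert parse_bitrate_sketch("128k") == 128_000
```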
@@ -203,145 +202,131 @@ def render_av(
 
     bar.start(tl.end, "Creating new video")
 
-    bg = color(args.background)
+    bg = args.background
     null_frame = make_solid(target_width, target_height, target_pix_fmt, bg)
     frame_index = -1
-    try:
-        for index in range(tl.end):
-            obj_list: list[VideoFrame | TlRect | TlImage] = []
-            for layer in tl.v:
-                for lobj in layer:
-                    if isinstance(lobj, TlVideo):
-                        if index >= lobj.start and index < (lobj.start + lobj.dur):
-                            _i = round((lobj.offset + index - lobj.start) * lobj.speed)
-                            obj_list.append(VideoFrame(_i, lobj.src))
-                    elif index >= lobj.start and index < lobj.start + lobj.dur:
-                        obj_list.append(lobj)
 
+    for index in range(tl.end):
+        obj_list: list[VideoFrame | TlRect | TlImage] = []
+        for layer in tl.v:
+            for lobj in layer:
+                if isinstance(lobj, TlVideo):
+                    if index >= lobj.start and index < (lobj.start + lobj.dur):
+                        _i = round((lobj.offset + index - lobj.start) * lobj.speed)
+                        obj_list.append(VideoFrame(_i, lobj.src))
+                elif index >= lobj.start and index < lobj.start + lobj.dur:
+                    obj_list.append(lobj)
+
+        if tl.v1 is not None:
+            # When there can be valid gaps in the timeline.
             frame = null_frame
-            for obj in obj_list:
-                if isinstance(obj, VideoFrame):
-                    my_stream = cns[obj.src].streams.video[0]
-                    if frame_index > obj.index:
-                        log.debug(f"Seek: {frame_index} -> 0")
-                        cns[obj.src].seek(0)
-                        try:
-                            frame = next(decoders[obj.src])
-                            frame_index = round(frame.time * tl.tb)
-                        except StopIteration:
-                            pass
-
-                    while frame_index < obj.index:
-                        # Check if skipping ahead is worth it.
-                        if (
-                            obj.index - frame_index > seek_cost[obj.src]
-                            and frame_index > seek
-                        ):
-                            seek = frame_index + (seek_cost[obj.src] // 2)
-                            seek_frame = frame_index
-                            log.debug(f"Seek: {frame_index} -> {obj.index}")
-                            cns[obj.src].seek(
-                                obj.index * tous[obj.src],
-                                stream=my_stream,
-                            )
-
-                        try:
-                            frame = next(decoders[obj.src])
-                            frame_index = round(frame.time * tl.tb)
-                        except StopIteration:
-                            log.debug(f"No source frame at {index=}. Using null frame")
-                            frame = null_frame
-                            break
-
-                        if seek_frame is not None:
-                            log.debug(
-                                f"Skipped {frame_index - seek_frame} frame indexes"
-                            )
-                            frames_saved += frame_index - seek_frame
-                            seek_frame = None
-                        if frame.key_frame:
-                            log.debug(f"Keyframe {frame_index} {frame.pts}")
-
-                    if (frame.width, frame.height) != tl.res:
-                        width, height = tl.res
-                        graph = av.filter.Graph()
-                        graph.link_nodes(
-                            graph.add_buffer(template=my_stream),
-                            graph.add(
-                                "scale",
-                                f"{width}:{height}:force_original_aspect_ratio=decrease:eval=frame",
-                            ),
-                            graph.add("pad", f"{width}:{height}:-1:-1:color={bg}"),
-                            graph.add("buffersink"),
-                        ).vpush(frame)
-                        frame = graph.vpull()
-                elif isinstance(obj, TlRect):
+        # else, use the last frame
+
+        for obj in obj_list:
+            if isinstance(obj, VideoFrame):
+                my_stream = cns[obj.src].streams.video[0]
+                if frame_index > obj.index:
+                    log.debug(f"Seek: {frame_index} -> 0")
+                    cns[obj.src].seek(0)
+                    try:
+                        frame = next(decoders[obj.src])
+                        frame_index = round(frame.time * tl.tb)
+                    except StopIteration:
+                        pass
+
+                while frame_index < obj.index:
+                    # Check if skipping ahead is worth it.
+                    if (
+                        obj.index - frame_index > seek_cost[obj.src]
+                        and frame_index > seek
+                    ):
+                        seek = frame_index + (seek_cost[obj.src] // 2)
+                        seek_frame = frame_index
+                        log.debug(f"Seek: {frame_index} -> {obj.index}")
+                        cns[obj.src].seek(obj.index * tous[obj.src], stream=my_stream)
+
+                    try:
+                        frame = next(decoders[obj.src])
+                        frame_index = round(frame.time * tl.tb)
+                    except StopIteration:
+                        log.debug(f"No source frame at {index=}. Using null frame")
+                        frame = null_frame
+                        break
+
+                    if seek_frame is not None:
+                        log.debug(f"Skipped {frame_index - seek_frame} frame indexes")
+                        frames_saved += frame_index - seek_frame
+                        seek_frame = None
+                    if frame.key_frame:
+                        log.debug(f"Keyframe {frame_index} {frame.pts}")
+
+                if (frame.width, frame.height) != tl.res:
+                    width, height = tl.res
                     graph = av.filter.Graph()
-                        x, y = obj.x, obj.y
                     graph.link_nodes(
                         graph.add_buffer(template=my_stream),
                         graph.add(
-                            "drawbox",
-                            f"x={x}:y={y}:w={obj.width}:h={obj.height}:color={obj.fill}:t=fill",
+                            "scale",
+                            f"{width}:{height}:force_original_aspect_ratio=decrease:eval=frame",
                         ),
+                        graph.add("pad", f"{width}:{height}:-1:-1:color={bg}"),
                         graph.add("buffersink"),
                     ).vpush(frame)
                     frame = graph.vpull()
-                elif isinstance(obj, TlImage):
-                    img = img_cache[(obj.src, obj.width)]
-                    array = frame.to_ndarray(format="rgb24")
-
-                    overlay_h, overlay_w, _ = img.shape
-                    x_pos, y_pos = obj.x, obj.y
-
-                    x_start = max(x_pos, 0)
-                    y_start = max(y_pos, 0)
-                    x_end = min(x_pos + overlay_w, frame.width)
-                    y_end = min(y_pos + overlay_h, frame.height)
-
-                    # Clip the overlay image to fit into the frame
-                    overlay_x_start = max(-x_pos, 0)
-                    overlay_y_start = max(-y_pos, 0)
-                    overlay_x_end = overlay_w - max(
-                        (x_pos + overlay_w) - frame.width, 0
-                    )
-                    overlay_y_end = overlay_h - max(
-                        (y_pos + overlay_h) - frame.height, 0
-                    )
-                    clipped_overlay = img[
-                        overlay_y_start:overlay_y_end, overlay_x_start:overlay_x_end
-                    ]
-
-                    # Create a region of interest (ROI) on the video frame
-                    roi = array[y_start:y_end, x_start:x_end]
-
-                    # Blend the overlay image with the ROI based on the opacity
-                    roi = (1 - obj.opacity) * roi + obj.opacity * clipped_overlay
-                    array[y_start:y_end, x_start:x_end] = roi
-                    array = np.clip(array, 0, 255).astype(np.uint8)
-
-                    frame = av.VideoFrame.from_ndarray(array, format="rgb24")
-
-                if scale_graph is not None and frame.width != target_width:
-                    scale_graph.vpush(frame)
-                    frame = scale_graph.vpull()
-
-                if frame.format.name != target_pix_fmt:
-                    frame = frame.reformat(format=target_pix_fmt)
-                    bar.tick(index)
-                elif index % 3 == 0:
-                    bar.tick(index)
-
-                process2.stdin.write(frame.to_ndarray().tobytes())
-
-        bar.end()
-        process2.stdin.close()
-        process2.wait()
-    except (OSError, BrokenPipeError):
-        bar.end()
-        ffmpeg.run_check_errors(cmd, True)
-        log.error("FFmpeg Error!")
-
+            elif isinstance(obj, TlRect):
+                graph = av.filter.Graph()
+                x, y = obj.x, obj.y
+                graph.link_nodes(
+                    graph.add_buffer(template=my_stream),
+                    graph.add(
+                        "drawbox",
+                        f"x={x}:y={y}:w={obj.width}:h={obj.height}:color={obj.fill}:t=fill",
+                    ),
+                    graph.add("buffersink"),
+                ).vpush(frame)
+                frame = graph.vpull()
+            elif isinstance(obj, TlImage):
+                img = img_cache[(obj.src, obj.width)]
+                array = frame.to_ndarray(format="rgb24")
+
+                overlay_h, overlay_w, _ = img.shape
+                x_pos, y_pos = obj.x, obj.y
+
+                x_start = max(x_pos, 0)
+                y_start = max(y_pos, 0)
+                x_end = min(x_pos + overlay_w, frame.width)
+                y_end = min(y_pos + overlay_h, frame.height)
+
+                # Clip the overlay image to fit into the frame
+                overlay_x_start = max(-x_pos, 0)
+                overlay_y_start = max(-y_pos, 0)
+                overlay_x_end = overlay_w - max((x_pos + overlay_w) - frame.width, 0)
+                overlay_y_end = overlay_h - max((y_pos + overlay_h) - frame.height, 0)
+                clipped_overlay = img[
+                    overlay_y_start:overlay_y_end, overlay_x_start:overlay_x_end
+                ]
+
+                # Create a region of interest (ROI) on the video frame
+                roi = array[y_start:y_end, x_start:x_end]
+
+                # Blend the overlay image with the ROI based on the opacity
+                roi = (1 - obj.opacity) * roi + obj.opacity * clipped_overlay
+                array[y_start:y_end, x_start:x_end] = roi
+                array = np.clip(array, 0, 255).astype(np.uint8)
+
+                frame = from_ndarray(array, format="rgb24")
+
+            if scale_graph is not None and frame.width != target_width:
+                scale_graph.vpush(frame)
+                frame = scale_graph.vpull()
+
+            if frame.format.name != target_pix_fmt:
+                frame = frame.reformat(format=target_pix_fmt)
+                bar.tick(index)
+            elif index % 3 == 0:
+                bar.tick(index)
+
+            yield from_ndarray(frame.to_ndarray(), format=frame.format.name)
+
+    bar.end()
     log.debug(f"Total frames saved seeking: {frames_saved}")
-
-    return spedup, apply_video_later
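The `TlImage` branch carries its NumPy compositing logic over unchanged: clip the overlay rectangle against the frame, then linearly blend it with the region of interest by `opacity`. The same math as a standalone function, a hypothetical helper that is not part of auto-editor:

```python
import numpy as np

def blend_overlay(
    base: np.ndarray, overlay: np.ndarray, x: int, y: int, opacity: float
) -> np.ndarray:
    """Blend an RGB `overlay` onto `base` at (x, y), clipping at the edges."""
    h, w, _ = overlay.shape
    x0, y0 = max(x, 0), max(y, 0)
    x1 = min(x + w, base.shape[1])
    y1 = min(y + h, base.shape[0])
    if x0 >= x1 or y0 >= y1:
        return base  # overlay lies entirely outside the frame

    clipped = overlay[y0 - y : y1 - y, x0 - x : x1 - x]
    roi = base[y0:y1, x0:x1]
    blended = (1 - opacity) * roi + opacity * clipped
    base[y0:y1, x0:x1] = np.clip(blended, 0, 255).astype(np.uint8)
    return base
```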
@@ -3,6 +3,7 @@ from __future__ import annotations
 import sys
 from dataclasses import dataclass, field
 from fractions import Fraction
+from os import environ
 
 import auto_editor
 from auto_editor.analyze import Levels
@@ -71,11 +72,19 @@ def main(sys_args: list[str] = sys.argv[1:]) -> None:
     print(f"Auto-Editor {auto_editor.__version__}")
     text = None
 
+    no_color = bool(environ.get("NO_COLOR") or environ.get("AV_LOG_FORCE_NOCOLOR"))
+    if no_color:
+        bold_pink = bold_red = reset = ""
+    else:
+        bold_pink = "\033[1;95m"
+        bold_red = "\033[1;31m"
+        reset = "\033[0m"
+
     try:
         while True:
             try:
                 if text is None:
-                    text = input("> ")
+                    text = input(f"{bold_pink}>{reset} ")
                 else:
                     text += "\n" + input("  ")
             except KeyboardInterrupt as e:
@@ -97,8 +106,8 @@ def main(sys_args: list[str] = sys.argv[1:]) -> None:
 
             except ClosingError:
                 continue  # Allow user to continue adding text
-            except (MyError, ZeroDivisionError) as e:
-                print(f"error: {e}")
+            except MyError as e:
+                print(f"{bold_red}error{reset}: {e}")
 
             text = None
 
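The REPL's new prompt coloring follows the NO_COLOR convention (https://no-color.org): any non-empty value of `NO_COLOR`, or of FFmpeg's `AV_LOG_FORCE_NOCOLOR`, suppresses the ANSI escapes. The same pattern in isolation, as a small sketch rather than auto-editor's actual helper:

```python
from os import environ

def ansi(code: str) -> str:
    # Emit an ANSI SGR escape unless the user opted out of color.
    if environ.get("NO_COLOR") or environ.get("AV_LOG_FORCE_NOCOLOR"):
        return ""
    return f"\033[{code}m"

prompt = f"{ansi('1;95')}>{ansi('0')} "  # bold pink ">", then reset
```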
@@ -141,7 +141,10 @@ def run_tests(tests: list[Callable], args: TestArgs) -> None:
         except Exception as e:
             dur = perf_counter() - start
             total_time += dur
-            print(f"{name:<24} ({index}/{total}) {round(dur, 2):<4} secs [FAILED]")
+            print(
+                f"{name:<24} ({index}/{total}) {round(dur, 2):<4} secs \033[1;31m[FAILED]\033[0m",
+                flush=True,
+            )
             if args.no_fail_fast:
                 print(f"\n{e}")
             else:
@@ -150,8 +153,10 @@ def run_tests(tests: list[Callable], args: TestArgs) -> None:
                 raise e
         else:
             passed += 1
-            print(f"{name:<24} ({index}/{total}) {round(dur, 2):<4} secs [PASSED]")
-
+            print(
+                f"{name:<24} ({index}/{total}) {round(dur, 2):<4} secs [\033[1;32mPASSED\033[0m]",
+                flush=True,
+            )
             if outputs is not None:
                 if isinstance(outputs, str):
                     outputs = [outputs]
@@ -234,7 +239,7 @@ def main(sys_args: list[str] | None = None):
         video = cn.videos[0]
 
         assert video.fps == 30
-        assert video.time_base == Fraction(1, 30)
+        # assert video.time_base == Fraction(1, 30)
         assert video.width == 1280
         assert video.height == 720
         assert video.codec == "h264"
@@ -339,7 +344,10 @@ def main(sys_args: list[str] | None = None):
     )
 
     def track_tests():
-        return run.main(["resources/multi-track.mov"], ["--keep_tracks_seperate"])
+        out = run.main(["resources/multi-track.mov"], ["--keep_tracks_seperate"])
+        assert len(fileinfo(out).audios) == 2
+
+        return out
 
     def export_json_tests():
         out = run.main(["example.mp4"], ["--export_as_json"])
@@ -352,17 +360,17 @@ def main(sys_args: list[str] | None = None):
            """{"version": "1", "source": "example.mp4", "chunks": [ [0, 26, 1.0], [26, 34, 0] ]}"""
        )
 
-        return run.main(["v1.json"], [])
+        return "v1.json", run.main(["v1.json"], [])
 
     def premiere_named_export():
         run.main(["example.mp4"], ["--export", 'premiere:name="Foo Bar"'])
 
     def export_subtitles():
-        cn = fileinfo(run.main(["resources/mov_text.mp4"], []))
+        # cn = fileinfo(run.main(["resources/mov_text.mp4"], []))
 
-        assert len(cn.videos) == 1
-        assert len(cn.audios) == 1
-        assert len(cn.subtitles) == 1
+        # assert len(cn.videos) == 1
+        # assert len(cn.audios) == 1
+        # assert len(cn.subtitles) == 1
 
         cn = fileinfo(run.main(["resources/webvtt.mkv"], []))
 
@@ -465,47 +473,44 @@ def main(sys_args: list[str] | None = None):
     def frame_rate():
         cn = fileinfo(run.main(["example.mp4"], ["-r", "15", "--no-seek"]))
         video = cn.videos[0]
-        assert video.fps == 15
-        assert video.time_base == Fraction(1, 15)
-        assert float(video.duration) - 17.33333333333333333333333 < 3
+        assert video.fps == 15, video.fps
+        assert video.duration - 17.33333333333333333333333 < 3, video.duration
 
         cn = fileinfo(run.main(["example.mp4"], ["-r", "20"]))
         video = cn.videos[0]
-        assert video.fps == 20
-        assert video.time_base == Fraction(1, 20)
-        assert float(video.duration) - 17.33333333333333333333333 < 2
+        assert video.fps == 20, video.fps
+        assert video.duration - 17.33333333333333333333333 < 2
 
         cn = fileinfo(out := run.main(["example.mp4"], ["-r", "60"]))
         video = cn.videos[0]
 
-        assert video.fps == 60
-        assert video.time_base == Fraction(1, 60)
-        assert float(video.duration) - 17.33333333333333333333333 < 0.3
+        assert video.fps == 60, video.fps
+        assert video.duration - 17.33333333333333333333333 < 0.3
 
         return out
 
-    def embedded_image():
-        out1 = run.main(["resources/embedded-image/h264-png.mp4"], [])
-        cn = fileinfo(out1)
-        assert cn.videos[0].codec == "h264"
-        assert cn.videos[1].codec == "png"
+    # def embedded_image():
+    #     out1 = run.main(["resources/embedded-image/h264-png.mp4"], [])
+    #     cn = fileinfo(out1)
+    #     assert cn.videos[0].codec == "h264"
+    #     assert cn.videos[1].codec == "png"
 
-        out2 = run.main(["resources/embedded-image/h264-mjpeg.mp4"], [])
-        cn = fileinfo(out2)
-        assert cn.videos[0].codec == "h264"
-        assert cn.videos[1].codec == "mjpeg"
+    #     out2 = run.main(["resources/embedded-image/h264-mjpeg.mp4"], [])
+    #     cn = fileinfo(out2)
+    #     assert cn.videos[0].codec == "h264"
+    #     assert cn.videos[1].codec == "mjpeg"
 
-        out3 = run.main(["resources/embedded-image/h264-png.mkv"], [])
-        cn = fileinfo(out3)
-        assert cn.videos[0].codec == "h264"
-        assert cn.videos[1].codec == "png"
+    #     out3 = run.main(["resources/embedded-image/h264-png.mkv"], [])
+    #     cn = fileinfo(out3)
+    #     assert cn.videos[0].codec == "h264"
+    #     assert cn.videos[1].codec == "png"
 
-        out4 = run.main(["resources/embedded-image/h264-mjpeg.mkv"], [])
-        cn = fileinfo(out4)
-        assert cn.videos[0].codec == "h264"
-        assert cn.videos[1].codec == "mjpeg"
+    #     out4 = run.main(["resources/embedded-image/h264-mjpeg.mkv"], [])
+    #     cn = fileinfo(out4)
+    #     assert cn.videos[0].codec == "h264"
+    #     assert cn.videos[1].codec == "mjpeg"
 
-        return out1, out2, out3, out4
+    #     return out1, out2, out3, out4
 
     def motion():
         out = run.main(
@@ -751,7 +756,6 @@ def main(sys_args: list[str] | None = None):
         sr_units,
         backwards_range,
         cut_out,
-        embedded_image,
         gif,
         margin_tests,
         input_extension,
auto_editor/timeline.py CHANGED
@@ -6,7 +6,7 @@ from typing import TYPE_CHECKING
 from auto_editor.ffwrapper import initFileInfo, mux
 from auto_editor.lib.contracts import *
 from auto_editor.utils.cmdkw import Required, pAttr, pAttrs
-from auto_editor.utils.types import color, natural, number, threshold
+from auto_editor.utils.types import natural, number, parse_color, threshold
 
 if TYPE_CHECKING:
     from collections.abc import Iterator
@@ -165,7 +165,7 @@ rect_builder = pAttrs(
     pAttr("y", Required, is_int, int),
     pAttr("width", Required, is_int, int),
     pAttr("height", Required, is_int, int),
-    pAttr("fill", "#c4c4c4", is_str, color),
+    pAttr("fill", "#c4c4c4", is_str, parse_color),
 )
 visual_objects = {
     "rect": (TlRect, rect_builder),
@@ -35,11 +35,6 @@ class pAttrs:
         self.attrs = attrs
 
 
-def _norm_name(s: str) -> str:
-    # Python does not allow - in variable names
-    return s.replace("-", "_")
-
-
 class PLexer:
     __slots__ = ("text", "pos", "char")
 
@@ -101,6 +96,10 @@ def parse_with_palet(
     KEYWORD_SEP = "="
     kwargs: dict[str, Any] = {}
 
+    def _norm_name(s: str) -> str:
+        # Python does not allow - in variable names
+        return s.replace("-", "_")
+
     def go(text: str, c: Any) -> Any:
         try:
             env = _env if isinstance(_env, Env) else Env(_env)
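`_norm_name` moved from module scope into `parse_with_palet`, presumably its only caller. It exists because attribute names in the parsed command syntax may contain `-`, which is not valid in Python identifiers, while the parsed values end up as `kwargs` keys. An illustrative example (the attribute name here is made up):

```python
def _norm_name(s: str) -> str:
    # Python does not allow - in variable names
    return s.replace("-", "_")

assert _norm_name("my-attr") == "my_attr"
```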
@@ -174,9 +173,7 @@ def parse_with_palet(
     return kwargs
 
 
-def parse_method(
-    name: str, text: str, env: Env
-) -> tuple[str, list[Any], dict[str, Any]]:
+def parse_method(name: str, text: str) -> tuple[str, list[Any], dict[str, Any]]:
     from auto_editor.lang.palet import Lexer, Parser
 
     # Positional Arguments