auto-editor 28.0.1-py3-none-any.whl → 28.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
auto_editor/render/audio.py CHANGED
@@ -3,12 +3,12 @@ from __future__ import annotations
  from fractions import Fraction
  from io import BytesIO
  from pathlib import Path
- from typing import TYPE_CHECKING
+ from typing import TYPE_CHECKING, cast

- import bv
+ import av
  import numpy as np
- from bv import AudioFrame
- from bv.filter.loudnorm import stats
+ from av import AudioFrame
+ from av.filter.loudnorm import stats

  from auto_editor.ffwrapper import FileInfo
  from auto_editor.json import load
@@ -22,7 +22,6 @@ from auto_editor.utils.log import Log

  if TYPE_CHECKING:
  from collections.abc import Iterator
- from typing import Any

  from auto_editor.__main__ import Args

@@ -102,7 +101,7 @@ def apply_audio_normalization(
  f"i={norm['i']}:lra={norm['lra']}:tp={norm['tp']}:offset={norm['gain']}"
  )
  log.debug(f"audio norm first pass: {first_pass}")
- with bv.open(f"{pre_master}") as container:
+ with av.open(f"{pre_master}") as container:
  stats_ = stats(first_pass, container.streams.audio[0])

  name, filter_args = parse_ebu_bytes(norm, stats_, log)
@@ -117,7 +116,7 @@ def apply_audio_normalization(
  return -20.0 * np.log10(max_amplitude)
  return -99.0

- with bv.open(pre_master) as container:
+ with av.open(pre_master) as container:
  max_peak_level = -99.0
  assert len(container.streams.video) == 0
  for frame in container.decode(audio=0):
@@ -129,13 +128,13 @@ def apply_audio_normalization(
  log.print(f"peak adjustment: {adjustment:.3f}dB")
  name, filter_args = "volume", f"{adjustment}"

- with bv.open(pre_master) as container:
+ with av.open(pre_master) as container:
  input_stream = container.streams.audio[0]

- output_file = bv.open(path, mode="w")
+ output_file = av.open(path, mode="w")
  output_stream = output_file.add_stream("pcm_s16le", rate=input_stream.rate)

- graph = bv.filter.Graph()
+ graph = av.filter.Graph()
  graph.link_nodes(
  graph.add_abuffer(template=input_stream),
  graph.add(name, filter_args),
@@ -148,7 +147,7 @@ def apply_audio_normalization(
  aframe = graph.pull()
  assert isinstance(aframe, AudioFrame)
  output_file.mux(output_stream.encode(aframe))
- except (bv.BlockingIOError, bv.EOFError):
+ except (av.BlockingIOError, av.EOFError):
  break

  output_file.mux(output_stream.encode(None))
@@ -156,10 +155,10 @@ def apply_audio_normalization(


  def process_audio_clip(clip: Clip, data: np.ndarray, sr: int, log: Log) -> np.ndarray:
- to_s16 = bv.AudioResampler(format="s16", layout="stereo", rate=sr)
+ to_s16 = av.AudioResampler(format="s16", layout="stereo", rate=sr)
  input_buffer = BytesIO()

- with bv.open(input_buffer, "w", format="wav") as container:
+ with av.open(input_buffer, "w", format="wav") as container:
  output_stream = container.add_stream(
  "pcm_s16le", sample_rate=sr, format="s16", layout="stereo"
  )
@@ -173,10 +172,10 @@ def process_audio_clip(clip: Clip, data: np.ndarray, sr: int, log: Log) -> np.nd

  input_buffer.seek(0)

- input_file = bv.open(input_buffer, "r")
+ input_file = av.open(input_buffer, "r")
  input_stream = input_file.streams.audio[0]

- graph = bv.filter.Graph()
+ graph = av.filter.Graph()
  args = [graph.add_abuffer(template=input_stream)]

  if clip.speed != 1:
@@ -202,7 +201,7 @@ def process_audio_clip(clip: Clip, data: np.ndarray, sr: int, log: Log) -> np.nd
  graph.link_nodes(*args).configure()

  all_frames = []
- resampler = bv.AudioResampler(format="s16p", layout="stereo", rate=sr)
+ resampler = av.AudioResampler(format="s16p", layout="stereo", rate=sr)

  for frame in input_file.decode(input_stream):
  graph.push(frame)
@@ -214,7 +213,7 @@ def process_audio_clip(clip: Clip, data: np.ndarray, sr: int, log: Log) -> np.nd
  for resampled_frame in resampler.resample(aframe):
  all_frames.append(resampled_frame.to_ndarray())

- except (bv.BlockingIOError, bv.EOFError):
+ except (av.BlockingIOError, av.EOFError):
  break

  if not all_frames:
@@ -229,7 +228,7 @@ def mix_audio_files(sr: int, audio_paths: list[str], output_path: str) -> None:

  # First pass: determine the maximum length
  for path in audio_paths:
- container = bv.open(path)
+ container = av.open(path)
  stream = container.streams.audio[0]

  # Calculate duration in samples
@@ -241,14 +240,11 @@ def mix_audio_files(sr: int, audio_paths: list[str], output_path: str) -> None:

  # Second pass: read and mix audio
  for path in audio_paths:
- container = bv.open(path)
+ container = av.open(path)
  stream = container.streams.audio[0]

- resampler = bv.audio.resampler.AudioResampler(
- format="s16", layout="mono", rate=sr
- )
-
  audio_array: list[np.ndarray] = []
+ resampler = av.AudioResampler(format="s16", layout="mono", rate=sr)
  for frame in container.decode(audio=0):
  frame.pts = None
  resampled = resampler.resample(frame)[0]
@@ -277,7 +273,7 @@ def mix_audio_files(sr: int, audio_paths: list[str], output_path: str) -> None:
  mixed_audio = mixed_audio * (32767 / max_val)
  mixed_audio = mixed_audio.astype(np.int16)

- output_container = bv.open(output_path, mode="w")
+ output_container = av.open(output_path, mode="w")
  output_stream = output_container.add_stream("pcm_s16le", rate=sr)

  chunk_size = sr # Process 1 second at a time
@@ -298,9 +294,9 @@ def mix_audio_files(sr: int, audio_paths: list[str], output_path: str) -> None:
  def file_to_ndarray(src: FileInfo, stream: int, sr: int) -> np.ndarray:
  all_frames = []

- resampler = bv.AudioResampler(format="s16p", layout="stereo", rate=sr)
+ resampler = av.AudioResampler(format="s16p", layout="stereo", rate=sr)

- with bv.open(src.path) as container:
+ with av.open(src.path) as container:
  for frame in container.decode(audio=stream):
  for resampled_frame in resampler.resample(frame):
  all_frames.append(resampled_frame.to_ndarray())
@@ -311,10 +307,10 @@ def file_to_ndarray(src: FileInfo, stream: int, sr: int) -> np.ndarray:
  def ndarray_to_file(audio_data: np.ndarray, rate: int, out: str | Path) -> None:
  layout = "stereo"

- with bv.open(out, mode="w") as output:
+ with av.open(out, mode="w") as output:
  stream = output.add_stream("pcm_s16le", rate=rate, format="s16", layout=layout)

- frame = bv.AudioFrame.from_ndarray(audio_data, format="s16p", layout=layout)
+ frame = AudioFrame.from_ndarray(audio_data, format="s16p", layout=layout)
  frame.rate = rate

  output.mux(stream.encode(frame))
@@ -322,11 +318,11 @@ def ndarray_to_file(audio_data: np.ndarray, rate: int, out: str | Path) -> None:


  def ndarray_to_iter(
- audio_data: np.ndarray, fmt: bv.AudioFormat, layout: str, rate: int
+ audio_data: np.ndarray, fmt: av.AudioFormat, layout: str, rate: int
  ) -> Iterator[AudioFrame]:
  chunk_size = rate // 4 # Process 0.25 seconds at a time

- resampler = bv.AudioResampler(rate=rate, format=fmt, layout=layout)
+ resampler = av.AudioResampler(rate=rate, format=fmt, layout=layout)
  for i in range(0, audio_data.shape[1], chunk_size):
  chunk = audio_data[:, i : i + chunk_size]

@@ -338,15 +334,15 @@ def ndarray_to_iter(


  def make_new_audio(
- output: bv.container.OutputContainer,
- audio_format: bv.AudioFormat,
+ output: av.container.OutputContainer,
+ audio_format: av.AudioFormat,
  tl: v3,
  args: Args,
  log: Log,
- ) -> tuple[list[bv.AudioStream], list[Iterator[AudioFrame]]]:
+ ) -> tuple[list[av.AudioStream], list[Iterator[AudioFrame]]]:
  audio_inputs = []
  audio_gen_frames = []
- audio_streams: list[bv.AudioStream] = []
+ audio_streams: list[av.AudioStream] = []
  audio_paths = _make_new_audio(tl, audio_format, args, log)

  for i, audio_path in enumerate(audio_paths):
@@ -357,7 +353,7 @@ def make_new_audio(
  layout=tl.T.layout,
  time_base=Fraction(1, tl.sr),
  )
- if not isinstance(audio_stream, bv.AudioStream):
+ if not isinstance(audio_stream, av.AudioStream):
  log.error(f"Not a known audio codec: {args.audio_codec}")

  if args.audio_bitrate != "auto":
@@ -372,7 +368,7 @@ def make_new_audio(
  audio_streams.append(audio_stream)

  if isinstance(audio_path, str):
- audio_input = bv.open(audio_path)
+ audio_input = av.open(audio_path)
  audio_inputs.append(audio_input)
  audio_gen_frames.append(audio_input.decode(audio=0))
  else:
@@ -385,7 +381,7 @@ class Getter:
  __slots__ = ("container", "stream", "rate")

  def __init__(self, path: Path, stream: int, rate: int):
- self.container = bv.open(path)
+ self.container = av.open(path)
  self.stream = self.container.streams.audio[stream]
  self.rate = rate

@@ -394,7 +390,7 @@ class Getter:

  container = self.container
  stream = self.stream
- resampler = bv.AudioResampler(format="s16p", layout="stereo", rate=self.rate)
+ resampler = av.AudioResampler(format="s16p", layout="stereo", rate=self.rate)

  time_base = stream.time_base
  assert time_base is not None
@@ -436,10 +432,12 @@ class Getter:
  return result # Return NumPy array with shape (channels, samples)


- def _make_new_audio(tl: v3, fmt: bv.AudioFormat, args: Args, log: Log) -> list[Any]:
+ def _make_new_audio(
+ tl: v3, fmt: av.AudioFormat, args: Args, log: Log
+ ) -> list[str | Iterator[AudioFrame]]:
  sr = tl.sr
  tb = tl.tb
- output: list[Any] = []
+ output: list[str | Iterator[AudioFrame]] = []
  samples: dict[tuple[FileInfo, int], Getter] = {}

  norm = parse_norm(args.audio_normalize, log)
@@ -449,7 +447,7 @@ def _make_new_audio(tl: v3, fmt: bv.AudioFormat, args: Args, log: Log) -> list[A

  layout = tl.T.layout
  try:
- bv.AudioLayout(layout)
+ av.AudioLayout(layout)
  except ValueError:
  log.error(f"Invalid audio layout: {layout}")

@@ -511,7 +509,9 @@ def _make_new_audio(tl: v3, fmt: bv.AudioFormat, args: Args, log: Log) -> list[A

  if args.mix_audio_streams and len(output) > 1:
  new_a_file = f"{Path(log.temp, 'new_audio.wav')}"
- mix_audio_files(sr, output, new_a_file)
+ # When mix_audio_streams is True, output only contains strings
+ audio_paths = cast(list[str], output)
+ mix_audio_files(sr, audio_paths, new_a_file)
  return [new_a_file]

  return output
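Note: nearly every hunk above is the `bv` → `av` import rename (the dependency moves from the `basswood-av` fork back to PyAV, see the METADATA change below), plus stricter typing in `_make_new_audio`. For reference, a minimal sketch of the decode-and-resample pattern that `file_to_ndarray` uses, written against PyAV; the helper name and default rate below are illustrative, not part of the package:

```python
# Minimal sketch of the decode-and-resample pattern used by file_to_ndarray
# above, written against PyAV (`import av`).
import av
import numpy as np


def decode_to_ndarray(path: str, sr: int = 48000) -> np.ndarray:
    # Same resampler settings as the code above: planar signed 16-bit, stereo.
    resampler = av.AudioResampler(format="s16p", layout="stereo", rate=sr)
    chunks: list[np.ndarray] = []
    with av.open(path) as container:
        for frame in container.decode(audio=0):
            for resampled in resampler.resample(frame):
                chunks.append(resampled.to_ndarray())
    # Each chunk has shape (channels, samples); join along the sample axis.
    return np.concatenate(chunks, axis=1)
```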
auto_editor/render/subtitle.py CHANGED
@@ -6,7 +6,7 @@ import re
  from dataclasses import dataclass
  from typing import TYPE_CHECKING

- import bv
+ import av

  from auto_editor.utils.func import to_timecode

@@ -17,7 +17,7 @@ if TYPE_CHECKING:
  from auto_editor.utils.chunks import Chunks
  from auto_editor.utils.log import Log

- Input = bv.container.InputContainer
+ Input = av.container.InputContainer


  @dataclass(slots=True)
@@ -143,7 +143,7 @@ def make_srt(input_: Input, stream: int) -> str:
  end = to_timecode(start_num + packet.duration * input_stream.time_base, "srt")

  for sub in packet.decode():
- assert isinstance(sub, bv.subtitles.subtitle.AssSubtitle)
+ assert isinstance(sub, av.subtitles.subtitle.AssSubtitle)

  output_bytes.write(f"{s}\n{start} --> {end}\n")
  output_bytes.write(sub.dialogue.decode("utf-8", errors="ignore") + "\n\n")
@@ -155,7 +155,7 @@ def make_srt(input_: Input, stream: int) -> str:

  def _ensure(input_: Input, format: str, stream: int) -> str:
  output_bytes = io.BytesIO()
- output = bv.open(output_bytes, "w", format=format)
+ output = av.open(output_bytes, "w", format=format)

  in_stream = input_.streams.subtitles[stream]
  out_stream = output.add_stream_from_template(in_stream)
@@ -175,7 +175,7 @@ def make_new_subtitles(tl: v3, log: Log) -> list[str]:
  if tl.v1 is None:
  return []

- input_ = bv.open(tl.v1.source.path)
+ input_ = av.open(tl.v1.source.path)
  new_paths = []

  for s, sub in enumerate(tl.v1.source.subtitles):
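The subtitle changes are the same rename. For orientation, the demux/decode pattern `make_srt` builds on looks roughly like this under PyAV; `dump_dialogue` is a hypothetical helper, and it assumes packets decode to `AssSubtitle` objects as the assert above expects:

```python
# Rough sketch of the pattern make_srt uses: demux a subtitle stream and
# collect the decoded ASS dialogue text. dump_dialogue is illustrative.
import av
from av.subtitles.subtitle import AssSubtitle


def dump_dialogue(path: str, stream_index: int = 0) -> list[str]:
    lines: list[str] = []
    with av.open(path) as container:
        stream = container.streams.subtitles[stream_index]
        for packet in container.demux(stream):
            for sub in packet.decode():
                if isinstance(sub, AssSubtitle):
                    lines.append(sub.dialogue.decode("utf-8", errors="ignore"))
    return lines
```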
auto_editor/render/video.py CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
  from dataclasses import dataclass
  from typing import TYPE_CHECKING

- import bv
+ import av
  import numpy as np

  from auto_editor.timeline import Clip, TlImage, TlRect
@@ -24,12 +24,12 @@ class VideoFrame:
  src: FileInfo


- def make_solid(width: int, height: int, pix_fmt: str, bg: str) -> bv.VideoFrame:
+ def make_solid(width: int, height: int, pix_fmt: str, bg: str) -> av.VideoFrame:
  hex_color = bg.lstrip("#").upper()
  rgb_color = tuple(int(hex_color[i : i + 2], 16) for i in (0, 2, 4))

  rgb_array = np.full((height, width, 3), rgb_color, dtype=np.uint8)
- rgb_frame = bv.VideoFrame.from_ndarray(rgb_array, format="rgb24")
+ rgb_frame = av.VideoFrame.from_ndarray(rgb_array, format="rgb24")
  return rgb_frame.reformat(format=pix_fmt)


@@ -38,11 +38,11 @@ def make_image_cache(tl: v3) -> dict[tuple[FileInfo, int], np.ndarray]:
  for clip in tl.v:
  for obj in clip:
  if isinstance(obj, TlImage) and (obj.src, obj.width) not in img_cache:
- with bv.open(obj.src.path) as cn:
+ with av.open(obj.src.path) as cn:
  my_stream = cn.streams.video[0]
  for frame in cn.decode(my_stream):
  if obj.width != 0:
- graph = bv.filter.Graph()
+ graph = av.filter.Graph()
  graph.link_nodes(
  graph.add_buffer(template=my_stream),
  graph.add("scale", f"{obj.width}:-1"),
@@ -57,12 +57,12 @@ def make_image_cache(tl: v3) -> dict[tuple[FileInfo, int], np.ndarray]:


  def render_av(
- output: bv.container.OutputContainer, tl: v3, args: Args, log: Log
- ) -> Iterator[tuple[int, bv.VideoFrame]]:
- from_ndarray = bv.VideoFrame.from_ndarray
+ output: av.container.OutputContainer, tl: v3, args: Args, log: Log
+ ) -> Iterator[tuple[int, av.VideoFrame]]:
+ from_ndarray = av.VideoFrame.from_ndarray

- cns: dict[FileInfo, bv.container.InputContainer] = {}
- decoders: dict[FileInfo, Iterator[bv.VideoFrame]] = {}
+ cns: dict[FileInfo, av.container.InputContainer] = {}
+ decoders: dict[FileInfo, Iterator[av.VideoFrame]] = {}
  seek_cost: dict[FileInfo, int] = {}
  tous: dict[FileInfo, int] = {}

@@ -76,7 +76,7 @@ def render_av(
  first_src = src

  if src not in cns:
- cns[src] = bv.open(f"{src.path}")
+ cns[src] = av.open(f"{src.path}")

  for src, cn in cns.items():
  if len(cn.streams.video) > 0:
@@ -101,26 +101,21 @@ def render_av(
  log.debug(f"Tous: {tous}")
  log.debug(f"Clips: {tl.v}")

- codec = bv.Codec(args.video_codec, "w")
-
- need_valid_fmt = True
- if codec.video_formats is not None:
- for video_format in codec.video_formats:
- if pix_fmt == video_format.name:
- need_valid_fmt = False
- break
+ output_stream = output.add_stream(args.video_codec, rate=target_fps)
+ output_stream.options = {"x265-params": "log-level=error"}

+ need_valid_fmt = not (
+ output_stream.codec.video_formats
+ and any(pix_fmt == vfmt.name for vfmt in output_stream.codec.video_formats)
+ )
  if need_valid_fmt:
- if codec.canonical_name == "gif":
- pix_fmt = "rgb8"
- elif codec.canonical_name == "prores":
- pix_fmt = "yuv422p10le"
- else:
- pix_fmt = "yuv420p"
-
- del codec
- output_stream = output.add_stream(args.video_codec, rate=target_fps)
- output_stream.options = {"x265-params": "log-level=error"} # type: ignore
+ match output_stream.codec.canonical_name:
+ case "gif":
+ pix_fmt = "rgb8"
+ case "prores":
+ pix_fmt = "yuv422p10le"
+ case _:
+ pix_fmt = "yuv420p"

  cc = output_stream.codec_context
  if args.vprofile is not None:
@@ -133,7 +128,7 @@ def render_av(
  cc.profile = args.vprofile.title()

  yield output_stream # type: ignore
- if not isinstance(output_stream, bv.VideoStream):
+ if not isinstance(output_stream, av.VideoStream):
  log.error(f"Not a known video codec: {args.video_codec}")
  if src.videos and src.videos[0].lang is not None:
  output_stream.metadata["language"] = src.videos[0].lang
@@ -144,7 +139,7 @@ def render_av(
  else:
  target_width = max(round(tl.res[0] * args.scale), 2)
  target_height = max(round(tl.res[1] * args.scale), 2)
- scale_graph = bv.filter.Graph()
+ scale_graph = av.filter.Graph()
  scale_graph.link_nodes(
  scale_graph.add(
  "buffer", video_size="1x1", time_base="1/1", pix_fmt=pix_fmt
@@ -246,7 +241,7 @@ def render_av(

  if (frame.width, frame.height) != tl.res:
  width, height = tl.res
- graph = bv.filter.Graph()
+ graph = av.filter.Graph()
  graph.link_nodes(
  graph.add_buffer(template=my_stream),
  graph.add(
@@ -258,7 +253,7 @@ def render_av(
  ).vpush(frame)
  frame = graph.vpull()
  elif isinstance(obj, TlRect):
- graph = bv.filter.Graph()
+ graph = av.filter.Graph()
  x, y = obj.x, obj.y
  graph.link_nodes(
  graph.add_buffer(template=my_stream),
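The substantive change in `render_av` is that the output stream is created first and the pixel-format fallback is derived from `output_stream.codec` via a `match` statement, instead of building and discarding a temporary `bv.Codec`. A condensed sketch of that selection logic, assuming the `Stream.codec` attributes used above; `pick_pix_fmt` is illustrative, not part of the module:

```python
# Condensed sketch of the new pixel-format selection in render_av.
# Assumes av >= 15, where output_stream.codec exposes .video_formats and
# .canonical_name as used in the hunk above.
def pick_pix_fmt(output_stream, requested: str) -> str:
    fmts = output_stream.codec.video_formats
    if fmts and any(requested == vfmt.name for vfmt in fmts):
        return requested  # the encoder accepts the requested format as-is
    # Otherwise fall back to a format the encoder is known to handle.
    match output_stream.codec.canonical_name:
        case "gif":
            return "rgb8"
        case "prores":
            return "yuv422p10le"
        case _:
            return "yuv420p"
```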
auto_editor/timeline.py CHANGED
@@ -282,41 +282,6 @@ video\n"""

  return result

- def as_dict(self) -> dict:
- def aclip_to_dict(self: Clip) -> dict:
- return {
- "name": "audio",
- "src": self.src,
- "start": self.start,
- "dur": self.dur,
- "offset": self.offset,
- "speed": self.speed,
- "volume": self.volume,
- "stream": self.stream,
- }
-
- v = []
- a = []
- for vlayer in self.v:
- vb = [vobj.as_dict() for vobj in vlayer]
- if vb:
- v.append(vb)
- for layer in self.a:
- ab = [aclip_to_dict(clip) for clip in layer]
- if ab:
- a.append(ab)
-
- return {
- "version": "3",
- "timebase": f"{self.tb.numerator}/{self.tb.denominator}",
- "background": self.background,
- "resolution": self.T.res,
- "samplerate": self.T.sr,
- "layout": self.T.layout,
- "v": v,
- "a": a,
- }
-
  @property
  def T(self) -> Template:
  return self.template
auto_editor/utils/container.py CHANGED
@@ -3,8 +3,7 @@ from __future__ import annotations
  from dataclasses import dataclass
  from typing import TypedDict

- import bv
- from bv.codec import Codec
+ from av import Codec, open

  from auto_editor.utils.log import Log

@@ -64,7 +63,7 @@ def codec_type(x: str) -> str:

  def container_constructor(ext: str, log: Log) -> Container:
  try:
- container = bv.open(f".{ext}", "w")
+ container = open(f".{ext}", "w")
  except ValueError:
  log.error(f"Could not find a suitable format for extension: {ext}")
auto_editor/utils/log.py CHANGED
@@ -100,8 +100,10 @@ class Log:
  sys.stderr.write(f"\033[1m\033[33m{message}\033[0m\n")

  def error(self, message: str | Exception) -> NoReturn:
- if self.is_debug and isinstance(message, Exception):
+ if self.is_debug:
  self.cleanup()
+ if isinstance(message, str):
+ raise Exception(message)
  raise message

  self.conwrite("")
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: auto-editor
- Version: 28.0.1
+ Version: 28.1.0
  Summary: Auto-Editor: Effort free video editing!
  Author-email: WyattBlue <wyattblue@auto-editor.com>
  License-Expression: Unlicense
@@ -12,7 +12,7 @@ Requires-Python: <3.14,>=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: numpy<3.0,>=2
- Requires-Dist: basswood-av<16,>=15.2.1
+ Requires-Dist: av<16,>=15.0
  Dynamic: license-file

  <p align="center"><img src="https://auto-editor.com/img/auto-editor-banner.webp" title="Auto-Editor" width="700"></p>
@@ -1,28 +1,29 @@
- auto_editor/__init__.py,sha256=qvc7djhJkqlXmlxcWZWLvK6JKBp01_I3fJuFV5euTBg,23
- auto_editor/__main__.py,sha256=t5Um7M9ess9xYRfmn6iDvJoBqj_KRQLhlkpFdfhx3MU,15478
- auto_editor/analyze.py,sha256=CeJG0LI9wXZk1R-QPrNGPS4za-_Avd8y7H-D437DqLg,12300
- auto_editor/edit.py,sha256=GhOfKIe_FdbBGDQ_WH02CJl7Oi-AwaaSSh73F81h_kA,19234
- auto_editor/ffwrapper.py,sha256=ueYdN7-cBAdJBJbv5yT05iuQP_BPFIrR5hFeMkk5c3U,5040
- auto_editor/help.py,sha256=8Kkos7Bpcn-3YN7ngv0fZp-8a8Qw6b70ZZKerL8SEQ4,7901
+ auto_editor/__init__.py,sha256=22AwAEXAu8nCjcGi5JKpN5CwGqL0avyRi9p7xsPeXQk,23
+ auto_editor/__main__.py,sha256=46l-4asehWV-GJIDDULm2Sh9RKD6KjVBariy6ijgFSI,15552
+ auto_editor/analyze.py,sha256=b3x7efPza_PJ5E6zzSsgqFGYIRRulcpL1AEcrGVcFAg,12300
+ auto_editor/edit.py,sha256=pU5uhyKJSsOoSVeZDWBUjWi2_UPEBXXEpHUsCD8CmwA,19690
+ auto_editor/ffwrapper.py,sha256=YNGvHnkX0PTHsBx19kc-O3ZAiZX8WoqqZvIfjJ3tovQ,5474
+ auto_editor/help.py,sha256=YAw0b_-b6_RXXuDSl6OHevCMrhDocxWbcvoIlj5Qq-E,7959
  auto_editor/json.py,sha256=g6ZsrSOXtjQj-8Otea3TIY2G-HZ6g-IXUblwukgTO1g,9332
- auto_editor/make_layers.py,sha256=Udxn7Vw0Jz9HOarLeswQ-WwTu87PWcsyduBdtFwSOyc,10079
- auto_editor/preview.py,sha256=yYn5jJLQ3TiJiNC6JffaggLVPQfBU558zJeRQz7nCDY,3046
- auto_editor/timeline.py,sha256=ebnEX23HubNm444SIB2QNPkNdZcxHGhci1BpAeVDwcQ,9186
+ auto_editor/make_layers.py,sha256=JAel8N1wr3NSmd0LRhEQsMDHQILqcMXjGDZ_qTrs5No,10076
+ auto_editor/preview.py,sha256=pBCw21wTORG0-Zs4Zf2anCecTNlnfiCq50tINeOcFmg,2815
+ auto_editor/timeline.py,sha256=84k-sSK3c2kTZ28atPoIaWgSecZvO-fOGfsXFlmifP4,8165
  auto_editor/vanparse.py,sha256=Ug5A2QaRqGiw4l55Z_h9T2QU1x0WqRibR7yY5rQ0WTk,10002
  auto_editor/cmds/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  auto_editor/cmds/cache.py,sha256=rK06dX3Z1pDAK0t5rqR7S2cMEpnT_Tv17t3Q5qPQKLI,1959
- auto_editor/cmds/desc.py,sha256=kdRJfabdde5thuFOoTXlW3J3G8F0ZvbsJoOkUn7E3GA,805
+ auto_editor/cmds/desc.py,sha256=VKs6ibRMbC4I4ro5Qh2B-3xsfnYSwunkyEvgHidBTqk,805
  auto_editor/cmds/info.py,sha256=NwzZu6l6IIdK0ajZdn8WX_iiJwlsMrGIZ2vffDRu3gc,6535
- auto_editor/cmds/levels.py,sha256=2Hbvoy6wMbRRoHdZf25PMqq1uvhRKyPcITNPMpZFo7s,5632
+ auto_editor/cmds/levels.py,sha256=sHaaDqCCU8fwV311kL_otVCBanrD37m8KvsIs5BQ1HY,5853
  auto_editor/cmds/palet.py,sha256=t2O_xM82PXUjnSgcHxyt9OG24gwQS4ksR2sKiQ1SvUA,743
  auto_editor/cmds/repl.py,sha256=HSUTDaVykPb5Bd-v_jz_8R7HvFmKOcT_ZVmP-d0AbUY,3247
- auto_editor/cmds/subdump.py,sha256=Zxs9X0gEaQYA5F1EqccJmVUYfI1o3J7ltyc9bvy1tA4,2393
- auto_editor/cmds/test.py,sha256=p7z2cojsiyWnRN_UVeT4iYakkGlsQTjt_dRZ4cSvIQg,29835
+ auto_editor/cmds/subdump.py,sha256=QPMX-Tb5ZIU857-u6UXSMf2pkoJON-OQ1zuZjhOFQgk,2393
+ auto_editor/cmds/test.py,sha256=sFAbMKC_kTEIvFCjK7sm9cb2HL-ZyE0dNGCJpz-le9M,29946
  auto_editor/exports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- auto_editor/exports/fcp11.py,sha256=LiWHtA-jgwzbiSoBzNBUaTjOEuqnvr3dYBQmdlw7m3Y,5244
+ auto_editor/exports/fcp11.py,sha256=uOonQPBoSQSfUpdi58vqDgpZdmN6Jevm2YNPo9m9nI0,6195
  auto_editor/exports/fcp7.py,sha256=shZtn5i66hqGVrpR1PP1hiwKiZirr1cRUxATzbW-BOo,12069
- auto_editor/exports/json.py,sha256=Z7RKtD5-OB3saOtWG9qfI9OrLtylhU9VyfpnVRNcuU8,768
- auto_editor/exports/shotcut.py,sha256=MpzYqDO_XSnd-bKCKQjJFqOf_mSdR9QOWi8_Y1Y_BwY,4696
+ auto_editor/exports/json.py,sha256=Q176-MtctEyPVenxbjuhMx_j8oVx-_G4Zwa-PJ3f7oQ,1579
+ auto_editor/exports/kdenlive.py,sha256=ZgMivXb2pBEF9zvU0Mnfm_a7CGupcb02jCt9EwDjwoQ,11798
+ auto_editor/exports/shotcut.py,sha256=miIOvzM73tUJcHFCe7fUu66YZxEvlDSkV1PHxv8ED8k,4699
  auto_editor/imports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  auto_editor/imports/fcp7.py,sha256=Vep1syViRZOrtZBlF6Ek_eRy0rLINpTso5nrh3uY3zk,8563
  auto_editor/imports/json.py,sha256=5c_vKoS8FzxAWagCetrwATHdnZ2beWQ77g8JYKVM0i8,6993
@@ -30,27 +31,27 @@ auto_editor/lang/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
  auto_editor/lang/libintrospection.py,sha256=6H1rGp0wqaCud5IPaoEmzULGnYt6ec7_0h32ATcw2oY,261
  auto_editor/lang/libmath.py,sha256=z33A161Oe6vYYK7R6pgYjdZZe63dQkN38Qf36TL3prg,847
  auto_editor/lang/palet.py,sha256=bsuEWNEefTb0-69Dc4lq4aotqu7aarH1okETK8ab-Gw,22794
- auto_editor/lang/stdenv.py,sha256=munuKA5rkg_5krQ_km92JhoNY6Vio9wo5E79YWlD2eg,43654
+ auto_editor/lang/stdenv.py,sha256=Ier6zmn8eFd6cpMdgHnnShjmxXXPWH1ItGyqZQvSkjc,43475
  auto_editor/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  auto_editor/lib/contracts.py,sha256=bADXnFXyq_-Tku59saxwDOlvexfS3wMAMdrKNL-Ql2w,7494
  auto_editor/lib/data_structs.py,sha256=2SxsOXKiY0H95wRIFOrQBjv1ykJUQar0Vr5dK9zZOiQ,6971
  auto_editor/lib/err.py,sha256=UlszQJdzMZwkbT8x3sY4GkCV_5x9yrd6uVVUzvA8iiI,35
  auto_editor/render/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- auto_editor/render/audio.py,sha256=08Leg9R66ROz8meZyY03e7yfUd5otxdd6DdNO9vX5ys,17518
- auto_editor/render/subtitle.py,sha256=F27T8OsAojUIGTGBWqTdH36h0BnHXSExxIqzOtqyZoE,6129
- auto_editor/render/video.py,sha256=8somlAxtbGMSRREab8psTdIlaETZQ2s2gY20PWY9sww,12072
+ auto_editor/render/audio.py,sha256=O4aUBfyLg4-6QmyXoOjwnKmw3n5ZMHvFJKGzu07BpgM,17629
+ auto_editor/render/subtitle.py,sha256=6zPRbW2ksoBMDMm449vslLqs8v7htDEe5MRFEjXCGog,6129
+ auto_editor/render/video.py,sha256=hrj2b6xJ7cu7JeALF_XaOOxGtJfYYvXAEIgylEXrh9k,11964
  auto_editor/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  auto_editor/utils/bar.py,sha256=Ky9JRf37JTgLyvNuIXDfucaUE8H1vBbCqKLjttmsmmo,4156
  auto_editor/utils/chunks.py,sha256=J-eGKtEz68gFtRrj1kOSgH4Tj_Yz6prNQ7Xr-d9NQJw,52
  auto_editor/utils/cmdkw.py,sha256=aUGBvBel2Ko1o6Rwmr4rEL-BMc5hEnzYLbyZ1GeJdcY,5729
- auto_editor/utils/container.py,sha256=CNHChHbhzIrjmDdWw6UzMqscrr9u7A-ZqKWejGjJwYE,2628
+ auto_editor/utils/container.py,sha256=tdvTsY5kcz46DAQ7jvKoXk_UPzhsz430kSKEHL7RUTQ,2615
  auto_editor/utils/func.py,sha256=ODyjXnzSDatEu08w398K8_xBKYdXMY3IPHiJpGRZDyQ,3250
- auto_editor/utils/log.py,sha256=wPNf6AabV-0cnoS_bPLv1Lh7llQBtNqPKeh07einOuc,3701
+ auto_editor/utils/log.py,sha256=hTBpBpadu6uD161Uhi0X5Ks7aA1azbtjXVNmN_sz1ko,3748
  auto_editor/utils/types.py,sha256=j2hd4zMQ9EftDy41Ji2_PFru_7HEZObd9yKA0BJxFaY,7616
- auto_editor-28.0.1.dist-info/licenses/LICENSE,sha256=yiq99pWITHfqS0pbZMp7cy2dnbreTuvBwudsU-njvIM,1210
+ auto_editor-28.1.0.dist-info/licenses/LICENSE,sha256=yiq99pWITHfqS0pbZMp7cy2dnbreTuvBwudsU-njvIM,1210
  docs/build.py,sha256=g1uc1H9T_naGaermUiVMMwUpbT0IWElRhjgT0fvCh8w,1914
- auto_editor-28.0.1.dist-info/METADATA,sha256=DqTFmUbe3CYS0CefGlNadtwH80JaemOw0MOCXtDGCsg,6176
- auto_editor-28.0.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- auto_editor-28.0.1.dist-info/entry_points.txt,sha256=UAsTc7qJQbnAzHd7KWg-ALo_X9Hj2yDs3M9I2DV3eyI,212
- auto_editor-28.0.1.dist-info/top_level.txt,sha256=jBV5zlbWRbKOa-xaWPvTD45QL7lGExx2BDzv-Ji4dTw,17
- auto_editor-28.0.1.dist-info/RECORD,,
+ auto_editor-28.1.0.dist-info/METADATA,sha256=KEs0ikYYnmLrSLLhJXU4tgU9_LZGcCqWl82DjgGfmJI,6165
+ auto_editor-28.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ auto_editor-28.1.0.dist-info/entry_points.txt,sha256=UAsTc7qJQbnAzHd7KWg-ALo_X9Hj2yDs3M9I2DV3eyI,212
+ auto_editor-28.1.0.dist-info/top_level.txt,sha256=jBV5zlbWRbKOa-xaWPvTD45QL7lGExx2BDzv-Ji4dTw,17
+ auto_editor-28.1.0.dist-info/RECORD,,