auto-editor 27.0.0__py3-none-any.whl → 27.1.1__py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
@@ -1,11 +1,13 @@
 from __future__ import annotations
 
-import io
+from fractions import Fraction
+from io import BytesIO
 from pathlib import Path
 from typing import TYPE_CHECKING
 
 import bv
 import numpy as np
+from bv import AudioFrame
 from bv.filter.loudnorm import stats
 
 from auto_editor.ffwrapper import FileInfo
@@ -13,17 +15,18 @@ from auto_editor.json import load
 from auto_editor.lang.palet import env
 from auto_editor.lib.contracts import andc, between_c, is_int_or_float
 from auto_editor.lib.err import MyError
-from auto_editor.output import Ensure
 from auto_editor.timeline import TlAudio, v3
-from auto_editor.utils.bar import Bar
 from auto_editor.utils.cmdkw import ParserError, parse_with_palet, pAttr, pAttrs
-from auto_editor.utils.container import Container
+from auto_editor.utils.func import parse_bitrate
 from auto_editor.utils.log import Log
-from auto_editor.wavfile import AudioData, read, write
 
 if TYPE_CHECKING:
+    from collections.abc import Iterator
+    from typing import Any
+
     from auto_editor.__main__ import Args
 
+
 norm_types = {
     "ebu": pAttrs(
         "ebu",
@@ -106,7 +109,7 @@ def apply_audio_normalization(
     else:
         assert "t" in norm
 
-    def get_peak_level(frame: bv.AudioFrame) -> float:
+    def get_peak_level(frame: AudioFrame) -> float:
         # Calculate peak level in dB
         # Should be equivalent to: -af astats=measure_overall=Peak_level:measure_perchannel=0
         max_amplitude = np.abs(frame.to_ndarray()).max()
@@ -143,7 +146,7 @@ def apply_audio_normalization(
     while True:
         try:
             aframe = graph.pull()
-            assert isinstance(aframe, bv.AudioFrame)
+            assert isinstance(aframe, AudioFrame)
             output_file.mux(output_stream.encode(aframe))
         except (bv.BlockingIOError, bv.EOFError):
             break
@@ -152,20 +155,27 @@ def apply_audio_normalization(
     output_file.close()
 
 
-def process_audio_clip(
-    clip: TlAudio, samp_list: AudioData, samp_start: int, samp_end: int, sr: int
-) -> AudioData:
-    input_buffer = io.BytesIO()
-    write(input_buffer, sr, samp_list[samp_start:samp_end])
+def process_audio_clip(clip: TlAudio, data: np.ndarray, sr: int) -> np.ndarray:
+    to_s16 = bv.AudioResampler(format="s16", layout="stereo", rate=sr)
+    input_buffer = BytesIO()
+
+    with bv.open(input_buffer, "w", format="wav") as container:
+        output_stream = container.add_stream(
+            "pcm_s16le", sample_rate=sr, format="s16", layout="stereo"
+        )
+
+        frame = AudioFrame.from_ndarray(data, format="s16p", layout="stereo")
+        frame.rate = sr
+
+        for reframe in to_s16.resample(frame):
+            container.mux(output_stream.encode(reframe))
+        container.mux(output_stream.encode(None))
+
     input_buffer.seek(0)
 
     input_file = bv.open(input_buffer, "r")
     input_stream = input_file.streams.audio[0]
 
-    output_bytes = io.BytesIO()
-    output_file = bv.open(output_bytes, mode="w", format="wav")
-    output_stream = output_file.add_stream("pcm_s16le", rate=sr)
-
     graph = bv.filter.Graph()
     args = [graph.add_abuffer(template=input_stream)]
 
@@ -191,29 +201,23 @@ def process_audio_clip(
     args.append(graph.add("abuffersink"))
     graph.link_nodes(*args).configure()
 
+    all_frames = []
+    resampler = bv.AudioResampler(format="s16p", layout="stereo", rate=sr)
+
    for frame in input_file.decode(input_stream):
        graph.push(frame)
        while True:
            try:
                aframe = graph.pull()
-                assert isinstance(aframe, bv.AudioFrame)
-                output_file.mux(output_stream.encode(aframe))
-            except (bv.BlockingIOError, bv.EOFError):
-                break
+                assert isinstance(aframe, AudioFrame)
 
-    # Flush the stream
-    output_file.mux(output_stream.encode(None))
+                for resampled_frame in resampler.resample(aframe):
+                    all_frames.append(resampled_frame.to_ndarray())
 
-    input_file.close()
-    output_file.close()
-
-    output_bytes.seek(0)
-    has_filesig = output_bytes.read(4)
-    output_bytes.seek(0)
-    if not has_filesig:  # Can rarely happen when clip is extremely small
-        return np.empty((0, 2), dtype=np.int16)
+            except (bv.BlockingIOError, bv.EOFError):
+                break
 
-    return read(output_bytes)[1]
+    return np.concatenate(all_frames, axis=1)
 
 
 def mix_audio_files(sr: int, audio_paths: list[str], output_path: str) -> None:
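
Note: `process_audio_clip` no longer round-trips through `auto_editor.wavfile`; it encodes the incoming `(2, n)` int16 ndarray to an in-memory WAV, runs it through the filter graph, and gathers the filtered frames back into an ndarray. A minimal standalone sketch of the encode step, reusing only calls that appear in the hunks above (the array contents are placeholder silence):

    from io import BytesIO

    import bv
    import numpy as np
    from bv import AudioFrame

    sr = 48000
    data = np.zeros((2, sr), dtype=np.int16)  # one second of s16p stereo silence

    to_s16 = bv.AudioResampler(format="s16", layout="stereo", rate=sr)
    buf = BytesIO()
    with bv.open(buf, "w", format="wav") as container:
        stream = container.add_stream(
            "pcm_s16le", sample_rate=sr, format="s16", layout="stereo"
        )
        frame = AudioFrame.from_ndarray(data, format="s16p", layout="stereo")
        frame.rate = sr
        for reframe in to_s16.resample(frame):
            container.mux(stream.encode(reframe))
        container.mux(stream.encode(None))  # flush the encoder

    buf.seek(0)  # buf now holds a decodable WAV, ready for bv.open(buf, "r")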
@@ -278,7 +282,7 @@ def mix_audio_files(sr: int, audio_paths: list[str], output_path: str) -> None:
         # Shape becomes (1, samples) for mono
         chunk = np.array([mixed_audio[i : i + chunk_size]])
 
-        frame = bv.AudioFrame.from_ndarray(chunk, format="s16", layout="mono")
+        frame = AudioFrame.from_ndarray(chunk, format="s16", layout="mono")
         frame.rate = sr
         frame.pts = i  # Set presentation timestamp
 
@@ -288,92 +292,223 @@ def mix_audio_files(sr: int, audio_paths: list[str], output_path: str) -> None:
     output_container.close()
 
 
+def file_to_ndarray(src: FileInfo, stream: int, sr: int) -> np.ndarray:
+    all_frames = []
+
+    resampler = bv.AudioResampler(format="s16p", layout="stereo", rate=sr)
+
+    with bv.open(src.path) as container:
+        for frame in container.decode(audio=stream):
+            for resampled_frame in resampler.resample(frame):
+                all_frames.append(resampled_frame.to_ndarray())
+
+    return np.concatenate(all_frames, axis=1)
+
+
+def ndarray_to_file(audio_data: np.ndarray, rate: int, out: str | Path) -> None:
+    layout = "stereo"
+
+    with bv.open(out, mode="w") as output:
+        stream = output.add_stream("pcm_s16le", rate=rate, format="s16", layout=layout)
+
+        frame = bv.AudioFrame.from_ndarray(audio_data, format="s16p", layout=layout)
+        frame.rate = rate
+
+        output.mux(stream.encode(frame))
+        output.mux(stream.encode(None))
+
+
+def ndarray_to_iter(
+    audio_data: np.ndarray, fmt: bv.AudioFormat, layout: str, rate: int
+) -> Iterator[AudioFrame]:
+    chunk_size = rate // 4  # Process 0.25 seconds at a time
+
+    resampler = bv.AudioResampler(rate=rate, format=fmt, layout=layout)
+    for i in range(0, audio_data.shape[1], chunk_size):
+        chunk = audio_data[:, i : i + chunk_size]
+
+        frame = AudioFrame.from_ndarray(chunk, format="s16p", layout="stereo")
+        frame.rate = rate
+        frame.pts = i
+
+        yield from resampler.resample(frame)
+
+
 def make_new_audio(
-    tl: v3, ctr: Container, ensure: Ensure, args: Args, bar: Bar, log: Log
-) -> list[str]:
+    output: bv.container.OutputContainer,
+    audio_format: bv.AudioFormat,
+    tl: v3,
+    args: Args,
+    log: Log,
+) -> tuple[list[bv.AudioStream], list[Iterator[AudioFrame]]]:
+    audio_inputs = []
+    audio_gen_frames = []
+    audio_streams: list[bv.AudioStream] = []
+    audio_paths = _make_new_audio(tl, audio_format, args, log)
+
+    for i, audio_path in enumerate(audio_paths):
+        audio_stream = output.add_stream(
+            args.audio_codec,
+            rate=tl.sr,
+            format=audio_format,
+            layout=tl.T.layout,
+            time_base=Fraction(1, tl.sr),
+        )
+        if not isinstance(audio_stream, bv.AudioStream):
+            log.error(f"Not a known audio codec: {args.audio_codec}")
+
+        if args.audio_bitrate != "auto":
+            audio_stream.bit_rate = parse_bitrate(args.audio_bitrate, log)
+            log.debug(f"audio bitrate: {audio_stream.bit_rate}")
+        else:
+            log.debug(f"[auto] audio bitrate: {audio_stream.bit_rate}")
+
+        if i < len(tl.T.audios) and (lang := tl.T.audios[i].lang) is not None:
+            audio_stream.metadata["language"] = lang
+
+        audio_streams.append(audio_stream)
+
+        if isinstance(audio_path, str):
+            audio_input = bv.open(audio_path)
+            audio_inputs.append(audio_input)
+            audio_gen_frames.append(audio_input.decode(audio=0))
+        else:
+            audio_gen_frames.append(audio_path)
+
+    return audio_streams, audio_gen_frames
+
+
+class Getter:
+    __slots__ = ("container", "stream", "rate")
+
+    def __init__(self, path: Path, stream: int, rate: int):
+        self.container = bv.open(path)
+        self.stream = self.container.streams.audio[0]
+        self.rate = rate
+
+    def get(self, start: int, end: int) -> np.ndarray:
+        # start/end is in samples
+
+        container = self.container
+        stream = self.stream
+        resampler = bv.AudioResampler(format="s16p", layout="stereo", rate=self.rate)
+
+        time_base = stream.time_base
+        assert time_base is not None
+        start_pts = int(start / self.rate / time_base)
+
+        # Seek to the approximate position
+        container.seek(start_pts, stream=stream)
+
+        all_frames = []
+        total_samples = 0
+        target_samples = end - start
+
+        # Decode frames until we have enough samples
+        for frame in container.decode(stream):
+            for resampled_frame in resampler.resample(frame):
+                frame_array = resampled_frame.to_ndarray()
+                all_frames.append(frame_array)
+                total_samples += frame_array.shape[1]
+
+                if total_samples >= target_samples:
+                    break
+
+            if total_samples >= target_samples:
+                break
+
+        result = np.concatenate(all_frames, axis=1)
+
+        # Trim to exact size
+        if result.shape[1] > target_samples:
+            result = result[:, :target_samples]
+        elif result.shape[1] < target_samples:
+            # Pad with zeros if we don't have enough samples
+            padding = np.zeros(
+                (result.shape[0], target_samples - result.shape[1]), dtype=result.dtype
+            )
+            result = np.concatenate([result, padding], axis=1)
+
+        assert result.shape[1] == end - start
+        return result  # Return NumPy array with shape (channels, samples)
+
+
+def _make_new_audio(tl: v3, fmt: bv.AudioFormat, args: Args, log: Log) -> list[Any]:
     sr = tl.sr
     tb = tl.tb
-    output: list[str] = []
-    samples: dict[tuple[FileInfo, int], AudioData] = {}
+    output: list[Any] = []
+    samples: dict[tuple[FileInfo, int], Getter] = {}
 
     norm = parse_norm(args.audio_normalize, log)
 
-    temp = log.temp
-
-    if not tl.a or not tl.a[0]:
+    if not tl.a[0]:
         log.error("Trying to render empty audio timeline")
 
-    for i, layer in enumerate(tl.a):
-        bar.start(len(layer), "Creating new audio")
+    layout = tl.T.layout
+    try:
+        bv.AudioLayout(layout)
+    except ValueError:
+        log.error(f"Invalid audio layout: {layout}")
 
-        path = Path(temp, f"new{i}.wav")
-        output.append(f"{path}")
-        arr: AudioData | None = None
+    for i, layer in enumerate(tl.a):
+        arr: np.ndarray | None = None
+        use_iter = False
 
         for c, clip in enumerate(layer):
             if (clip.src, clip.stream) not in samples:
-                audio_path = ensure.audio(clip.src, clip.stream)
-                with open(audio_path, "rb") as file:
-                    samples[(clip.src, clip.stream)] = read(file)[1]
+                samples[(clip.src, clip.stream)] = Getter(
+                    clip.src.path, clip.stream, sr
+                )
 
+            log.conwrite("Creating audio")
             if arr is None:
                 leng = max(round((layer[-1].start + layer[-1].dur) * sr / tb), sr // tb)
-                dtype = np.int32
-                for _samp_arr in samples.values():
-                    dtype = _samp_arr.dtype
-                    break
+                map_path = Path(log.temp, f"{i}.map")
+                arr = np.memmap(map_path, mode="w+", dtype=np.int16, shape=(2, leng))
 
-                arr = np.memmap(
-                    Path(temp, "asdf.map"),
-                    mode="w+",
-                    dtype=dtype,
-                    shape=(leng, 2),
-                )
-                del leng
-
-            samp_list = samples[(clip.src, clip.stream)]
             samp_start = round(clip.offset * clip.speed * sr / tb)
             samp_end = round((clip.offset + clip.dur) * clip.speed * sr / tb)
-            if samp_end > len(samp_list):
-                samp_end = len(samp_list)
+
+            getter = samples[(clip.src, clip.stream)]
 
             if clip.speed != 1 or clip.volume != 1:
-                clip_arr = process_audio_clip(clip, samp_list, samp_start, samp_end, sr)
+                clip_arr = process_audio_clip(
+                    clip, getter.get(samp_start, samp_end), sr
+                )
             else:
-                clip_arr = samp_list[samp_start:samp_end]
+                clip_arr = getter.get(samp_start, samp_end)
 
             # Mix numpy arrays
             start = clip.start * sr // tb
-            car_len = clip_arr.shape[0]
-
-            if start + car_len > len(arr):
-                # Clip 'clip_arr' if bigger than expected.
-                arr[start:] += clip_arr[: len(arr) - start]
+            clip_samples = clip_arr.shape[1]
+            if start + clip_samples > arr.shape[1]:
+                # Shorten `clip_arr` if bigger than expected.
+                arr[:, start:] += clip_arr[:, : arr.shape[1] - start]
             else:
-                arr[start : start + car_len] += clip_arr
-
-            bar.tick(c)
+                arr[:, start : start + clip_samples] += clip_arr
 
         if arr is not None:
             if norm is None:
-                with open(path, "wb") as fid:
-                    write(fid, sr, arr)
+                if args.mix_audio_streams:
+                    path = Path(log.temp, f"new{i}.wav")
+                    ndarray_to_file(arr, sr, path)
+                    output.append(f"{path}")
+                else:
+                    use_iter = True
             else:
-                pre_master = Path(temp, "premaster.wav")
-                with open(pre_master, "wb") as fid:
-                    write(fid, sr, arr)
+                path = Path(log.temp, f"new{i}.wav")
+                pre_master = Path(log.temp, "premaster.wav")
 
+                ndarray_to_file(arr, sr, pre_master)
                 apply_audio_normalization(norm, pre_master, path, log)
+                output.append(f"{path}")
 
-        bar.end()
-
-    try:
-        Path(temp, "asdf.map").unlink(missing_ok=True)
-    except PermissionError:
-        pass
+        if use_iter and arr is not None:
+            output.append(ndarray_to_iter(arr, fmt, layout, sr))
 
     if args.mix_audio_streams and len(output) > 1:
-        new_a_file = f"{Path(temp, 'new_audio.wav')}"
+        new_a_file = f"{Path(log.temp, 'new_audio.wav')}"
         mix_audio_files(sr, output, new_a_file)
         return [new_a_file]
+
     return output
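
Note: in `_make_new_audio`, clip positions are expressed in timeline timebase units, so the source sample window scales by `clip.speed * sr / tb`. A worked example with illustrative numbers (the field values are hypothetical):

    sr, tb = 48000, 30  # sample rate; timeline timebase (ticks per second)
    offset, dur, speed = 60, 30, 2.0  # hypothetical TlAudio fields, in tb units

    samp_start = round(offset * speed * sr / tb)  # 192000
    samp_end = round((offset + dur) * speed * sr / tb)  # 288000

    # Getter.get(samp_start, samp_end) then returns exactly
    # samp_end - samp_start samples: shape (2, 96000), trimmed or
    # zero-padded as needed.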
@@ -6,8 +6,8 @@ from typing import TYPE_CHECKING
 import bv
 import numpy as np
 
-from auto_editor.output import parse_bitrate
 from auto_editor.timeline import TlImage, TlRect, TlVideo
+from auto_editor.utils.func import parse_bitrate
 
 if TYPE_CHECKING:
     from collections.abc import Iterator
@@ -61,7 +61,6 @@ def render_av(
 ) -> Iterator[tuple[int, bv.VideoFrame]]:
     from_ndarray = bv.VideoFrame.from_ndarray
 
-    src = tl.src
     cns: dict[FileInfo, bv.container.InputContainer] = {}
     decoders: dict[FileInfo, Iterator[bv.VideoFrame]] = {}
     seek_cost: dict[FileInfo, int] = {}
auto_editor/timeline.py CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
 from dataclasses import dataclass
 from typing import TYPE_CHECKING
 
-from auto_editor.ffwrapper import initFileInfo, mux
+from auto_editor.ffwrapper import FileInfo, mux
 from auto_editor.lib.contracts import *
 from auto_editor.utils.cmdkw import Required, pAttr, pAttrs
 from auto_editor.utils.types import CoerceError, natural, number, parse_color
@@ -187,13 +187,52 @@ ALayer = list[TlAudio]
 ASpace = list[ALayer]
 
 
+@dataclass(slots=True)
+class AudioTemplate:
+    lang: str | None
+
+
+@dataclass(slots=True)
+class SubtitleTemplate:
+    lang: str | None
+
+
+@dataclass(slots=True)
+class Template:
+    sr: int
+    layout: str
+    res: tuple[int, int]
+    audios: list[AudioTemplate]
+    subtitles: list[SubtitleTemplate]
+
+    @classmethod
+    def init(
+        self,
+        src: FileInfo,
+        sr: int | None = None,
+        layout: str | None = None,
+        res: tuple[int, int] | None = None,
+    ) -> Template:
+        alist = [AudioTemplate(x.lang) for x in src.audios]
+        slist = [SubtitleTemplate(x.lang) for x in src.subtitles]
+
+        if sr is None:
+            sr = src.get_sr()
+
+        if layout is None:
+            layout = "stereo" if not src.audios else src.audios[0].layout
+
+        if res is None:
+            res = src.get_res()
+
+        return Template(sr, layout, res, alist, slist)
+
+
 @dataclass
 class v3:
-    src: FileInfo | None  # Used as a template for timeline settings
     tb: Fraction
-    sr: int
-    res: tuple[int, int]
     background: str
+    template: Template
     v: VSpace
     a: ASpace
     v1: v1 | None  # Is it v1 compatible (linear and only one source)?
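
Note: `Template` now bundles the output settings that `v3` previously carried as loose fields (`sr`, `res`) together with the channel layout and per-stream language tags. A small sketch constructing one directly with illustrative values; `Template.init(src)` would instead derive them from a `FileInfo`:

    T = Template(
        sr=48000,
        layout="stereo",
        res=(1920, 1080),
        audios=[AudioTemplate(lang="eng")],
        subtitles=[],
    )
    assert T.audios[0].lang == "eng"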
@@ -286,41 +325,55 @@ video\n"""
 
         return {
             "version": "3",
-            "resolution": self.res,
             "timebase": f"{self.tb.numerator}/{self.tb.denominator}",
-            "samplerate": self.sr,
             "background": self.background,
+            "resolution": self.T.res,
+            "samplerate": self.T.sr,
+            "layout": self.T.layout,
             "v": v,
             "a": a,
         }
 
+    @property
+    def T(self) -> Template:
+        return self.template
+
+    @property
+    def res(self) -> tuple[int, int]:
+        return self.T.res
+
+    @property
+    def sr(self) -> int:
+        return self.T.sr
+
 
-def make_tracks_dir(path: Path) -> Path:
+def make_tracks_dir(tracks_dir: Path, path: Path) -> None:
     from os import mkdir
     from shutil import rmtree
 
-    tracks_dir = path.parent / f"{path.stem}_tracks"
-
     try:
         mkdir(tracks_dir)
     except OSError:
         rmtree(tracks_dir)
         mkdir(tracks_dir)
 
-    return tracks_dir
 
-
-def set_stream_to_0(tl: v3, log: Log) -> None:
-    src = tl.src
-    assert src is not None
-    fold = make_tracks_dir(src.path)
+def set_stream_to_0(src: FileInfo, tl: v3, log: Log) -> None:
+    dir_exists = False
     cache: dict[Path, FileInfo] = {}
 
     def make_track(i: int, path: Path) -> FileInfo:
+        nonlocal dir_exists
+
+        fold = path.parent / f"{path.stem}_tracks"
+        if not dir_exists:
+            make_tracks_dir(fold, path)
+            dir_exists = True
+
         newtrack = fold / f"{path.stem}_{i}.wav"
         if newtrack not in cache:
             mux(path, output=newtrack, stream=i)
-            cache[newtrack] = initFileInfo(f"{newtrack}", log)
+            cache[newtrack] = FileInfo.init(f"{newtrack}", log)
         return cache[newtrack]
 
     for alayer in tl.a:
@@ -6,6 +6,8 @@ from typing import TypedDict
 import bv
 from bv.codec import Codec
 
+from auto_editor.utils.log import Log
+
 
 class DictContainer(TypedDict, total=False):
     max_videos: int | None
@@ -60,18 +62,23 @@ def codec_type(x: str) -> str:
     return ""
 
 
-def container_constructor(ext: str) -> Container:
-    with bv.open(f".{ext}", "w") as container:
-        codecs = container.supported_codecs
-        if ext == "webm":
-            vdefault = "vp9"
-        else:
-            vdefault = container.default_video_codec
-        adefault = container.default_audio_codec
-        sdefault = container.default_subtitle_codec
-        if sdefault == "none" and ext == "mp4":
-            sdefault = "srt"
-
+def container_constructor(ext: str, log: Log) -> Container:
+    try:
+        container = bv.open(f".{ext}", "w")
+    except ValueError:
+        log.error(f"Could not find a suitable format for extension: {ext}")
+
+    codecs = container.supported_codecs
+    if ext == "webm":
+        vdefault = "vp9"
+    else:
+        vdefault = container.default_video_codec
+    adefault = container.default_audio_codec
+    sdefault = container.default_subtitle_codec
+    if sdefault == "none" and ext == "mp4":
+        sdefault = "srt"
+
+    container.close()
     vcodecs = set()
     acodecs = set()
     scodecs = set()
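
Note: `container_constructor` now takes a `Log` and reports an unrecognized extension via `log.error` rather than letting `bv.open` raise an uncaught `ValueError`. A usage sketch, assuming a `Log` instance named `log`:

    ctr = container_constructor("mp4", log)  # probes a temporary ".mp4" muxer
    container_constructor("bogus", log)  # logs: Could not find a suitable format for extension: bogus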
auto_editor/utils/func.py CHANGED
@@ -4,6 +4,9 @@ from typing import TYPE_CHECKING
 
 import numpy as np
 
+from auto_editor.utils.log import Log
+from auto_editor.utils.types import split_num_str
+
 if TYPE_CHECKING:
     from collections.abc import Callable
     from fractions import Fraction
@@ -105,3 +108,21 @@ def aspect_ratio(width: int, height: int) -> tuple[int, int]:
 
     c = gcd(width, height)
     return width // c, height // c
+
+
+def parse_bitrate(input_: str, log: Log) -> int:
+    try:
+        val, unit = split_num_str(input_)
+    except Exception as e:
+        log.error(e)
+
+    if unit.lower() == "k":
+        return int(val * 1000)
+    if unit == "M":
+        return int(val * 1_000_000)
+    if unit == "G":
+        return int(val * 1_000_000_000)
+    if unit == "":
+        return int(val)
+
+    log.error(f"Unknown bitrate: {input_}")
@@ -1,9 +1,9 @@
 Metadata-Version: 2.4
 Name: auto-editor
-Version: 27.0.0
+Version: 27.1.1
 Summary: Auto-Editor: Effort free video editing!
 Author-email: WyattBlue <wyattblue@auto-editor.com>
-License: Unlicense
+License-Expression: Unlicense
 Project-URL: Bug Tracker, https://github.com/WyattBlue/auto-editor/issues
 Project-URL: Source Code, https://github.com/WyattBlue/auto-editor
 Project-URL: homepage, https://auto-editor.com