auto-editor 26.0.0__py3-none-any.whl → 26.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
auto_editor/__init__.py CHANGED
@@ -1 +1 @@
-__version__ = "26.0.0"
+__version__ = "26.1.0"
auto_editor/__main__.py CHANGED
@@ -8,15 +8,14 @@ from subprocess import run
 
 import auto_editor
 from auto_editor.edit import edit_media
-from auto_editor.ffwrapper import FFmpeg, initFFmpeg
 from auto_editor.utils.func import get_stdout
 from auto_editor.utils.log import Log
 from auto_editor.utils.types import (
     Args,
-    color,
     frame_rate,
     margin,
     number,
+    parse_color,
     resolution,
     sample_rate,
     speed,
@@ -34,13 +33,12 @@ def main_options(parser: ArgumentParser) -> ArgumentParser:
         "-m",
         type=margin,
         metavar="LENGTH",
-        help='Set sections near "loud" as "loud" too if section is less than LENGTH away.',
+        help='Set sections near "loud" as "loud" too if section is less than LENGTH away',
     )
     parser.add_argument(
-        "--edit-based-on",
         "--edit",
         metavar="METHOD",
-        help="Decide which method to use when making edits",
+        help="Set an expression which determines how to make auto edits",
     )
     parser.add_argument(
         "--silent-speed",
@@ -108,7 +106,7 @@ def main_options(parser: ArgumentParser) -> ArgumentParser:
     parser.add_argument(
         "--background",
         "-b",
-        type=color,
+        type=parse_color,
         metavar="COLOR",
         help="Set the background as a solid RGB color",
     )
@@ -148,7 +146,7 @@ def main_options(parser: ArgumentParser) -> ArgumentParser:
         "--output",
         "-o",
         metavar="FILE",
-        help="Set the name/path of the new output file.",
+        help="Set the name/path of the new output file",
     )
     parser.add_argument(
         "--player", "-p", metavar="CMD", help="Set player to open output media files"
@@ -161,16 +159,6 @@ def main_options(parser: ArgumentParser) -> ArgumentParser:
         metavar="PATH",
         help="Set where the temporary directory is located",
     )
-    parser.add_argument(
-        "--ffmpeg-location",
-        metavar="PATH",
-        help="Set a custom path to the ffmpeg location",
-    )
-    parser.add_argument(
-        "--my-ffmpeg",
-        flag=True,
-        help="Use the ffmpeg on your PATH instead of the one packaged",
-    )
     parser.add_text("Display Options:")
     parser.add_argument(
         "--progress",
@@ -179,12 +167,6 @@ def main_options(parser: ArgumentParser) -> ArgumentParser:
         help="Set what type of progress bar to use",
     )
     parser.add_argument("--debug", flag=True, help="Show debugging messages and values")
-    parser.add_argument(
-        "--show-ffmpeg-commands", flag=True, help="Show ffmpeg commands"
-    )
-    parser.add_argument(
-        "--show-ffmpeg-output", flag=True, help="Show ffmpeg stdout and stderr"
-    )
     parser.add_argument("--quiet", "-q", flag=True, help="Display less output")
     parser.add_argument(
         "--preview",
@@ -252,11 +234,6 @@ def main_options(parser: ArgumentParser) -> ArgumentParser:
         flag=True,
         help="Disable the inclusion of data streams in the output file",
     )
-    parser.add_argument(
-        "--extras",
-        metavar="CMD",
-        help="Add extra options for ffmpeg. Must be in quotes",
-    )
     parser.add_argument(
         "--config", flag=True, help="When set, look for `config.pal` and run it"
     )
@@ -267,7 +244,7 @@ def main_options(parser: ArgumentParser) -> ArgumentParser:
     return parser
 
 
-def download_video(my_input: str, args: Args, ffmpeg: FFmpeg, log: Log) -> str:
+def download_video(my_input: str, args: Args, log: Log) -> str:
     log.conwrite("Downloading video...")
 
     def get_domain(url: str) -> str:
@@ -283,18 +260,15 @@ def download_video(my_input: str, args: Args, ffmpeg: FFmpeg, log: Log) -> str:
     else:
         output_format = args.output_format
 
-    yt_dlp_path = args.yt_dlp_location
-
-    cmd = ["--ffmpeg-location", ffmpeg.path]
-
+    cmd = []
    if download_format is not None:
        cmd.extend(["-f", download_format])
 
    cmd.extend(["-o", output_format, my_input])
-
    if args.yt_dlp_extras is not None:
        cmd.extend(args.yt_dlp_extras.split(" "))
 
+    yt_dlp_path = args.yt_dlp_location
    try:
        location = get_stdout(
            [yt_dlp_path, "--get-filename", "--no-warnings"] + cmd
@@ -337,6 +311,7 @@ def main() -> None:
             ({"--export-as-json"}, ["--export", "json"]),
             ({"--export-as-clip-sequence", "-excs"}, ["--export", "clip-sequence"]),
             ({"--keep-tracks-seperate"}, ["--keep-tracks-separate"]),
+            ({"--edit-based-on"}, ["--edit"]),
         ],
     )
 
@@ -363,17 +338,10 @@ def main() -> None:
     is_machine = args.progress == "machine"
     log = Log(args.debug, args.quiet, args.temp_dir, is_machine, no_color)
 
-    ffmpeg = initFFmpeg(
-        log,
-        args.ffmpeg_location,
-        args.my_ffmpeg,
-        args.show_ffmpeg_commands,
-        args.show_ffmpeg_output,
-    )
     paths = []
     for my_input in args.input:
         if my_input.startswith("http://") or my_input.startswith("https://"):
-            paths.append(download_video(my_input, args, ffmpeg, log))
+            paths.append(download_video(my_input, args, log))
         else:
             if not splitext(my_input)[1]:
                 if isdir(my_input):
@@ -387,7 +355,7 @@ def main() -> None:
             paths.append(my_input)
 
     try:
-        edit_media(paths, ffmpeg, args, log)
+        edit_media(paths, args, log)
     except KeyboardInterrupt:
         log.error("Keyboard Interrupt")
     log.cleanup()
auto_editor/analyze.py CHANGED
@@ -27,6 +27,9 @@ if TYPE_CHECKING:
     from auto_editor.utils.log import Log
 
 
+__all__ = ("LevelError", "Levels", "iter_audio", "iter_motion")
+
+
 class LevelError(Exception):
     pass
 
@@ -69,45 +72,39 @@ def mut_remove_large(
             active = False
 
 
-def iter_audio(src, tb: Fraction, stream: int = 0) -> Iterator[np.float32]:
+def iter_audio(audio_stream: av.AudioStream, tb: Fraction) -> Iterator[np.float32]:
     fifo = AudioFifo()
-    try:
-        container = av.open(src.path, "r")
-        audio_stream = container.streams.audio[stream]
-        sample_rate = audio_stream.rate
+    sr = audio_stream.rate
 
-        exact_size = (1 / tb) * sample_rate
-        accumulated_error = 0
+    exact_size = (1 / tb) * sr
+    accumulated_error = Fraction(0)
 
-        # Resample so that audio data is between [-1, 1]
-        resampler = av.AudioResampler(
-            av.AudioFormat("flt"), audio_stream.layout, sample_rate
-        )
+    # Resample so that audio data is between [-1, 1]
+    resampler = av.AudioResampler(av.AudioFormat("flt"), audio_stream.layout, sr)
 
-        for frame in container.decode(audio=stream):
-            frame.pts = None  # Skip time checks
+    container = audio_stream.container
+    assert isinstance(container, av.container.InputContainer)
 
-            for reframe in resampler.resample(frame):
-                fifo.write(reframe)
+    for frame in container.decode(audio_stream):
+        frame.pts = None  # Skip time checks
 
-            while fifo.samples >= ceil(exact_size):
-                size_with_error = exact_size + accumulated_error
-                current_size = round(size_with_error)
-                accumulated_error = size_with_error - current_size
+        for reframe in resampler.resample(frame):
+            fifo.write(reframe)
 
-                audio_chunk = fifo.read(current_size)
-                assert audio_chunk is not None
-                arr = audio_chunk.to_ndarray().flatten()
-                yield np.max(np.abs(arr))
-
-    finally:
-        container.close()
+        while fifo.samples >= ceil(exact_size):
+            size_with_error = exact_size + accumulated_error
+            current_size = round(size_with_error)
+            accumulated_error = size_with_error - current_size
 
+            audio_chunk = fifo.read(current_size)
+            assert audio_chunk is not None
+            arr = audio_chunk.to_ndarray().flatten()
+            yield np.max(np.abs(arr))
 
-def iter_motion(src, tb, stream: int, blur: int, width: int) -> Iterator[np.float32]:
-    container = av.open(src.path, "r")
 
-    video = container.streams.video[stream]
+def iter_motion(
+    video: av.VideoStream, tb: Fraction, blur: int, width: int
+) -> Iterator[np.float32]:
     video.thread_type = "AUTO"
 
     prev_frame = None
@@ -125,6 +122,9 @@ def iter_motion(src, tb, stream: int, blur: int, width: int) -> Iterator[np.float32]:
         graph.add("buffersink"),
     ).configure()
 
+    container = video.container
+    assert isinstance(container, av.container.InputContainer)
+
     for unframe in container.decode(video):
         if unframe.pts is None:
             continue
@@ -151,8 +151,6 @@ def iter_motion(src, tb, stream: int, blur: int, width: int) -> Iterator[np.float32]:
         prev_frame = current_frame
         prev_index = index
 
-    container.close()
-
 
 def obj_tag(path: Path, kind: str, tb: Fraction, obj: Sequence[object]) -> str:
     mod_time = int(path.stat().st_mtime)
@@ -175,7 +173,11 @@ class Levels:
         if (arr := self.read_cache("audio", (0,))) is not None:
             return len(arr)
 
-        result = sum(1 for _ in iter_audio(self.src, self.tb, 0))
+        with av.open(self.src.path, "r") as container:
+            audio_stream = container.streams.audio[0]
+            self.log.experimental(audio_stream.codec)
+            result = sum(1 for _ in iter_audio(audio_stream, self.tb))
+
         self.log.debug(f"Audio Length: {result}")
         return result
 
@@ -239,21 +241,26 @@ class Levels:
         if (arr := self.read_cache("audio", (stream,))) is not None:
             return arr
 
-        with av.open(self.src.path, "r") as container:
-            audio = container.streams.audio[stream]
-            if audio.duration is not None and audio.time_base is not None:
-                inaccurate_dur = int(audio.duration * audio.time_base * self.tb)
-            elif container.duration is not None:
-                inaccurate_dur = int(container.duration / av.time_base * self.tb)
-            else:
-                inaccurate_dur = 1024
+        container = av.open(self.src.path, "r")
+        audio = container.streams.audio[stream]
+
+        if audio.codec.experimental:
+            self.log.error(f"`{audio.codec.name}` is an experimental codec")
+
+        if audio.duration is not None and audio.time_base is not None:
+            inaccurate_dur = int(audio.duration * audio.time_base * self.tb)
+        elif container.duration is not None:
+            inaccurate_dur = int(container.duration / av.time_base * self.tb)
+        else:
+            inaccurate_dur = 1024
 
         bar = self.bar
         bar.start(inaccurate_dur, "Analyzing audio volume")
 
         result = np.zeros((inaccurate_dur), dtype=np.float32)
         index = 0
-        for value in iter_audio(self.src, self.tb, stream):
+
+        for value in iter_audio(audio, self.tb):
             if index > len(result) - 1:
                 result = np.concatenate(
                     (result, np.zeros((len(result)), dtype=np.float32))
@@ -263,6 +270,7 @@ class Levels:
             index += 1
 
         bar.end()
+        assert len(result) > 0
         return self.cache(result[:index], "audio", (stream,))
 
     def motion(self, stream: int, blur: int, width: int) -> NDArray[np.float32]:
@@ -273,20 +281,25 @@ class Levels:
         if (arr := self.read_cache("motion", mobj)) is not None:
             return arr
 
-        with av.open(self.src.path, "r") as container:
-            video = container.streams.video[stream]
-            inaccurate_dur = (
-                1024
-                if video.duration is None or video.time_base is None
-                else int(video.duration * video.time_base * self.tb)
-            )
+        container = av.open(self.src.path, "r")
+        video = container.streams.video[stream]
+
+        if video.codec.experimental:
+            self.log.experimental(video.codec)
+
+        inaccurate_dur = (
+            1024
+            if video.duration is None or video.time_base is None
+            else int(video.duration * video.time_base * self.tb)
+        )
 
         bar = self.bar
         bar.start(inaccurate_dur, "Analyzing motion")
 
         result = np.zeros((inaccurate_dur), dtype=np.float32)
         index = 0
-        for value in iter_motion(self.src, self.tb, stream, blur, width):
+
+        for value in iter_motion(video, self.tb, blur, width):
             if index > len(result) - 1:
                 result = np.concatenate(
                     (result, np.zeros((len(result)), dtype=np.float32))
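Note on the iter_audio/iter_motion refactor above: both helpers now take an already-opened PyAV stream rather than a source object, so the caller opens the container and keeps it open while iterating, as Levels.audio() and Levels.motion() do in this release. A minimal caller sketch under that assumption (the input path and timebase below are placeholders, not values from the package):

    import av
    from fractions import Fraction

    from auto_editor.analyze import iter_audio

    tb = Fraction(30)  # assumed timebase: 30 analysis ticks per second
    with av.open("example.mp4") as container:  # placeholder input file
        audio_stream = container.streams.audio[0]
        # Per the 26.1.0 signature, each value is the peak |amplitude| (0..1) of one tick.
        levels = [float(v) for v in iter_audio(audio_stream, tb)]
    print(f"analyzed {len(levels)} ticks")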
auto_editor/edit.py CHANGED
@@ -3,15 +3,16 @@ from __future__ import annotations
 import os
 import sys
 from fractions import Fraction
+from os.path import splitext
 from subprocess import run
 from typing import Any
 
 import av
 from av import AudioResampler
 
-from auto_editor.ffwrapper import FFmpeg, FileInfo, initFileInfo
+from auto_editor.ffwrapper import FileInfo, initFileInfo
 from auto_editor.lib.contracts import is_int, is_str
-from auto_editor.make_layers import make_timeline
+from auto_editor.make_layers import clipify, make_av, make_timeline
 from auto_editor.output import Ensure, parse_bitrate
 from auto_editor.render.audio import make_new_audio
 from auto_editor.render.subtitle import make_new_subtitles
@@ -31,7 +32,7 @@ def set_output(
     if src is None:
         root, ext = "out", ".mp4"
     else:
-        root, ext = os.path.splitext(str(src.path) if out is None else out)
+        root, ext = splitext(src.path if out is None else out)
     if ext == "":
         ext = src.path.suffix
 
@@ -159,12 +160,12 @@ def parse_export(export: str, log: Log) -> dict[str, Any]:
     log.error(f"'{name}': Export must be [{', '.join([s for s in parsing.keys()])}]")
 
 
-def edit_media(paths: list[str], ffmpeg: FFmpeg, args: Args, log: Log) -> None:
+def edit_media(paths: list[str], args: Args, log: Log) -> None:
     bar = initBar(args.progress)
     tl = None
 
     if paths:
-        path_ext = os.path.splitext(paths[0])[1].lower()
+        path_ext = splitext(paths[0])[1].lower()
         if path_ext == ".xml":
             from auto_editor.formats.fcp7 import fcp7_read_xml
 
@@ -243,7 +244,7 @@ def edit_media(paths: list[str], ffmpeg: FFmpeg, args: Args, log: Log) -> None:
        from auto_editor.formats.fcp7 import fcp7_write_xml
 
        is_resolve = export.startswith("resolve")
-        fcp7_write_xml(export_ops["name"], output, is_resolve, tl, log)
+        fcp7_write_xml(export_ops["name"], output, is_resolve, tl)
        return
 
    if export == "final-cut-pro":
@@ -267,7 +268,7 @@ def edit_media(paths: list[str], ffmpeg: FFmpeg, args: Args, log: Log) -> None:
         shotcut_write_mlt(output, tl)
         return
 
-    out_ext = os.path.splitext(output)[1].replace(".", "")
+    out_ext = splitext(output)[1].replace(".", "")
 
     # Check if export options make sense.
     ctr = container_constructor(out_ext.lower())
@@ -293,27 +294,7 @@ def edit_media(paths: list[str], ffmpeg: FFmpeg, args: Args, log: Log) -> None:
 
     if ctr.default_aud != "none":
         ensure = Ensure(bar, samplerate, log)
-        audio_paths = make_new_audio(tl, ensure, args, ffmpeg, bar, log)
-        if (
-            not (args.keep_tracks_separate and ctr.max_audios is None)
-            and len(audio_paths) > 1
-        ):
-            # Merge all the audio a_tracks into one.
-            new_a_file = os.path.join(log.temp, "new_audio.wav")
-            new_cmd = []
-            for path in audio_paths:
-                new_cmd.extend(["-i", path])
-            new_cmd.extend(
-                [
-                    "-filter_complex",
-                    f"amix=inputs={len(audio_paths)}:duration=longest",
-                    "-ac",
-                    "2",
-                    new_a_file,
-                ]
-            )
-            ffmpeg.run(new_cmd)
-            audio_paths = [new_a_file]
+        audio_paths = make_new_audio(tl, ctr, ensure, args, bar, log)
     else:
         audio_paths = []
 
@@ -362,8 +343,8 @@ def edit_media(paths: list[str], ffmpeg: FFmpeg, args: Args, log: Log) -> None:
         for i, sub_path in enumerate(sub_paths):
             subtitle_input = av.open(sub_path)
             subtitle_inputs.append(subtitle_input)
-            subtitle_stream = output.add_stream(
-                template=subtitle_input.streams.subtitles[0]
+            subtitle_stream = output.add_stream_from_template(
+                subtitle_input.streams.subtitles[0]
             )
             if i < len(src.subtitles) and src.subtitles[i].lang is not None:
                 subtitle_stream.metadata["language"] = src.subtitles[i].lang  # type: ignore
@@ -430,14 +411,15 @@ def edit_media(paths: list[str], ffmpeg: FFmpeg, args: Args, log: Log) -> None:
         if tl.v1 is None:
             log.error("Timeline too complex to use clip-sequence export")
 
-        from auto_editor.make_layers import clipify, make_av
-        from auto_editor.utils.func import append_filename
-
         def pad_chunk(chunk: Chunk, total: int) -> Chunks:
             start = [] if chunk[0] == 0 else [(0, chunk[0], 99999.0)]
             end = [] if chunk[1] == total else [(chunk[1], total, 99999.0)]
             return start + [chunk] + end
 
+        def append_filename(path: str, val: str) -> str:
+            root, ext = splitext(path)
+            return root + val + ext
+
         total_frames = tl.v1.chunks[-1][1] - 1
         clip_num = 0
         for chunk in tl.v1.chunks:
auto_editor/ffwrapper.py CHANGED
@@ -1,63 +1,14 @@
 from __future__ import annotations
 
-import sys
 from dataclasses import dataclass
 from fractions import Fraction
 from pathlib import Path
-from shutil import which
-from subprocess import PIPE, Popen, run
-from typing import Any
 
 import av
 
 from auto_editor.utils.log import Log
 
 
-def initFFmpeg(
-    log: Log, ff_location: str | None, my_ffmpeg: bool, show_cmd: bool, debug: bool
-) -> FFmpeg:
-    if ff_location is not None:
-        program = ff_location
-    elif my_ffmpeg:
-        program = "ffmpeg"
-    else:
-        try:
-            import ae_ffmpeg
-
-            program = ae_ffmpeg.get_path()
-        except ImportError:
-            program = "ffmpeg"
-
-    path: str | None = which(program)
-    if path is None:
-        log.error("Did not find ffmpeg on PATH.")
-
-    return FFmpeg(log, path, show_cmd, debug)
-
-
-@dataclass(slots=True)
-class FFmpeg:
-    log: Log
-    path: str
-    show_cmd: bool
-    debug: bool
-
-    def run(self, cmd: list[str]) -> None:
-        cmd = [self.path, "-hide_banner", "-y"] + cmd
-        if not self.debug:
-            cmd.extend(["-nostats", "-loglevel", "error"])
-        if self.show_cmd:
-            sys.stderr.write(f"{' '.join(cmd)}\n\n")
-        run(cmd)
-
-    def Popen(
-        self, cmd: list[str], stdin: Any = None, stdout: Any = PIPE, stderr: Any = None
-    ) -> Popen:
-        if self.show_cmd:
-            sys.stderr.write(f"{self.path} {' '.join(cmd)}\n\n")
-        return Popen([self.path] + cmd, stdin=stdin, stdout=stdout, stderr=stderr)
-
-
 def mux(input: Path, output: Path, stream: int) -> None:
     input_container = av.open(input, "r")
     output_container = av.open(output, "w")
auto_editor/formats/fcp7.py CHANGED
@@ -485,7 +485,7 @@ def premiere_write_audio(audio: Element, make_filedef, src: FileInfo, tl: v3) -> None:
     audio.append(track)
 
 
-def fcp7_write_xml(name: str, output: str, resolve: bool, tl: v3, log: Log) -> None:
+def fcp7_write_xml(name: str, output: str, resolve: bool, tl: v3) -> None:
     width, height = tl.res
     timebase, ntsc = set_tb_ntsc(tl.tb)
 
auto_editor/help.py CHANGED
@@ -24,10 +24,23 @@ example:
 will set the speed from 400 ticks to 800 ticks to 2.5x
 If timebase is 30, 400 ticks to 800 means 13.33 to 26.66 seconds
 """.strip(),
-    "--edit-based-on": """
+    "--edit": """
 Evaluates a palet expression that returns a bool-array?. The array is then used for
 editing.
 
+Examples:
+  --edit audio
+  --edit audio:0.03 ; Change the threshold. Can be a value between 0-1.
+  --edit audio:3% ; You can also use the `%` macro.
+  --edit audio:0.03,stream=0 ; Only consider the first stream for editing.
+  --edit audio:stream=1,threshold=0.05 ; Here's how you use keyword arguments.
+  --edit (or audio:0.04,stream=0 audio:0.08,stream=1) ; Consider both streams for editing (merge with logical or), but with different thresholds.
+  --edit motion
+  --edit motion:0.02,blur=3
+  --edit (or audio:0.04 motion:0.02,blur=3)
+  --edit none
+  --edit all/e
+
 Editing Methods:
  - audio ; Audio silence/loudness detection
     - threshold threshold? : 4%
@@ -52,19 +65,6 @@ Editing Methods:
 
 - none ; Do not modify the media in anyway; mark all sections as "loud" (1).
 - all/e ; Cut out everything out; mark all sections as "silent" (0).
-
-
-Command-line Examples:
-  --edit audio
-  --edit audio:threshold=4%
-  --edit audio:threshold=0.03
-  --edit audio:stream=1
-  --edit (or audio:4%,stream=0 audio:8%,stream=1) ; `threshold` is first
-  --edit motion
-  --edit motion:threshold=2%,blur=3
-  --edit (or audio:4% motion:2%,blur=3)
-  --edit none
-  --edit all/e
 """.strip(),
     "--export": """
 This option controls how timelines are exported.
@@ -144,8 +144,6 @@ If not set, tempdir will be set with Python's tempfile module
 The directory doesn't have to exist beforehand, however, the root path must be valid.
 Beware that the temp directory can get quite big.
 """.strip(),
-    "--ffmpeg-location": "This takes precedence over `--my-ffmpeg`.",
-    "--my-ffmpeg": "This is equivalent to `--ffmpeg-location ffmpeg`.",
     "--audio-bitrate": """
 `--audio-bitrate` sets the target bitrate for the audio encoder.
 By default, the value is `auto` (let the encoder decide).
auto_editor/lang/palet.py CHANGED
@@ -353,9 +353,7 @@ class Lexer:
             if is_method:
                 from auto_editor.utils.cmdkw import parse_method
 
-                return Token(
-                    M, parse_method(name, result, env), self.lineno, self.column
-                )
+                return Token(M, parse_method(name, result), self.lineno, self.column)
 
             if self.char == ".":  # handle `object.method` syntax
                 self.advance()
@@ -635,6 +633,8 @@ def edit_subtitle(pattern, stream=0, **kwargs):
 
 
 class StackTraceManager:
+    __slots__ = ("stack",)
+
     def __init__(self) -> None:
         self.stack: list[Sym] = []
 
@@ -645,12 +645,6 @@ class StackTraceManager:
         if self.stack:
             self.stack.pop()
 
-    def get_stacktrace(self) -> str:
-        return "\n".join(
-            f" at {sym.val} ({sym.lineno}:{sym.column})"
-            for sym in reversed(self.stack)
-        )
-
 
 stack_trace_manager = StackTraceManager()
 
@@ -14,7 +14,6 @@ if TYPE_CHECKING:
     from numpy.typing import NDArray
 
     Number = int | float | complex | Fraction
-    Real = int | float | Fraction
     BoolList = NDArray[np.bool_]
     Node = tuple
 
@@ -831,12 +830,6 @@ def make_standard_env() -> dict[str, Any]:
         check_args("xor", vals, (2, None), (is_bool,))
         return reduce(lambda a, b: a ^ b, vals)
 
-    def string_ref(s: str, ref: int) -> Char:
-        try:
-            return Char(s[ref])
-        except IndexError:
-            raise MyError(f"string index {ref} is out of range")
-
     def number_to_string(val: Number) -> str:
         if isinstance(val, complex):
             join = "" if val.imag < 0 else "+"
auto_editor/make_layers.py CHANGED
@@ -139,7 +139,7 @@ def make_timeline(
 
     for i, src in enumerate(sources):
         try:
-            parser = Parser(Lexer("`--edit`", args.edit_based_on))
+            parser = Parser(Lexer("`--edit`", args.edit))
             if log.is_debug:
                 log.debug(f"edit: {parser}")
 
@@ -169,6 +169,8 @@
         has_loud = concat((has_loud, result))
         src_index = concat((src_index, np.full(len(result), i, dtype=np.int32)))
 
+    assert len(has_loud) > 0
+
     # Setup for handling custom speeds
     speed_index = has_loud.astype(np.uint)
     speed_map = [args.silent_speed, args.video_speed]
auto_editor/output.py CHANGED
@@ -13,7 +13,10 @@ from auto_editor.utils.types import _split_num_str
 
 
 def parse_bitrate(input_: str, log: Log) -> int:
-    val, unit = _split_num_str(input_)
+    try:
+        val, unit = _split_num_str(input_)
+    except Exception as e:
+        log.error(e)
 
     if unit.lower() == "k":
         return int(val * 1000)