auto-editor 28.0.0__py3-none-any.whl → 28.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -7,7 +7,7 @@ from urllib.parse import unquote
  from xml.etree.ElementTree import Element

  from auto_editor.ffwrapper import FileInfo
- from auto_editor.timeline import ASpace, Template, TlAudio, TlVideo, VSpace, v3
+ from auto_editor.timeline import ASpace, Clip, Template, VSpace, v3

  if TYPE_CHECKING:
      from auto_editor.utils.log import Log
@@ -213,7 +213,7 @@ def fcp7_read_xml(path: str, log: Log) -> v3:
          for t, track in enumerate(tracks["track"]):
              if len(track["clipitem"]) > 0:
                  vobjs.append([])
-                 for i, clipitem in enumerate(track["clipitem"]):
+                 for clipitem in track["clipitem"]:
                      file_id = clipitem["file"].attrib["id"]
                      if file_id not in sources:
                          fileobj = parse(clipitem["file"], {"pathurl": str})
@@ -239,7 +239,7 @@ def fcp7_read_xml(path: str, log: Log) -> v3:
                      offset = clipitem["in"]

                      vobjs[t].append(
-                         TlVideo(start, dur, sources[file_id], offset, speed, stream=0)
+                         Clip(start, dur, sources[file_id], offset, stream=0, speed=speed)
                      )

      if "audio" in av:
@@ -250,7 +250,7 @@ def fcp7_read_xml(path: str, log: Log) -> v3:
          for t, track in enumerate(tracks["track"]):
              if len(track["clipitem"]) > 0:
                  aobjs.append([])
-                 for i, clipitem in enumerate(track["clipitem"]):
+                 for clipitem in track["clipitem"]:
                      file_id = clipitem["file"].attrib["id"]
                      if file_id not in sources:
                          fileobj = parse(clipitem["file"], {"pathurl": str})
@@ -268,9 +268,7 @@ def fcp7_read_xml(path: str, log: Log) -> v3:
                      offset = clipitem["in"]

                      aobjs[t].append(
-                         TlAudio(
-                             start, dur, sources[file_id], offset, speed, volume=1, stream=0
-                         )
+                         Clip(start, dur, sources[file_id], offset, stream=0, speed=speed)
                      )

      T = Template.init(sources[next(iter(sources))], sr, res=res)
@@ -9,9 +9,8 @@ from auto_editor.ffwrapper import FileInfo
  from auto_editor.json import load
  from auto_editor.lib.err import MyError
  from auto_editor.timeline import (
+     Clip,
      Template,
-     TlAudio,
-     TlVideo,
      audio_builder,
      v1,
      v3,
@@ -112,7 +111,7 @@ def read_v3(tl: Any, log: Log) -> v3:
      tb = Fraction(tl["timebase"])

      v: Any = []
-     a: list[list[TlAudio]] = []
+     a: list[list[Clip]] = []

      for vlayers in tl["v"]:
          if vlayers:
@@ -141,7 +140,7 @@ def read_v3(tl: Any, log: Log) -> v3:
                  log.error(f"Unknown audio object: {adict['name']}")

              try:
-                 a_out.append(TlAudio(**parse_obj(adict, audio_builder)))
+                 a_out.append(Clip(**parse_obj(adict, audio_builder)))
              except ParserError as e:
                  log.error(e)

@@ -198,10 +197,10 @@ def read_v1(tl: Any, log: Log) -> v3:
          if src.videos:
              if len(vtl) == 0:
                  vtl.append([])
-             vtl[0].append(TlVideo(c.start, c.dur, c.src, c.offset, c.speed, 0))
+             vtl[0].append(Clip(c.start, c.dur, c.src, c.offset, 0, c.speed))

          for a in range(len(src.audios)):
-             atl[a].append(TlAudio(c.start, c.dur, c.src, c.offset, c.speed, 1, a))
+             atl[a].append(Clip(c.start, c.dur, c.src, c.offset, a, c.speed))

      return v3(
          src.get_fps(),
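
Both readers above now build a single Clip type where 28.0.0 used separate TlVideo/TlAudio objects. A minimal sketch of the changed call shape, assuming the argument order visible in these hunks (start, dur, src, offset, stream, speed) and an already-loaded FileInfo; make_clips is a hypothetical helper, not part of the package:

from auto_editor.ffwrapper import FileInfo
from auto_editor.timeline import Clip

def make_clips(src: FileInfo, start: int, dur: int, offset: int, speed: float) -> tuple[Clip, Clip]:
    # 28.0.0: TlVideo(start, dur, src, offset, speed, stream=0)
    #         TlAudio(start, dur, src, offset, speed, volume=1, stream=0)
    # 28.0.2: one Clip type serves both video and audio layers.
    vclip = Clip(start, dur, src, offset, stream=0, speed=speed)
    aclip = Clip(start, dur, src, offset, stream=0, speed=speed)
    return vclip, aclip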
auto_editor/json.py CHANGED
@@ -89,7 +89,7 @@ class Lexer:

              if self.char == "u":
                  buf = ""
-                 for i in range(4):
+                 for _ in range(4):
                      self.advance()
                      if self.char is None:
                          self.error("\\u escape sequence needs 4 hexs")
@@ -162,7 +162,7 @@ class Lexer:
              return (key, None)

          keyword = ""
-         for i in range(5): # Longest valid keyword length
+         for _ in range(5): # Longest valid keyword length
              if self.char is None or self.char in " \t\n\r\x0b\x0c[]}{,":
                  break
              keyword += self.char
auto_editor/lang/palet.py CHANGED
@@ -65,20 +65,11 @@ class Token:


  class Lexer:
-     __slots__ = (
-         "filename",
-         "text",
-         "allow_lang_prag",
-         "pos",
-         "char",
-         "lineno",
-         "column",
-     )
-
-     def __init__(self, filename: str, text: str, langprag: bool = False):
+     __slots__ = ("filename", "text", "pos", "char", "lineno", "column")
+
+     def __init__(self, filename: str, text: str):
          self.filename = filename
          self.text = text
-         self.allow_lang_prag = langprag
          self.pos: int = 0
          self.lineno: int = 1
          self.column: int = 1
@@ -157,7 +148,7 @@ class Lexer:
              token = SEC
          elif unit == "dB":
              token = DB
-         elif unit != "i" and unit != "%":
+         elif unit != "%":
              return Token(
                  VAL,
                  Sym(result + unit, self.lineno, self.column),
@@ -166,9 +157,7 @@ class Lexer:
              )

          try:
-             if unit == "i":
-                 return Token(VAL, complex(result + "j"), self.lineno, self.column)
-             elif unit == "%":
+             if unit == "%":
                  return Token(VAL, float(result) / 100, self.lineno, self.column)
              elif "/" in result:
                  return Token(token, Fraction(result), self.lineno, self.column)
@@ -289,32 +278,6 @@ class Lexer:
                  self.advance()
                  if self.char is None or self.char == "\n":
                      continue
-
-             elif self.char == "l" and self.peek() == "a":
-                 buf = StringIO()
-                 while self.char_is_norm():
-                     assert self.char is not None
-                     buf.write(self.char)
-                     self.advance()
-
-                 result = buf.getvalue()
-                 if result != "lang":
-                     self.error(f"Unknown hash literal `#{result}`")
-                 if not self.allow_lang_prag:
-                     self.error("#lang pragma is not allowed here")
-
-                 self.advance()
-                 buf = StringIO()
-                 while not self.is_whitespace():
-                     assert self.char is not None
-                     buf.write(self.char)
-                     self.advance()
-
-                 result = buf.getvalue()
-                 if result != "palet":
-                     self.error(f"Invalid #lang: {result}")
-                 self.allow_lang_prag = False
-                 continue
              else:
                  return self.hash_literal()

@@ -19,14 +19,13 @@ if TYPE_CHECKING:
      import numpy as np
      from numpy.typing import NDArray

-     Number = int | float | complex | Fraction
+     Number = int | float | Fraction
      BoolList = NDArray[np.bool_]
      Node = tuple


  def make_standard_env() -> dict[str, Any]:
      import os.path
-     from cmath import sqrt as complex_sqrt
      from functools import reduce
      from operator import add, ge, gt, is_, le, lt, mod, mul
      from subprocess import run
@@ -512,7 +511,7 @@ def make_standard_env() -> dict[str, Any]:
          for c in node[2:]:
              my_eval(env, c)

-     def syn_quote(env: Env, node: Node) -> Any:
+     def syn_quote(_: Env, node: Node) -> Any:
          guard_term(node, 2, 2)
          if type(node[1]) is Keyword:
              return QuotedKeyword(node[1])
@@ -828,14 +827,6 @@ def make_standard_env() -> dict[str, Any]:

          return reduce(lambda a, b: a // b, m, n)

-     def _sqrt(v: Number) -> Number:
-         r = complex_sqrt(v)
-         if r.imag == 0:
-             if int(r.real) == r.real:
-                 return int(r.real)
-             return r.real
-         return r
-
      def _xor(*vals: Any) -> bool | BoolList:
          if is_boolarr(vals[0]):
              check_args("xor", vals, (2, None), (is_boolarr,))
@@ -844,9 +835,6 @@ def make_standard_env() -> dict[str, Any]:
          return reduce(lambda a, b: a ^ b, vals)

      def number_to_string(val: Number) -> str:
-         if isinstance(val, complex):
-             join = "" if val.imag < 0 else "+"
-             return f"{val.real}{join}{val.imag}i"
          return f"{val}"

      def string_to_number(val) -> float:
@@ -971,6 +959,7 @@ def make_standard_env() -> dict[str, Any]:
          # syntax
          "lambda": Syntax(syn_lambda),
          "λ": Syntax(syn_lambda),
+         "defn": Syntax(syn_define),
          "define": Syntax(syn_define),
          "define/c": Syntax(syn_definec),
          "set!": Syntax(syn_set),
@@ -997,7 +986,6 @@ def make_standard_env() -> dict[str, Any]:
          "int?": is_int,
          "float?": is_float,
          "frac?": is_frac,
-         "complex?": Contract("complex?", lambda v: type(v) is complex),
          "nat?": is_nat,
          "nat1?": is_nat1,
          "threshold?": is_threshold,
@@ -1052,9 +1040,6 @@ def make_standard_env() -> dict[str, Any]:
          "div": Proc("div", int_div, (2, None), is_int),
          "add1": Proc("add1", lambda z: z + 1, (1, 1), is_num),
          "sub1": Proc("sub1", lambda z: z - 1, (1, 1), is_num),
-         "sqrt": Proc("sqrt", _sqrt, (1, 1), is_num),
-         "real-part": Proc("real-part", lambda v: v.real, (1, 1), is_num),
-         "imag-part": Proc("imag-part", lambda v: v.imag, (1, 1), is_num),
          # reals
          "pow": Proc("pow", pow, (2, 2), is_real),
          "abs": Proc("abs", abs, (1, 1), is_real),
@@ -46,7 +46,7 @@ def check_contract(c: object, val: object) -> bool:
          return val is True
      if c is False:
          return val is False
-     if type(c) in (int, float, float64, Fraction, complex, str, Sym):
+     if type(c) in (int, float, float64, Fraction, str, Sym):
          return val == c
      raise MyError(f"Invalid contract, got: {print_str(c)}")

@@ -164,7 +164,7 @@ def is_contract(c: object) -> bool:
          return True
      if c is True or c is False:
          return True
-     return type(c) in (int, float, Fraction, complex, str, Sym)
+     return type(c) in (int, float, Fraction, str, Sym)


  is_bool = Contract("bool?", lambda v: type(v) is bool)
@@ -172,10 +172,8 @@ is_int = Contract("int?", lambda v: type(v) is int)
  is_nat = Contract("nat?", lambda v: type(v) is int and v > -1)
  is_nat1 = Contract("nat1?", lambda v: type(v) is int and v > 0)
  int_not_zero = Contract("(or/c (not/c 0) int?)", lambda v: v != 0 and is_int(v))
- is_num = Contract(
-     "number?", lambda v: type(v) in (int, float, float64, Fraction, complex)
- )
  is_real = Contract("real?", lambda v: type(v) in (int, float, float64, Fraction))
+ is_num = is_real
  is_float = Contract("float?", lambda v: type(v) in (float, float64))
  is_frac = Contract("frac?", lambda v: type(v) is Fraction)
  is_str = Contract("string?", lambda v: type(v) is str)
@@ -182,9 +182,6 @@ def display_str(val: object) -> str:
          return f"{val}"
      if type(val) is range:
          return "#<range>"
-     if type(val) is complex:
-         join = "" if val.imag < 0 else "+"
-         return f"{val.real}{join}{val.imag}i"
      if type(val) is np.bool_:
          return "1" if val else "0"
      if type(val) is np.float64 or type(val) is np.float32:
@@ -11,7 +11,7 @@ from auto_editor.ffwrapper import FileInfo
  from auto_editor.lang.palet import Lexer, Parser, env, interpret, is_boolean_array
  from auto_editor.lib.data_structs import print_str
  from auto_editor.lib.err import MyError
- from auto_editor.timeline import ASpace, Template, TlAudio, TlVideo, VSpace, v1, v3
+ from auto_editor.timeline import ASpace, Clip, Template, VSpace, v1, v3
  from auto_editor.utils.func import mut_margin
  from auto_editor.utils.types import CoerceError, time

@@ -26,7 +26,7 @@ if TYPE_CHECKING:
      BoolList = NDArray[np.bool_]


- class Clip(NamedTuple):
+ class VirClip(NamedTuple):
      start: int
      dur: int
      offset: int
@@ -34,8 +34,8 @@ class Clip(NamedTuple):
      src: FileInfo


- def clipify(chunks: Chunks, src: FileInfo, start: int = 0) -> list[Clip]:
-     clips: list[Clip] = []
+ def clipify(chunks: Chunks, src: FileInfo, start: int = 0) -> list[VirClip]:
+     clips: list[VirClip] = []
      i = 0
      for chunk in chunks:
          if chunk[2] > 0 and chunk[2] < 99999.0:
@@ -46,14 +46,14 @@ def clipify(chunks: Chunks, src: FileInfo, start: int = 0) -> list[Clip]:
              offset = int(chunk[0] / chunk[2])

              if not (clips and clips[-1].start == round(start)):
-                 clips.append(Clip(start, dur, offset, chunk[2], src))
+                 clips.append(VirClip(start, dur, offset, chunk[2], src))
              start += dur
          i += 1

      return clips


- def make_av(src: FileInfo, all_clips: list[list[Clip]]) -> tuple[VSpace, ASpace]:
+ def make_av(src: FileInfo, all_clips: list[list[VirClip]]) -> tuple[VSpace, ASpace]:
      assert type(src) is FileInfo
      vtl: VSpace = []
      atl: ASpace = [[] for _ in range(len(src.audios))]
@@ -63,11 +63,11 @@ def make_av(src: FileInfo, all_clips: list[list[Clip]]) -> tuple[VSpace, ASpace]
          if src.videos:
              if len(vtl) == 0:
                  vtl.append([])
-             vtl[0].append(TlVideo(c.start, c.dur, c.src, c.offset, c.speed, 0))
+             vtl[0].append(Clip(c.start, c.dur, c.src, c.offset, 0, c.speed))

          for c in clips:
              for a in range(len(src.audios)):
-                 atl[a].append(TlAudio(c.start, c.dur, c.src, c.offset, c.speed, 1, a))
+                 atl[a].append(Clip(c.start, c.dur, c.src, c.offset, a, c.speed))

      return vtl, atl

@@ -139,7 +139,7 @@ def make_timeline(
      interpret(env, parser)

      results = []
-     for i, src in enumerate(sources):
+     for src in sources:
          try:
              parser = Parser(Lexer("`--edit`", args.edit))
              if log.is_debug:
@@ -247,7 +247,7 @@ def make_timeline(
      check_monotonic = len(sources) == 1
      last_i = 0

-     clips: list[Clip] = []
+     clips: list[VirClip] = []
      start = 0

      for chunk in echunk(speed_index, src_index):
@@ -265,7 +265,7 @@ def make_timeline(
                  raise ValueError("not monotonic", sources, this_i, last_i)
              last_i = this_i

-         clips.append(Clip(start, dur, offset, chunk[3], chunk[0]))
+         clips.append(VirClip(start, dur, offset, chunk[3], chunk[0]))

          start += dur

@@ -275,13 +275,13 @@ def make_timeline(
          if c.src.videos:
              if len(vtl) == 0:
                  vtl.append([])
-             vtl[0].append(TlVideo(c.start, c.dur, c.src, c.offset, c.speed, 0))
+             vtl[0].append(Clip(c.start, c.dur, c.src, c.offset, 0, c.speed))

      for c in clips:
          for a in range(len(c.src.audios)):
              if a >= len(atl):
                  atl.append([])
-             atl[a].append(TlAudio(c.start, c.dur, c.src, c.offset, c.speed, 1, a))
+             atl[a].append(Clip(c.start, c.dur, c.src, c.offset, a, c.speed))

  # Turn long silent/loud array to formatted chunk list.
  # Example: [1, 1, 1, 2, 2], {1: 1.0, 2: 1.5} => [(0, 3, 1.0), (3, 5, 1.5)]
auto_editor/preview.py CHANGED
@@ -28,23 +28,24 @@ def time_frame(
  def all_cuts(tl: v3, in_len: int) -> list[int]:
      # Calculate cuts
      tb = tl.tb
-     oe: list[tuple[int, int]] = []
+     clip_spans: list[tuple[int, int]] = []

      for clip in tl.a[0]:
          old_offset = clip.offset * clip.speed
-         oe.append((round(old_offset * clip.speed), round(old_offset + clip.dur)))
+         clip_spans.append((round(old_offset), round(old_offset + clip.dur)))

      cut_lens = []
      i = 0
-     while i < len(oe) - 1:
-         if i == 0 and oe[i][0] != 0:
-             cut_lens.append(oe[i][1])
+     while i < len(clip_spans) - 1:
+         if i == 0 and clip_spans[i][0] != 0:
+             cut_lens.append(clip_spans[i][0])

-         cut_lens.append(oe[i + 1][0] - oe[i][1])
+         cut_lens.append(clip_spans[i + 1][0] - clip_spans[i][1])
          i += 1

-     if len(oe) > 0 and oe[-1][1] < round(in_len * tb):
-         cut_lens.append(in_len - oe[-1][1])
+     if len(clip_spans) > 0 and clip_spans[-1][1] < round(in_len / tb):
+         cut_lens.append(in_len - clip_spans[-1][1])
+
      return cut_lens


@@ -53,19 +54,9 @@ def preview(tl: v3, log: Log) -> None:
      tb = tl.tb

      # Calculate input videos length
-     all_sources = set()
-     for vlayer in tl.v:
-         for vclip in vlayer:
-             if hasattr(vclip, "src"):
-                 all_sources.add(vclip.src)
-     for alayer in tl.a:
-         for aclip in alayer:
-             if hasattr(aclip, "src"):
-                 all_sources.add(aclip.src)
-
      in_len = 0
      bar = initBar("none")
-     for src in all_sources:
+     for src in tl.unique_sources():
          in_len += initLevels(src, tb, bar, False, log).media_length

      out_len = len(tl)
@@ -77,7 +68,7 @@ def preview(tl: v3, log: Log) -> None:
      time_frame(fp, "output", out_len, tb, f"{round((out_len / in_len) * 100, 2)}%")
      time_frame(fp, "diff", diff, tb, f"{round((diff / in_len) * 100, 2)}%")

-     clip_lens = [clip.dur / clip.speed for clip in tl.a[0]]
+     clip_lens = [clip.dur for clip in tl.a[0]]
      log.debug(clip_lens)

      fp.write(f"clips:\n - amount: {len(clip_lens)}\n")
@@ -90,7 +81,7 @@ def preview(tl: v3, log: Log) -> None:

      cut_lens = all_cuts(tl, in_len)
      log.debug(cut_lens)
-     fp.write(f"cuts:\n - amount: {len(clip_lens)}\n")
+     fp.write(f"cuts:\n - amount: {len(cut_lens)}\n")
      if len(cut_lens) > 0:
          time_frame(fp, "smallest", min(cut_lens), tb)
          time_frame(fp, "largest", max(cut_lens), tb)
@@ -15,7 +15,7 @@ from auto_editor.json import load
  from auto_editor.lang.palet import env
  from auto_editor.lib.contracts import andc, between_c, is_int_or_float
  from auto_editor.lib.err import MyError
- from auto_editor.timeline import TlAudio, v3
+ from auto_editor.timeline import Clip, v3
  from auto_editor.utils.cmdkw import ParserError, parse_with_palet, pAttr, pAttrs
  from auto_editor.utils.func import parse_bitrate
  from auto_editor.utils.log import Log
@@ -155,7 +155,7 @@ def apply_audio_normalization(
      output_file.close()


- def process_audio_clip(clip: TlAudio, data: np.ndarray, sr: int) -> np.ndarray:
+ def process_audio_clip(clip: Clip, data: np.ndarray, sr: int, log: Log) -> np.ndarray:
      to_s16 = bv.AudioResampler(format="s16", layout="stereo", rate=sr)
      input_buffer = BytesIO()

@@ -217,6 +217,9 @@ def process_audio_clip(clip: TlAudio, data: np.ndarray, sr: int) -> np.ndarray:
          except (bv.BlockingIOError, bv.EOFError):
              break

+     if not all_frames:
+         log.debug(f"No audio frames at {clip=}")
+         return np.zeros_like(data)
      return np.concatenate(all_frames, axis=1)


@@ -383,7 +386,7 @@ class Getter:

      def __init__(self, path: Path, stream: int, rate: int):
          self.container = bv.open(path)
-         self.stream = self.container.streams.audio[0]
+         self.stream = self.container.streams.audio[stream]
          self.rate = rate

      def get(self, start: int, end: int) -> np.ndarray:
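
With the Getter change above, the stream index passed to __init__ is now honored instead of always opening audio stream 0. A small sketch of the corrected lookup, assuming bv exposes the PyAV-style API the surrounding code uses; the file name and index are placeholders:

import bv

container = bv.open("input.mkv")  # placeholder path
stream_index = 1                  # e.g. a second audio track
# 28.0.0 always decoded audio stream 0 here; 28.0.2 uses the requested index.
stream = container.streams.audio[stream_index]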
@@ -454,7 +457,7 @@ def _make_new_audio(tl: v3, fmt: bv.AudioFormat, args: Args, log: Log) -> list[A
          arr: np.ndarray | None = None
          use_iter = False

-         for c, clip in enumerate(layer):
+         for clip in layer:
              if (clip.src, clip.stream) not in samples:
                  samples[(clip.src, clip.stream)] = Getter(
                      clip.src.path, clip.stream, sr
@@ -473,7 +476,7 @@ def _make_new_audio(tl: v3, fmt: bv.AudioFormat, args: Args, log: Log) -> list[A

              if clip.speed != 1 or clip.volume != 1:
                  clip_arr = process_audio_clip(
-                     clip, getter.get(samp_start, samp_end), sr
+                     clip, getter.get(samp_start, samp_end), sr, log
                  )
              else:
                  clip_arr = getter.get(samp_start, samp_end)
@@ -6,7 +6,7 @@ from typing import TYPE_CHECKING
  import bv
  import numpy as np

- from auto_editor.timeline import TlImage, TlRect, TlVideo
+ from auto_editor.timeline import Clip, TlImage, TlRect
  from auto_editor.utils.func import parse_bitrate

  if TYPE_CHECKING:
@@ -120,6 +120,7 @@ def render_av(

      del codec
      output_stream = output.add_stream(args.video_codec, rate=target_fps)
+     output_stream.options = {"x265-params": "log-level=error"} # type: ignore

      cc = output_stream.codec_context
      if args.vprofile is not None:
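
The options line added above quiets libx265's console output during encoding. A rough sketch of the same setup in isolation, assuming bv.open() accepts a PyAV-style write mode; the output name, codec, and rate are placeholders:

import bv

output = bv.open("out.mp4", "w")                # placeholder output path
stream = output.add_stream("libx265", rate=30)  # placeholder codec and fps
# Matches the option added in 28.0.2; it is an x265-specific encoder setting.
stream.options = {"x265-params": "log-level=error"}
output.close()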
@@ -193,7 +194,7 @@ def render_av(
          obj_list: list[VideoFrame | TlRect | TlImage] = []
          for layer in tl.v:
              for lobj in layer:
-                 if isinstance(lobj, TlVideo):
+                 if isinstance(lobj, Clip):
                      if index >= lobj.start and index < (lobj.start + lobj.dur):
                          _i = round((lobj.offset + index - lobj.start) * lobj.speed)
                          obj_list.append(VideoFrame(_i, lobj.src))