auto-editor 26.3.3__py3-none-any.whl → 27.1.0__py3-none-any.whl
- auto_editor/__init__.py +1 -1
- auto_editor/__main__.py +17 -5
- auto_editor/analyze.py +30 -36
- auto_editor/cmds/desc.py +2 -2
- auto_editor/cmds/info.py +3 -3
- auto_editor/cmds/levels.py +5 -5
- auto_editor/cmds/repl.py +3 -8
- auto_editor/cmds/subdump.py +62 -8
- auto_editor/cmds/test.py +92 -42
- auto_editor/edit.py +59 -111
- auto_editor/ffwrapper.py +91 -87
- auto_editor/formats/fcp11.py +10 -8
- auto_editor/formats/fcp7.py +11 -12
- auto_editor/formats/json.py +10 -11
- auto_editor/{lang/json.py → json.py} +39 -43
- auto_editor/lang/palet.py +2 -2
- auto_editor/lang/stdenv.py +13 -0
- auto_editor/make_layers.py +18 -8
- auto_editor/render/audio.py +239 -102
- auto_editor/render/subtitle.py +10 -14
- auto_editor/render/video.py +41 -46
- auto_editor/timeline.py +60 -10
- auto_editor/utils/container.py +21 -14
- auto_editor/utils/func.py +21 -0
- {auto_editor-26.3.3.dist-info → auto_editor-27.1.0.dist-info}/METADATA +8 -7
- auto_editor-27.1.0.dist-info/RECORD +54 -0
- {auto_editor-26.3.3.dist-info → auto_editor-27.1.0.dist-info}/WHEEL +1 -1
- docs/build.py +16 -7
- auto_editor/output.py +0 -86
- auto_editor/wavfile.py +0 -310
- auto_editor-26.3.3.dist-info/RECORD +0 -56
- {auto_editor-26.3.3.dist-info → auto_editor-27.1.0.dist-info}/entry_points.txt +0 -0
- {auto_editor-26.3.3.dist-info → auto_editor-27.1.0.dist-info/licenses}/LICENSE +0 -0
- {auto_editor-26.3.3.dist-info → auto_editor-27.1.0.dist-info}/top_level.txt +0 -0
auto_editor/formats/json.py
CHANGED
@@ -6,11 +6,12 @@ from difflib import get_close_matches
 from fractions import Fraction
 from typing import Any

-from auto_editor.ffwrapper import FileInfo
-from auto_editor.
+from auto_editor.ffwrapper import FileInfo
+from auto_editor.json import dump, load
 from auto_editor.lib.err import MyError
 from auto_editor.timeline import (
     ASpace,
+    Template,
     TlAudio,
     TlVideo,
     VSpace,
@@ -59,7 +60,7 @@ def read_v3(tl: Any, log: Log) -> v3:
     def make_src(v: str) -> FileInfo:
         if v in srcs:
             return srcs[v]
-        temp =
+        temp = FileInfo.init(v, log)
         srcs[v] = temp
         return temp

@@ -151,11 +152,11 @@ def read_v3(tl: Any, log: Log) -> v3:
             a.append(a_out)

     try:
-
+        T = Template.init(srcs[next(iter(srcs))])
     except StopIteration:
-
+        T = Template(sr, "stereo", res, [], [])

-    return v3(
+    return v3(tb, bg, T, v, a, v1=None)


 def read_v1(tl: Any, log: Log) -> v3:
@@ -168,7 +169,7 @@ def read_v1(tl: Any, log: Log) -> v3:

     check_file(path, log)

-    src =
+    src = FileInfo.init(path, log)

     vtl: VSpace = []
     atl: ASpace = [[] for _ in range(len(src.audios))]
@@ -207,11 +208,9 @@
         atl[a].append(TlAudio(c.start, c.dur, c.src, c.offset, c.speed, 1, a))

     return v3(
-        src,
         src.get_fps(),
-        src.get_sr(),
-        src.get_res(),
         "#000",
+        Template.init(src),
         vtl,
         atl,
         v1(src, chunks),
@@ -221,7 +220,7 @@ def read_v1(tl: Any, log: Log) -> v3:
 def read_json(path: str, log: Log) -> v3:
     with open(path, encoding="utf-8", errors="ignore") as f:
         try:
-            tl =
+            tl = load(path, f)
         except MyError as e:
             log.error(e)

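Note on the hunks above: media probing now goes through the FileInfo.init(path, log) factory, and read_v3 memoizes the result per path. A minimal sketch of that caching pattern, not taken from the diff; the log argument is left untyped here because its construction is not part of this changeset:

from auto_editor.ffwrapper import FileInfo

_cache: dict[str, FileInfo] = {}

def get_src(path: str, log) -> FileInfo:
    # Probe each file once, then reuse the same FileInfo on later references.
    if path not in _cache:
        _cache[path] = FileInfo.init(path, log)
    return _cache[path]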
auto_editor/{lang/json.py → json.py}
RENAMED
@@ -11,29 +11,10 @@ if TYPE_CHECKING:

     from _typeshed import SupportsWrite

-
-class Token:
-    __slots__ = ("type", "value")
-
-    def __init__(self, type: int, value: object):
-        self.type = type
-        self.value = value
-
-    def __str__(self) -> str:
-        return f"{self.type=} {self.value=}"
-
-    __repr__ = __str__
+Token = tuple[int, object]


 EOF, LCUR, RCUR, LBRAC, RBRAC, COL, COMMA, STR, VAL = range(9)
-table = {
-    "{": LCUR,
-    "}": RCUR,
-    "[": LBRAC,
-    "]": RBRAC,
-    ":": COL,
-    ",": COMMA,
-}
 str_escape = {
     "\\": "\\",
     "/": "/",
@@ -88,6 +69,12 @@ class Lexer:
         self.char = self.text[self.pos]
         self.column += 1

+    def rewind(self) -> None:
+        self.pos = 0
+        self.lineno = 1
+        self.column = 1
+        self.char = self.text[self.pos] if self.text else None
+
     def peek(self) -> str | None:
         peek_pos = self.pos + 1
         return None if peek_pos > len(self.text) - 1 else self.text[peek_pos]
@@ -142,7 +129,7 @@
         result = buf.getvalue()

         try:
-            return
+            return (VAL, float(result) if has_dot else int(result))
         except ValueError:
             self.error(f"`{result}` is not a valid JSON Number")

@@ -158,7 +145,7 @@

         if self.char == '"':
             self.advance()
-            return
+            return (STR, self.string())

         if self.char == "-":
             _peek = self.peek()
@@ -168,10 +155,11 @@
         if self.char in "0123456789.":
             return self.number()

+        table = {"{": LCUR, "}": RCUR, "[": LBRAC, "]": RBRAC, ":": COL, ",": COMMA}
         if self.char in table:
             key = table[self.char]
             self.advance()
-            return
+            return (key, None)

         keyword = ""
         for i in range(5):  # Longest valid keyword length
@@ -181,14 +169,14 @@
             self.advance()

         if keyword == "true":
-            return
+            return (VAL, True)
         if keyword == "false":
-            return
+            return (VAL, False)
         if keyword == "null":
-            return
+            return (VAL, None)

         self.error(f"Invalid keyword: `{keyword}`")
-        return
+        return (EOF, None)


 class Parser:
@@ -204,49 +192,49 @@
     def expr(self) -> Any:
         self.current_token

-        if self.current_token
-            val = self.current_token
+        if self.current_token[0] in {STR, VAL}:
+            val = self.current_token[1]
             self.eat()
             return val

-        if self.current_token
+        if self.current_token[0] == LCUR:
             self.eat()

             my_dic = {}
-            while self.current_token
-                if self.current_token
-                    if self.current_token
+            while self.current_token[0] != RCUR:
+                if self.current_token[0] != STR:
+                    if self.current_token[0] in {LBRAC, VAL}:
                         self.lexer.error("JSON Objects only allow strings as keys")
                     self.lexer.error("Expected closing `}`")
-                key = self.current_token
+                key = self.current_token[1]
                 if key in my_dic:
                     self.lexer.error(f"Object has repeated key `{key}`")
                 self.eat()
-                if self.current_token
+                if self.current_token[0] != COL:
                     self.lexer.error("Expected `:`")
                 self.eat()

                 my_dic[key] = self.expr()
-                if self.current_token
-                    if self.current_token
+                if self.current_token[0] != RCUR:
+                    if self.current_token[0] != COMMA:
                         self.lexer.error("Expected `,` between Object entries")
                     self.eat()
-                    if self.current_token
+                    if self.current_token[0] == RCUR:
                         self.lexer.error("Trailing `,` in Object")

             self.eat()
             return my_dic

-        if self.current_token
+        if self.current_token[0] == LBRAC:
             self.eat()
             my_arr = []
-            while self.current_token
+            while self.current_token[0] != RBRAC:
                 my_arr.append(self.expr())
-                if self.current_token
-                    if self.current_token
+                if self.current_token[0] != RBRAC:
+                    if self.current_token[0] != COMMA:
                         self.lexer.error("Expected `,` between array entries")
                     self.eat()
-                    if self.current_token
+                    if self.current_token[0] == RBRAC:
                         self.lexer.error("Trailing `,` in array")
             self.eat()
             return my_arr
@@ -254,6 +242,14 @@
         raise MyError(f"Unknown token: {self.current_token}")


+def load(path: str, f: str | bytes | TextIOWrapper) -> dict[str, object]:
+    lexer = Lexer(path, f)
+    if lexer.get_next_token()[0] != LCUR:
+        raise MyError("Expected JSON Object")
+    lexer.rewind()
+    return Parser(lexer).expr()
+
+
 def dump(
     data: object, file: SupportsWrite[str], indent: int | None = None, level: int = 0
 ) -> None:
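For reference, a small round-trip against the load/dump signatures introduced above. The payload values are invented for illustration, and the first load() argument is assumed to be a display name used only in error messages (that is how the Lexer appears to be constructed here):

import io

from auto_editor.json import dump, load

buf = io.StringIO()
dump({"version": "3", "background": "#000"}, buf, indent=2)

# load() accepts a str, bytes, or open text stream; the top-level value must
# be a JSON object, otherwise MyError("Expected JSON Object") is raised.
data = load("<example>", buf.getvalue())
assert data["background"] == "#000"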
auto_editor/lang/palet.py
CHANGED
@@ -646,8 +646,8 @@ stack_trace_manager = StackTraceManager()


 def my_eval(env: Env, node: object) -> Any:
-    def make_trace(sym:
-        return f" at {sym.val} ({sym.lineno}:{sym.column})"
+    def make_trace(sym: object) -> str:
+        return f" at {sym.val} ({sym.lineno}:{sym.column})" if type(sym) is Sym else ""

     if type(node) is Sym:
         val = env.get(node.val)
auto_editor/lang/stdenv.py
CHANGED
@@ -2,6 +2,8 @@ from __future__ import annotations

 from typing import TYPE_CHECKING

+import bv
+
 from auto_editor.analyze import mut_remove_large, mut_remove_small
 from auto_editor.lib.contracts import *
 from auto_editor.lib.data_structs import *
@@ -11,6 +13,7 @@ from .palet import Syntax, env, is_boolarr, is_iterable, my_eval, p_slice, raise
 if TYPE_CHECKING:
     from typing import Any, Literal

+    import numpy as np
     from numpy.typing import NDArray

     Number = int | float | complex | Fraction
@@ -749,6 +752,13 @@ def make_standard_env() -> dict[str, Any]:
             raise MyError("@r: attribute must be an identifier")

         base = my_eval(env, node[1])
+
+        if hasattr(base, "__pyx_vtable__"):
+            try:
+                return getattr(base, node[2].val)
+            except AttributeError as e:
+                raise MyError(e)
+
         if type(base) is PaletClass:
             if type(name := node[2]) is not Sym:
                 raise MyError("@r: class attribute must be an identifier")
@@ -1171,6 +1181,9 @@ def make_standard_env() -> dict[str, Any]:
             "string->vector", lambda s: [Char(c) for c in s], (1, 1), is_str
         ),
         "range->vector": Proc("range->vector", list, (1, 1), is_range),
+        # av
+        "encoder": Proc("encoder", lambda x: bv.Codec(x, "w"), (1, 1), is_str),
+        "decoder": Proc("decoder", lambda x: bv.Codec(x), (1, 1), is_str),
         # reflexion
         "var-exists?": Proc("var-exists?", lambda sym: sym.val in env, (1, 1), is_symbol),
         "rename": Syntax(syn_rename),
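The new encoder/decoder procedures above wrap bv.Codec. Assuming bv exposes a PyAV-style Codec API (which the "w" write flag suggests, though that is not confirmed by this diff), the Palet forms (encoder "aac") and (decoder "h264") reduce to roughly this Python sketch:

import bv

# (encoder "aac")  -> an encoder Codec handle; "w" selects the write side.
aac_enc = bv.Codec("aac", "w")

# (decoder "h264") -> a decoder Codec handle; decoding is the default mode.
h264_dec = bv.Codec("h264")

print(aac_enc.name, h264_dec.name)  # attribute access assumes a PyAV-like API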
auto_editor/make_layers.py
CHANGED
@@ -11,7 +11,7 @@ from auto_editor.ffwrapper import FileInfo
 from auto_editor.lang.palet import Lexer, Parser, env, interpret, is_boolean_array
 from auto_editor.lib.data_structs import print_str
 from auto_editor.lib.err import MyError
-from auto_editor.timeline import ASpace, TlAudio, TlVideo, VSpace, v1, v3
+from auto_editor.timeline import ASpace, Template, TlAudio, TlVideo, VSpace, v1, v3
 from auto_editor.utils.func import mut_margin
 from auto_editor.utils.types import CoerceError, time

@@ -99,16 +99,17 @@ def parse_time(val: str, arr: NDArray, tb: Fraction) -> int: # raises: `CoerceE


 def make_timeline(
-    sources: list[FileInfo],
-    args: Args,
-    sr: int,
-    bar: Bar,
-    log: Log,
+    sources: list[FileInfo], args: Args, sr: int, bar: Bar, log: Log
 ) -> v3:
     inp = None if not sources else sources[0]

     if inp is None:
-        tb
+        tb = (
+            Fraction(30)
+            if args.frame_rate is None
+            else make_sane_timebase(args.frame_rate)
+        )
+        res = (1920, 1080) if args.resolution is None else args.resolution
     else:
         tb = make_sane_timebase(
             inp.get_fps() if args.frame_rate is None else args.frame_rate
@@ -302,4 +303,13 @@
     else:
         v1_compatiable = None

-
+    if len(vtl) == 0 and len(atl) == 0:
+        log.error("Timeline is empty, nothing to do.")
+
+    if inp is None:
+        layout = "stereo" if args.audio_layout is None else args.audio_layout
+        template = Template(sr, layout, res, [], [])
+    else:
+        template = Template.init(inp, sr, args.audio_layout, res)
+
+    return v3(tb, args.background, template, vtl, atl, v1_compatiable)
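A condensed sketch of the two output-template paths make_timeline now takes, with placeholder values standing in for args and the probed input file; the Template and v3 field order simply mirrors the calls shown in the hunk above and is otherwise an assumption:

from fractions import Fraction

from auto_editor.timeline import Template, v3

sr, res, tb = 48000, (1920, 1080), Fraction(30)  # placeholder defaults

# No input media: a bare stereo template is enough.
template = Template(sr, "stereo", res, [], [])

# With an input file, the probed streams seed the template instead:
# template = Template.init(inp, sr, args.audio_layout, res)

timeline = v3(tb, "#000", template, [], [], None)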