auto-editor 28.1.0__py3-none-any.whl → 29.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- auto_editor/__init__.py +3 -1
- auto_editor/__main__.py +31 -497
- auto_editor/cli.py +12 -0
- {auto_editor-28.1.0.dist-info → auto_editor-29.0.1.dist-info}/METADATA +5 -6
- auto_editor-29.0.1.dist-info/RECORD +9 -0
- auto_editor-29.0.1.dist-info/entry_points.txt +2 -0
- {auto_editor-28.1.0.dist-info → auto_editor-29.0.1.dist-info}/top_level.txt +0 -1
- auto_editor/analyze.py +0 -393
- auto_editor/cmds/__init__.py +0 -0
- auto_editor/cmds/cache.py +0 -69
- auto_editor/cmds/desc.py +0 -32
- auto_editor/cmds/info.py +0 -213
- auto_editor/cmds/levels.py +0 -199
- auto_editor/cmds/palet.py +0 -29
- auto_editor/cmds/repl.py +0 -113
- auto_editor/cmds/subdump.py +0 -72
- auto_editor/cmds/test.py +0 -816
- auto_editor/edit.py +0 -560
- auto_editor/exports/__init__.py +0 -0
- auto_editor/exports/fcp11.py +0 -195
- auto_editor/exports/fcp7.py +0 -313
- auto_editor/exports/json.py +0 -63
- auto_editor/exports/kdenlive.py +0 -322
- auto_editor/exports/shotcut.py +0 -147
- auto_editor/ffwrapper.py +0 -187
- auto_editor/help.py +0 -224
- auto_editor/imports/__init__.py +0 -0
- auto_editor/imports/fcp7.py +0 -275
- auto_editor/imports/json.py +0 -234
- auto_editor/json.py +0 -297
- auto_editor/lang/__init__.py +0 -0
- auto_editor/lang/libintrospection.py +0 -10
- auto_editor/lang/libmath.py +0 -23
- auto_editor/lang/palet.py +0 -724
- auto_editor/lang/stdenv.py +0 -1179
- auto_editor/lib/__init__.py +0 -0
- auto_editor/lib/contracts.py +0 -235
- auto_editor/lib/data_structs.py +0 -278
- auto_editor/lib/err.py +0 -2
- auto_editor/make_layers.py +0 -315
- auto_editor/preview.py +0 -93
- auto_editor/render/__init__.py +0 -0
- auto_editor/render/audio.py +0 -517
- auto_editor/render/subtitle.py +0 -205
- auto_editor/render/video.py +0 -307
- auto_editor/timeline.py +0 -331
- auto_editor/utils/__init__.py +0 -0
- auto_editor/utils/bar.py +0 -142
- auto_editor/utils/chunks.py +0 -2
- auto_editor/utils/cmdkw.py +0 -206
- auto_editor/utils/container.py +0 -101
- auto_editor/utils/func.py +0 -128
- auto_editor/utils/log.py +0 -126
- auto_editor/utils/types.py +0 -277
- auto_editor/vanparse.py +0 -313
- auto_editor-28.1.0.dist-info/RECORD +0 -57
- auto_editor-28.1.0.dist-info/entry_points.txt +0 -6
- docs/build.py +0 -70
- {auto_editor-28.1.0.dist-info → auto_editor-29.0.1.dist-info}/WHEEL +0 -0
- {auto_editor-28.1.0.dist-info → auto_editor-29.0.1.dist-info}/licenses/LICENSE +0 -0
auto_editor/make_layers.py
DELETED
@@ -1,315 +0,0 @@
-from __future__ import annotations
-
-from fractions import Fraction
-from math import ceil
-from typing import TYPE_CHECKING, NamedTuple
-
-import numpy as np
-
-from auto_editor.analyze import initLevels
-from auto_editor.ffwrapper import FileInfo
-from auto_editor.lang.palet import Lexer, Parser, env, interpret, is_boolean_array
-from auto_editor.lib.data_structs import print_str
-from auto_editor.lib.err import MyError
-from auto_editor.timeline import ASpace, Clip, Template, VSpace, v1, v3
-from auto_editor.utils.func import mut_margin
-from auto_editor.utils.types import CoerceError, time
-
-if TYPE_CHECKING:
-    from numpy.typing import NDArray
-
-    from auto_editor.__main__ import Args
-    from auto_editor.utils.bar import Bar
-    from auto_editor.utils.chunks import Chunks
-    from auto_editor.utils.log import Log
-
-BoolList = NDArray[np.bool_]
-
-
-class VirClip(NamedTuple):
-    start: int
-    dur: int
-    offset: int
-    speed: float
-    src: FileInfo
-
-
-def clipify(chunks: Chunks, src: FileInfo, start: int = 0) -> list[VirClip]:
-    clips: list[VirClip] = []
-    i = 0
-    for chunk in chunks:
-        if chunk[2] > 0 and chunk[2] < 99999.0:
-            dur = round((chunk[1] - chunk[0]) / chunk[2])
-            if dur == 0:
-                continue
-
-            offset = int(chunk[0] / chunk[2])
-
-            if not (clips and clips[-1].start == round(start)):
-                clips.append(VirClip(start, dur, offset, chunk[2], src))
-            start += dur
-            i += 1
-
-    return clips
-
-
-def make_av(src: FileInfo, all_clips: list[list[VirClip]]) -> tuple[VSpace, ASpace]:
-    assert type(src) is FileInfo
-    vtl: VSpace = []
-    atl: ASpace = [[] for _ in range(len(src.audios))]
-
-    for clips in all_clips:
-        for c in clips:
-            if src.videos:
-                if len(vtl) == 0:
-                    vtl.append([])
-                vtl[0].append(Clip(c.start, c.dur, c.src, c.offset, 0, c.speed))
-
-        for c in clips:
-            for a in range(len(src.audios)):
-                atl[a].append(Clip(c.start, c.dur, c.src, c.offset, a, c.speed))
-
-    return vtl, atl
-
-
-def make_sane_timebase(fps: Fraction) -> Fraction:
-    tb = round(fps, 2)
-
-    ntsc_60 = Fraction(60_000, 1001)
-    ntsc = Fraction(30_000, 1001)
-    film_ntsc = Fraction(24_000, 1001)
-
-    if tb == round(ntsc_60, 2):
-        return ntsc_60
-    if tb == round(ntsc, 2):
-        return ntsc
-    if tb == round(film_ntsc, 2):
-        return film_ntsc
-    return tb
-
-
-def parse_time(val: str, arr: NDArray, tb: Fraction) -> int:  # raises: `CoerceError`
-    if val == "start":
-        return 0
-    if val == "end":
-        return len(arr)
-
-    num = time(val, tb)
-    return num if num >= 0 else num + len(arr)
-
-
-def make_timeline(
-    sources: list[FileInfo], args: Args, sr: int, bar: Bar, log: Log
-) -> v3:
-    inp = None if not sources else sources[0]
-
-    if inp is None:
-        tb = (
-            Fraction(30)
-            if args.frame_rate is None
-            else make_sane_timebase(args.frame_rate)
-        )
-        res = (1920, 1080) if args.resolution is None else args.resolution
-    else:
-        tb = make_sane_timebase(
-            inp.get_fps() if args.frame_rate is None else args.frame_rate
-        )
-        res = inp.get_res() if args.resolution is None else args.resolution
-
-    try:
-        start_margin = time(args.margin[0], tb)
-        end_margin = time(args.margin[1], tb)
-    except CoerceError as e:
-        log.error(e)
-
-    has_loud = np.array([], dtype=np.bool_)
-    src_index = np.array([], dtype=np.int32)
-
-    try:
-        stdenv = __import__("auto_editor.lang.stdenv", fromlist=["lang"])
-        env.update(stdenv.make_standard_env())
-    except ImportError:
-        func = log.error if args.config else log.debug
-        func("Failed to import standard env")
-
-    if args.config:
-        # Edit `env` with user-defined code.
-        with open("config.pal") as file:
-            parser = Parser(Lexer("config.pal", file.read()))
-            interpret(env, parser)
-
-    results = []
-    for src in sources:
-        try:
-            parser = Parser(Lexer("`--edit`", args.edit))
-            if log.is_debug:
-                log.debug(f"edit: {parser}")
-
-            env["timebase"] = tb
-            env["@levels"] = initLevels(src, tb, bar, args.no_cache, log)
-
-            inter_result = interpret(env, parser)
-            if len(inter_result) == 0:
-                log.error("Expression in --edit must return a bool-array, got nothing")
-
-            result = inter_result[-1]
-            if callable(result):
-                result = result()
-        except MyError as e:
-            log.error(e)
-
-        if not is_boolean_array(result):
-            log.error(
-                f"Expression in --edit must return a bool-array, got {print_str(result)}"
-            )
-        mut_margin(result, start_margin, end_margin)
-        results.append(result)
-
-    if all(len(result) == 0 for result in results):
-        if "subtitle" in args.edit:
-            log.error("No file(s) have the selected subtitle stream.")
-        if "motion" in args.edit:
-            log.error("No file(s) have the selected video stream.")
-        if "audio" in args.edit:
-            log.error("No file(s) have the selected audio stream.")
-
-    src_indexes = []
-    for i in range(0, len(results)):
-        if len(results[i]) == 0:
-            results[i] = initLevels(sources[i], tb, bar, args.no_cache, log).all()
-        src_indexes.append(np.full(len(results[i]), i, dtype=np.int32))
-
-    has_loud = np.concatenate(results)
-    src_index = np.concatenate(src_indexes)
-    if len(has_loud) == 0:
-        log.error("Empty timeline. Nothing to do.")
-
-    # Setup for handling custom speeds
-    speed_index = has_loud.astype(np.uint)
-    speed_map = [args.silent_speed, args.video_speed]
-    speed_hash = {
-        0: args.silent_speed,
-        1: args.video_speed,
-    }
-
-    def get_speed_index(speed: float) -> int:
-        if speed in speed_map:
-            return speed_map.index(speed)
-        speed_map.append(speed)
-        speed_hash[len(speed_map) - 1] = speed
-        return len(speed_map) - 1
-
-    try:
-        for _range in args.cut_out:
-            # always cut out even if 'silent_speed' is not 99,999
-            pair = [parse_time(val, speed_index, tb) for val in _range]
-            speed_index[pair[0] : pair[1]] = get_speed_index(99_999)
-
-        for _range in args.add_in:
-            # set to 'video_speed' index
-            pair = [parse_time(val, speed_index, tb) for val in _range]
-            speed_index[pair[0] : pair[1]] = 1
-
-        for speed_range in args.set_speed_for_range:
-            start_in = parse_time(speed_range[1], speed_index, tb)
-            end_in = parse_time(speed_range[2], speed_index, tb)
-            speed_index[start_in:end_in] = get_speed_index(speed_range[0])
-    except CoerceError as e:
-        log.error(e)
-
-    def echunk(
-        arr: NDArray, src_index: NDArray[np.int32]
-    ) -> list[tuple[FileInfo, int, int, float]]:
-        arr_length = len(has_loud)
-
-        chunks = []
-        start = 0
-        doi = 0
-        for j in range(1, arr_length):
-            if (arr[j] != arr[j - 1]) or (src_index[j] != src_index[j - 1]):
-                src = sources[src_index[j - 1]]
-                chunks.append((src, start, j - doi, speed_map[arr[j - 1]]))
-                start = j - doi
-
-                if src_index[j] != src_index[j - 1]:
-                    start = 0
-                    doi = j
-
-        src = sources[src_index[j]]
-        chunks.append((src, start, arr_length, speed_map[arr[j]]))
-        return chunks
-
-    # Assert timeline is monotonic because non-monotonic timelines are incorrect
-    # here and causes back-seeking (performance issue) in video rendering.
-
-    # We don't properly check monotonicity for multiple sources, so skip those.
-
-    check_monotonic = len(sources) == 1
-    last_i = 0
-
-    clips: list[VirClip] = []
-    start = 0
-
-    for chunk in echunk(speed_index, src_index):
-        if chunk[3] != 99999:
-            dur = int((chunk[2] - chunk[1]) / chunk[3])
-            if dur == 0:
-                continue
-
-            offset = ceil(chunk[1] / chunk[3])
-
-            if check_monotonic:
-                max_end = start + dur - 1
-                this_i = round((offset + max_end - start) * chunk[3])
-                if this_i < last_i:
-                    raise ValueError("not monotonic", sources, this_i, last_i)
-                last_i = this_i
-
-            clips.append(VirClip(start, dur, offset, chunk[3], chunk[0]))
-
-            start += dur
-
-    vtl: VSpace = []
-    atl: ASpace = []
-    for c in clips:
-        if c.src.videos:
-            if len(vtl) == 0:
-                vtl.append([])
-            vtl[0].append(Clip(c.start, c.dur, c.src, c.offset, 0, c.speed))
-
-    for c in clips:
-        for a in range(len(c.src.audios)):
-            if a >= len(atl):
-                atl.append([])
-            atl[a].append(Clip(c.start, c.dur, c.src, c.offset, a, c.speed))
-
-    # Turn long silent/loud array to formatted chunk list.
-    # Example: [1, 1, 1, 2, 2], {1: 1.0, 2: 1.5} => [(0, 3, 1.0), (3, 5, 1.5)]
-    def chunkify(arr: NDArray, smap: dict[int, float]) -> Chunks:
-        arr_length = len(arr)
-
-        chunks = []
-        start = 0
-        for j in range(1, arr_length):
-            if arr[j] != arr[j - 1]:
-                chunks.append((start, j, smap[arr[j - 1]]))
-                start = j
-        chunks.append((start, arr_length, smap[arr[j]]))
-        return chunks
-
-    if len(sources) == 1 and inp is not None:
-        chunks = chunkify(speed_index, speed_hash)
-        v1_compatible = v1(inp, chunks)
-    else:
-        v1_compatible = None
-
-    if len(vtl) == 0 and len(atl) == 0:
-        log.error("Timeline is empty, nothing to do.")
-
-    if inp is None:
-        layout = "stereo" if args.audio_layout is None else args.audio_layout
-        template = Template(sr, layout, res, [], [])
-    else:
-        template = Template.init(inp, sr, args.audio_layout, res)
-
-    return v3(tb, args.background, template, vtl, atl, v1_compatible)
auto_editor/preview.py
DELETED
@@ -1,93 +0,0 @@
-from __future__ import annotations
-
-import sys
-from fractions import Fraction
-from statistics import fmean, median
-from typing import TextIO
-
-from auto_editor.analyze import initLevels
-from auto_editor.timeline import v3
-from auto_editor.utils.bar import initBar
-from auto_editor.utils.func import to_timecode
-from auto_editor.utils.log import Log
-
-
-def time_frame(
-    fp: TextIO, title: str, ticks: float, tb: Fraction, per: str | None = None
-) -> None:
-    tc = to_timecode(ticks / tb, "ass")
-
-    tp = 9 if tc.startswith("-") else 10
-    tcp = 12 if tc.startswith("-") else 11
-    preci = 0 if int(ticks) == ticks else 2
-    end = "" if per is None else f" {per:>7}"
-
-    fp.write(f" - {f'{title}:':<{tp}} {tc:<{tcp}} {f'({ticks:.{preci}f})':<6}{end}\n")
-
-
-def all_cuts(tl: v3, in_len: int) -> list[int]:
-    # Calculate cuts
-    tb = tl.tb
-    clip_spans: list[tuple[int, int]] = []
-
-    for clip in tl.a[0]:
-        old_offset = clip.offset * clip.speed
-        clip_spans.append((round(old_offset), round(old_offset + clip.dur)))
-
-    cut_lens = []
-    i = 0
-    while i < len(clip_spans) - 1:
-        if i == 0 and clip_spans[i][0] != 0:
-            cut_lens.append(clip_spans[i][0])
-
-        cut_lens.append(clip_spans[i + 1][0] - clip_spans[i][1])
-        i += 1
-
-    if len(clip_spans) > 0 and clip_spans[-1][1] < round(in_len / tb):
-        cut_lens.append(in_len - clip_spans[-1][1])
-
-    return cut_lens
-
-
-def preview(tl: v3, log: Log) -> None:
-    log.conwrite("")
-    tb = tl.tb
-
-    # Calculate input videos length
-    in_len = 0
-    bar = initBar("none")
-    for src in tl.unique_sources():
-        in_len += initLevels(src, tb, bar, False, log).media_length
-
-    out_len = len(tl)
-    diff = out_len - in_len
-
-    fp = sys.stdout
-    fp.write("\nlength:\n")
-    time_frame(fp, "input", in_len, tb, "100.0%")
-    time_frame(fp, "output", out_len, tb, f"{round((out_len / in_len) * 100, 2)}%")
-    time_frame(fp, "diff", diff, tb, f"{round((diff / in_len) * 100, 2)}%")
-
-    clip_lens = [clip.dur for clip in tl.a[0]]
-    log.debug(clip_lens)
-
-    fp.write(f"clips:\n - amount: {len(clip_lens)}\n")
-    if len(clip_lens) > 0:
-        time_frame(fp, "smallest", min(clip_lens), tb)
-        time_frame(fp, "largest", max(clip_lens), tb)
-    if len(clip_lens) > 1:
-        time_frame(fp, "median", median(clip_lens), tb)
-        time_frame(fp, "average", fmean(clip_lens), tb)
-
-    cut_lens = all_cuts(tl, in_len)
-    log.debug(cut_lens)
-    fp.write(f"cuts:\n - amount: {len(cut_lens)}\n")
-    if len(cut_lens) > 0:
-        time_frame(fp, "smallest", min(cut_lens), tb)
-        time_frame(fp, "largest", max(cut_lens), tb)
-    if len(cut_lens) > 1:
-        time_frame(fp, "median", median(cut_lens), tb)
-        time_frame(fp, "average", fmean(cut_lens), tb)
-
-    fp.write("\n")
-    fp.flush()
auto_editor/render/__init__.py
DELETED
File without changes