auto-editor 24.29.1__py3-none-any.whl → 24.30.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- auto_editor/__init__.py +2 -2
- auto_editor/analyze.py +143 -132
- auto_editor/edit.py +14 -21
- auto_editor/make_layers.py +3 -6
- auto_editor/output.py +4 -4
- auto_editor/preview.py +2 -3
- auto_editor/render/subtitle.py +1 -1
- auto_editor/render/video.py +8 -18
- auto_editor/subcommands/levels.py +18 -17
- auto_editor/subcommands/repl.py +3 -12
- auto_editor/subcommands/subdump.py +5 -8
- auto_editor/utils/container.py +71 -313
- {auto_editor-24.29.1.dist-info → auto_editor-24.30.1.dist-info}/METADATA +2 -2
- {auto_editor-24.29.1.dist-info → auto_editor-24.30.1.dist-info}/RECORD +18 -18
- {auto_editor-24.29.1.dist-info → auto_editor-24.30.1.dist-info}/WHEEL +1 -1
- {auto_editor-24.29.1.dist-info → auto_editor-24.30.1.dist-info}/LICENSE +0 -0
- {auto_editor-24.29.1.dist-info → auto_editor-24.30.1.dist-info}/entry_points.txt +0 -0
- {auto_editor-24.29.1.dist-info → auto_editor-24.30.1.dist-info}/top_level.txt +0 -0
auto_editor/__init__.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "24.29.1"
-version = "
+__version__ = "24.30.1"
+version = "24w30a"
auto_editor/analyze.py
CHANGED
@@ -4,23 +4,25 @@ import os
 import re
 from dataclasses import dataclass
 from fractions import Fraction
+from math import ceil
 from typing import TYPE_CHECKING

+import av
 import numpy as np
+from av.audio.fifo import AudioFifo
+from av.subtitles.subtitle import AssSubtitle

 from auto_editor import version
 from auto_editor.utils.subtitle_tools import convert_ass_to_text
-from auto_editor.wavfile import read

 if TYPE_CHECKING:
+    from collections.abc import Iterator
     from fractions import Fraction
     from typing import Any

-    from av.filter import FilterContext
     from numpy.typing import NDArray

     from auto_editor.ffwrapper import FileInfo
-    from auto_editor.output import Ensure
     from auto_editor.utils.bar import Bar
     from auto_editor.utils.log import Log

@@ -28,7 +30,6 @@ if TYPE_CHECKING:
 @dataclass(slots=True)
 class FileSetup:
     src: FileInfo
-    ensure: Ensure
     strict: bool
     tb: Fraction
     bar: Bar
@@ -40,11 +41,6 @@ class LevelError(Exception):
     pass


-def link_nodes(*nodes: FilterContext) -> None:
-    for c, n in zip(nodes, nodes[1:]):
-        c.link_to(n)
-
-
 def mut_remove_small(
     arr: NDArray[np.bool_], lim: int, replace: int, with_: int
 ) -> None:
@@ -92,9 +88,90 @@ def obj_tag(tag: str, tb: Fraction, obj: dict[str, Any]) -> str:
     return key


+def iter_audio(src, tb: Fraction, stream: int = 0) -> Iterator[float]:
+    fifo = AudioFifo()
+    try:
+        container = av.open(src.path, "r")
+        audio_stream = container.streams.audio[stream]
+        sample_rate = audio_stream.rate
+
+        exact_size = (1 / tb) * sample_rate
+        accumulated_error = 0
+
+        # Resample so that audio data is between [-1, 1]
+        resampler = av.AudioResampler(
+            av.AudioFormat("flt"), audio_stream.layout, sample_rate
+        )
+
+        for frame in container.decode(audio=stream):
+            frame.pts = None  # Skip time checks
+
+            for reframe in resampler.resample(frame):
+                fifo.write(reframe)
+
+            while fifo.samples >= ceil(exact_size):
+                size_with_error = exact_size + accumulated_error
+                current_size = round(size_with_error)
+                accumulated_error = size_with_error - current_size
+
+                audio_chunk = fifo.read(current_size)
+                assert audio_chunk is not None
+                arr = audio_chunk.to_ndarray().flatten()
+                yield float(np.max(np.abs(arr)))
+
+    finally:
+        container.close()
+
+
+def iter_motion(src, tb, stream: int, blur: int, width: int) -> Iterator[float]:
+    container = av.open(src.path, "r")
+
+    video = container.streams.video[stream]
+    video.thread_type = "AUTO"
+
+    prev_frame = None
+    current_frame = None
+    total_pixels = src.videos[0].width * src.videos[0].height
+    index = 0
+    prev_index = -1
+
+    graph = av.filter.Graph()
+    graph.link_nodes(
+        graph.add_buffer(template=video),
+        graph.add("scale", f"{width}:-1"),
+        graph.add("format", "gray"),
+        graph.add("gblur", f"sigma={blur}"),
+        graph.add("buffersink"),
+    ).configure()
+
+    for unframe in container.decode(video):
+        if unframe.pts is None:
+            continue
+
+        graph.push(unframe)
+        frame = graph.pull()
+        assert frame.time is not None
+        index = round(frame.time * tb)
+
+        current_frame = frame.to_ndarray()
+        if prev_frame is None:
+            value = 0.0
+        else:
+            # Use `int16` to avoid underflow with `uint8` datatype
+            diff = np.abs(prev_frame.astype(np.int16) - current_frame.astype(np.int16))
+            value = np.count_nonzero(diff) / total_pixels
+
+        for _ in range(index - prev_index):
+            yield value
+
+        prev_frame = current_frame
+        prev_index = index
+
+    container.close()
+
+
 @dataclass(slots=True)
 class Levels:
-    ensure: Ensure
     src: FileInfo
     tb: Fraction
     bar: Bar
@@ -107,26 +184,16 @@ class Levels:
         if (arr := self.read_cache("audio", {"stream": 0})) is not None:
             return len(arr)

-
-
-
-
-            samp_per_ticks = sr / self.tb
-            ticks = int(samp_count / samp_per_ticks)
-            self.log.debug(f"Audio Length: {ticks}")
-            self.log.debug(
-                f"... without rounding: {float(samp_count / samp_per_ticks)}"
-            )
-            return ticks
+        result = sum(1 for _ in iter_audio(self.src, self.tb, 0))
+        self.log.debug(f"Audio Length: {result}")
+        return result

         # If there's no audio, get length in video metadata.
-
-
-        with av.open(f"{self.src.path}") as cn:
-            if len(cn.streams.video) < 1:
+        with av.open(self.src.path) as container:
+            if len(container.streams.video) == 0:
                 self.log.error("Could not get media duration")

-            video =
+            video = container.streams.video[0]

             if video.duration is None or video.time_base is None:
                 dur = 0
@@ -171,56 +238,70 @@ class Levels:
         return arr

     def audio(self, stream: int) -> NDArray[np.float64]:
-        if stream
+        if stream >= len(self.src.audios):
             raise LevelError(f"audio: audio stream '{stream}' does not exist.")

         if (arr := self.read_cache("audio", {"stream": stream})) is not None:
             return arr

-
-
-
-
-
-
-
-
-        max_volume = get_max_volume(samples)
-        self.log.debug(f"Max volume: {max_volume}")
+        with av.open(self.src.path, "r") as container:
+            audio = container.streams.audio[stream]
+            if audio.duration is not None and audio.time_base is not None:
+                inaccurate_dur = int(audio.duration * audio.time_base * self.tb)
+            elif container.duration is not None:
+                inaccurate_dur = int(container.duration / av.time_base * self.tb)
+            else:
+                inaccurate_dur = 1024

-
-
+        bar = self.bar
+        bar.start(inaccurate_dur, "Analyzing audio volume")

-
-
-
-
-
-
+        result = np.zeros((inaccurate_dur), dtype=np.float64)
+        index = 0
+        for value in iter_audio(self.src, self.tb, stream):
+            if index > len(result) - 1:
+                result = np.concatenate(
+                    (result, np.zeros((len(result)), dtype=np.float64))
+                )
+            result[index] = value
+            bar.tick(index)
+            index += 1

-
-        self.
-            f"analyze: audio length: {audio_ticks} ({float(samp_count / samp_per_ticks)})"
-        )
-        self.bar.start(audio_ticks, "Analyzing audio volume")
+        bar.end()
+        return self.cache("audio", {"stream": stream}, result[:index])

-
+    def motion(self, stream: int, blur: int, width: int) -> NDArray[np.float64]:
+        if stream >= len(self.src.videos):
+            raise LevelError(f"motion: video stream '{stream}' does not exist.")

-
-
+        mobj = {"stream": stream, "width": width, "blur": blur}
+        if (arr := self.read_cache("motion", mobj)) is not None:
+            return arr

-
-
-
-
+        with av.open(self.src.path, "r") as container:
+            video = container.streams.video[stream]
+            inaccurate_dur = (
+                1024
+                if video.duration is None or video.time_base is None
+                else int(video.duration * video.time_base * self.tb)
+            )

-
-
+        bar = self.bar
+        bar.start(inaccurate_dur, "Analyzing motion")

-
+        result = np.zeros((inaccurate_dur), dtype=np.float64)
+        index = 0
+        for value in iter_motion(self.src, self.tb, stream, blur, width):
+            if index > len(result) - 1:
+                result = np.concatenate(
+                    (result, np.zeros((len(result)), dtype=np.float64))
+                )
+            result[index] = value
+            bar.tick(index)
+            index += 1

-
-        return self.cache("
+        bar.end()
+        return self.cache("motion", mobj, result[:index])

     def subtitle(
         self,
@@ -238,9 +319,6 @@ class Levels:
         except re.error as e:
             self.log.error(e)

-        import av
-        from av.subtitles.subtitle import AssSubtitle, TextSubtitle
-
         try:
             container = av.open(self.src.path, "r")
             subtitle_stream = container.streams.subtitles[stream]
@@ -287,8 +365,6 @@ class Levels:
             for sub in subset:
                 if isinstance(sub, AssSubtitle):
                     line = convert_ass_to_text(sub.ass.decode(errors="ignore"))
-                elif isinstance(sub, TextSubtitle):
-                    line = sub.text.decode(errors="ignore")
                 else:
                     continue

@@ -299,68 +375,3 @@ class Levels:
         container.close()

         return result
-
-    def motion(self, stream: int, blur: int, width: int) -> NDArray[np.float64]:
-        import av
-
-        if stream >= len(self.src.videos):
-            raise LevelError(f"motion: video stream '{stream}' does not exist.")
-
-        mobj = {"stream": stream, "width": width, "blur": blur}
-        if (arr := self.read_cache("motion", mobj)) is not None:
-            return arr
-
-        container = av.open(f"{self.src.path}", "r")
-
-        video = container.streams.video[stream]
-        video.thread_type = "AUTO"
-
-        inaccurate_dur = 1 if video.duration is None else video.duration
-        self.bar.start(inaccurate_dur, "Analyzing motion")
-
-        prev_frame = None
-        current_frame = None
-        total_pixels = self.src.videos[0].width * self.src.videos[0].height
-        index = 0
-
-        graph = av.filter.Graph()
-        link_nodes(
-            graph.add_buffer(template=video),
-            graph.add("scale", f"{width}:-1"),
-            graph.add("format", "gray"),
-            graph.add("gblur", f"sigma={blur}"),
-            graph.add("buffersink"),
-        )
-        graph.configure()
-
-        threshold_list = np.zeros((1024), dtype=np.float64)
-
-        for unframe in container.decode(video):
-            graph.push(unframe)
-            frame = graph.pull()
-
-            # Showing progress ...
-            assert frame.time is not None
-            index = int(frame.time * self.tb)
-            if frame.pts is not None:
-                self.bar.tick(frame.pts)
-
-            current_frame = frame.to_ndarray()
-
-            if index > len(threshold_list) - 1:
-                threshold_list = np.concatenate(
-                    (threshold_list, np.zeros((len(threshold_list)), dtype=np.float64)),
-                    axis=0,
-                )
-
-            if prev_frame is not None:
-                # Use `int16` to avoid underflow with `uint8` datatype
-                diff = np.abs(
-                    prev_frame.astype(np.int16) - current_frame.astype(np.int16)
-                )
-                threshold_list[index] = np.count_nonzero(diff) / total_pixels
-
-            prev_frame = current_frame
-
-        self.bar.end()
-        return self.cache("motion", mobj, threshold_list[:index])
auto_editor/edit.py
CHANGED
@@ -68,12 +68,8 @@ def set_video_codec(
 ) -> str:
     if codec == "auto":
         codec = "h264" if (src is None or not src.videos) else src.videos[0].codec
-        if ctr.vcodecs
-
-            return ctr.vcodecs[0]
-
-        if codec in ctr.disallow_v:
-            return ctr.vcodecs[0]
+        if codec not in ctr.vcodecs and ctr.default_vid != "none":
+            return ctr.default_vid
         return codec

     if codec == "copy":
@@ -83,12 +79,7 @@ def set_video_codec(
         log.error("Input file does not have a video stream to copy codec from.")
         codec = src.videos[0].codec

-    if ctr.
-        assert ctr.vcodecs is not None
-        if codec not in ctr.vcodecs:
-            log.error(codec_error.format(codec, out_ext))
-
-    if codec in ctr.disallow_v:
+    if ctr.vcodecs is not None and codec not in ctr.vcodecs:
         log.error(codec_error.format(codec, out_ext))

     return codec
@@ -99,8 +90,10 @@ def set_audio_codec(
 ) -> str:
     if codec == "auto":
         codec = "aac" if (src is None or not src.audios) else src.audios[0].codec
-        if
-            return ctr.
+        if codec not in ctr.acodecs and ctr.default_aud != "none":
+            return ctr.default_aud
+        if codec == "mp3float":
+            return "mp3"
         return codec

     if codec == "copy":
@@ -209,10 +202,8 @@ def edit_media(
     else:
         samplerate = args.sample_rate

-    ensure = Ensure(ffmpeg, bar, samplerate, temp, log)
-
     if tl is None:
-        tl = make_timeline(sources,
+        tl = make_timeline(sources, args, samplerate, bar, temp, log)

     if export["export"] == "timeline":
         from auto_editor.formats.json import make_json_timeline
@@ -223,7 +214,7 @@ def edit_media(
     if args.preview:
         from auto_editor.preview import preview

-        preview(
+        preview(tl, temp, log)
         return

     if export["export"] == "json":
@@ -272,13 +263,15 @@ def edit_media(
     sub_output = []
     apply_later = False

-
+    ensure = Ensure(ffmpeg, bar, samplerate, temp, log)
+
+    if ctr.default_sub != "none" and not args.sn:
         sub_output = make_new_subtitles(tl, ensure, temp)

-    if ctr.
+    if ctr.default_aud != "none":
         audio_output = make_new_audio(tl, ensure, args, ffmpeg, bar, temp, log)

-    if ctr.
+    if ctr.default_vid != "none":
         if tl.v:
             out_path, apply_later = render_av(ffmpeg, tl, args, bar, ctr, temp, log)
             visual_output.append((True, out_path))
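The "auto" branches in set_video_codec and set_audio_codec no longer consult vstrict or disallow_v; when the source codec is not in the container's supported set, they fall back to the container's default codec (unless that default is "none"). A condensed sketch of that rule with a hypothetical helper, not code from the package:

def resolve_codec(codec: str, supported: set[str], default: str) -> str:
    # Mirror of the new fallback: keep the source codec if the muxer
    # accepts it, otherwise use the muxer's default encoder.
    if codec not in supported and default != "none":
        return default
    return codec

assert resolve_codec("prores", {"h264", "hevc"}, "h264") == "h264"
assert resolve_codec("hevc", {"h264", "hevc"}, "h264") == "hevc"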
auto_editor/make_layers.py
CHANGED
@@ -18,7 +18,6 @@ from auto_editor.utils.types import Args, CoerceError, time
 if TYPE_CHECKING:
     from numpy.typing import NDArray

-    from auto_editor.output import Ensure
     from auto_editor.utils.bar import Bar
     from auto_editor.utils.chunks import Chunks
     from auto_editor.utils.log import Log
@@ -75,7 +74,6 @@ def make_av(src: FileInfo, all_clips: list[list[Clip]]) -> tuple[VSpace, ASpace]
 def run_interpreter_for_edit_option(
     text: str, filesetup: FileSetup
 ) -> NDArray[np.bool_]:
-    ensure = filesetup.ensure
     src = filesetup.src
     tb = filesetup.tb
     bar = filesetup.bar
@@ -87,8 +85,8 @@ def run_interpreter_for_edit_option(
     if log.is_debug:
         log.debug(f"edit: {parser}")

-    env["timebase"] =
-    env["@levels"] = Levels(
+    env["timebase"] = tb
+    env["@levels"] = Levels(src, tb, bar, temp, log)
     env["@filesetup"] = filesetup

     results = interpret(env, parser)
@@ -139,7 +137,6 @@ def parse_time(val: str, arr: NDArray, tb: Fraction) -> int:  # raises: `CoerceError`

 def make_timeline(
     sources: list[FileInfo],
-    ensure: Ensure,
     args: Args,
     sr: int,
     bar: Bar,
@@ -169,7 +166,7 @@ def make_timeline(
     concat = np.concatenate

     for i, src in enumerate(sources):
-        filesetup = FileSetup(src,
+        filesetup = FileSetup(src, len(sources) < 2, tb, bar, temp, log)

         edit_result = run_interpreter_for_edit_option(method, filesetup)
         mut_margin(edit_result, start_margin, end_margin)
auto_editor/output.py
CHANGED
@@ -57,7 +57,7 @@ class Ensure:
         output_astream = out_container.add_stream("pcm_s16le", rate=sample_rate)
         assert isinstance(output_astream, av.audio.stream.AudioStream)

-        resampler = AudioResampler(format="s16", layout="stereo", rate=sample_rate)
+        resampler = AudioResampler(format="s16", layout="stereo", rate=sample_rate)
         for i, frame in enumerate(in_container.decode(astream)):
             if i % 1500 == 0:
                 bar.tick(0 if frame.time is None else frame.time)
@@ -99,7 +99,7 @@ def _ffset(option: str, value: str | None) -> list[str]:
     return [option] + [value]


-def video_quality(args: Args
+def video_quality(args: Args) -> list[str]:
     return (
         _ffset("-b:v", args.video_bitrate)
         + ["-c:v", args.video_codec]
@@ -174,7 +174,7 @@ def mux_quality_media(
     for is_video, path in visual_output:
         if is_video:
             if apply_v:
-                cmd += video_quality(args
+                cmd += video_quality(args)
             else:
                 # Real video is only allowed on track 0
                 cmd += ["-c:v:0", "copy"]
@@ -211,7 +211,7 @@ def mux_quality_media(
             cmd.extend(["-c:s", scodec])
         elif ctr.scodecs is not None:
             if scodec not in ctr.scodecs:
-                scodec = ctr.
+                scodec = ctr.default_sub
             cmd.extend(["-c:s", scodec])

     if a_tracks > 0:
auto_editor/preview.py
CHANGED
@@ -6,7 +6,6 @@ from statistics import fmean, median
 from typing import TextIO

 from auto_editor.analyze import Levels
-from auto_editor.output import Ensure
 from auto_editor.timeline import v3
 from auto_editor.utils.bar import Bar
 from auto_editor.utils.func import to_timecode
@@ -49,7 +48,7 @@ def all_cuts(tl: v3, in_len: int) -> list[int]:
     return cut_lens


-def preview(
+def preview(tl: v3, temp: str, log: Log) -> None:
     log.conwrite("")
     tb = tl.tb

@@ -66,7 +65,7 @@ def preview(ensure: Ensure, tl: v3, temp: str, log: Log) -> None:

     in_len = 0
     for src in all_sources:
-        in_len += Levels(
+        in_len += Levels(src, tb, Bar("none"), temp, log).media_length

     out_len = tl.out_len()
auto_editor/render/subtitle.py
CHANGED
@@ -49,7 +49,7 @@ class SubtitleParser:
         self.codec = codec
         self.contents = []

-        if codec == "ass":
+        if codec == "ass" or codec == "ssa":
             time_code = re.compile(r"(.*)(\d+:\d+:[\d.]+)(.*)(\d+:\d+:[\d.]+)(.*)")
         elif codec == "webvtt":
             time_code = re.compile(r"()(\d+:[\d.]+)( --> )(\d+:[\d.]+)(\n.*)")
auto_editor/render/video.py
CHANGED
@@ -17,8 +17,6 @@ from auto_editor.utils.types import color
 if TYPE_CHECKING:
     from collections.abc import Iterator

-    from av.filter import FilterContext
-
     from auto_editor.ffwrapper import FFmpeg, FileInfo
     from auto_editor.timeline import v3
     from auto_editor.utils.bar import Bar
@@ -33,11 +31,6 @@ class VideoFrame:
     src: FileInfo


-def link_nodes(*nodes: FilterContext) -> None:
-    for c, n in zip(nodes, nodes[1:]):
-        c.link_to(n)
-
-
 # From: github.com/PyAV-Org/PyAV/blob/main/av/video/frame.pyx
 allowed_pix_fmt = {
     "yuv420p",
@@ -98,12 +91,11 @@ def make_image_cache(tl: v3) -> dict[tuple[FileInfo, int], np.ndarray]:
         for frame in cn.decode(my_stream):
             if obj.width != 0:
                 graph = av.filter.Graph()
-                link_nodes(
+                graph.link_nodes(
                     graph.add_buffer(template=my_stream),
                     graph.add("scale", f"{obj.width}:-1"),
                     graph.add("buffersink"),
-                )
-                graph.vpush(frame)
+                ).vpush(frame)
                 frame = graph.vpull()
             img_cache[(obj.src, obj.width)] = frame.to_ndarray(
                 format="rgb24"
@@ -177,7 +169,7 @@ def render_av(
     target_width = max(round(tl.res[0] * args.scale), 2)
     target_height = max(round(tl.res[1] * args.scale), 2)
     scale_graph = av.filter.Graph()
-    link_nodes(
+    scale_graph.link_nodes(
         scale_graph.add(
             "buffer", video_size="1x1", time_base="1/1", pix_fmt=target_pix_fmt
         ),
@@ -213,7 +205,7 @@ def render_av(
     if apply_video_later:
         cmd += ["-c:v", "mpeg4", "-qscale:v", "1"]
     else:
-        cmd += video_quality(args
+        cmd += video_quality(args)

     # Setting SAR requires re-encoding so we do it here.
     if src is not None and src.videos and (sar := src.videos[0].sar) is not None:
@@ -293,7 +285,7 @@ def render_av(
             if (frame.width, frame.height) != tl.res:
                 width, height = tl.res
                 graph = av.filter.Graph()
-                link_nodes(
+                graph.link_nodes(
                     graph.add_buffer(template=my_stream),
                     graph.add(
                         "scale",
@@ -301,21 +293,19 @@ def render_av(
                     ),
                     graph.add("pad", f"{width}:{height}:-1:-1:color={bg}"),
                     graph.add("buffersink"),
-                )
-                graph.vpush(frame)
+                ).vpush(frame)
                 frame = graph.vpull()
             elif isinstance(obj, TlRect):
                 graph = av.filter.Graph()
                 x, y = apply_anchor(obj.x, obj.y, obj.width, obj.height, obj.anchor)
-                link_nodes(
+                graph.link_nodes(
                     graph.add_buffer(template=my_stream),
                     graph.add(
                         "drawbox",
                         f"x={x}:y={y}:w={obj.width}:h={obj.height}:color={obj.fill}:t=fill",
                     ),
                     graph.add("buffersink"),
-                )
-                graph.vpush(frame)
+                ).vpush(frame)
                 frame = graph.vpull()
             elif isinstance(obj, TlImage):
                 img = img_cache[(obj.src, obj.width)]
auto_editor/subcommands/levels.py
CHANGED
@@ -2,15 +2,15 @@ from __future__ import annotations

 import sys
 from dataclasses import dataclass, field
+from fractions import Fraction
 from typing import TYPE_CHECKING

 import numpy as np

-from auto_editor.analyze import LevelError, Levels
-from auto_editor.ffwrapper import FFmpeg, initFileInfo
+from auto_editor.analyze import LevelError, Levels, iter_audio, iter_motion
+from auto_editor.ffwrapper import initFileInfo
 from auto_editor.lang.palet import env
 from auto_editor.lib.contracts import is_bool, is_nat, is_nat1, is_str, is_void, orc
-from auto_editor.output import Ensure
 from auto_editor.utils.bar import Bar
 from auto_editor.utils.cmdkw import (
     ParserError,
@@ -25,6 +25,7 @@ from auto_editor.utils.types import frame_rate
 from auto_editor.vanparse import ArgumentParser

 if TYPE_CHECKING:
+    from collections.abc import Iterator
     from fractions import Fraction

     from numpy.typing import NDArray
@@ -35,8 +36,6 @@ class LevelArgs:
     input: list[str] = field(default_factory=list)
     edit: str = "audio"
     timebase: Fraction | None = None
-    ffmpeg_location: str | None = None
-    my_ffmpeg: bool = False
     help: bool = False

@@ -54,12 +53,6 @@ def levels_options(parser: ArgumentParser) -> ArgumentParser:
         type=frame_rate,
         help="Set custom timebase",
     )
-    parser.add_argument("--ffmpeg-location", help="Point to your custom ffmpeg file")
-    parser.add_argument(
-        "--my-ffmpeg",
-        flag=True,
-        help="Use the ffmpeg on your PATH instead of the one packaged",
-    )
     return parser

@@ -79,12 +72,21 @@ def print_arr(arr: NDArray) -> None:
     print("")


+def print_arr_gen(arr: Iterator[int | float]) -> None:
+    print("")
+    print("@start")
+    for a in arr:
+        if isinstance(a, float):
+            print(f"{a:.20f}")
+        else:
+            print(a)
+    print("")
+
+
 def main(sys_args: list[str] = sys.argv[1:]) -> None:
     parser = levels_options(ArgumentParser("levels"))
     args = parser.parse_args(LevelArgs, sys_args)

-    ffmpeg = FFmpeg(args.ffmpeg_location, args.my_ffmpeg)
-
     bar = Bar("none")
     temp = setup_tempdir(None, Log())
     log = Log(quiet=True, temp=temp)
@@ -96,7 +98,6 @@ def main(sys_args: list[str] = sys.argv[1:]) -> None:
     src = sources[0]

     tb = src.get_fps() if args.timebase is None else args.timebase
-    ensure = Ensure(ffmpeg, bar, src.get_sr(), temp, log)

     if ":" in args.edit:
         method, attrs = args.edit.split(":", 1)
@@ -131,12 +132,12 @@ def main(sys_args: list[str] = sys.argv[1:]) -> None:
     except ParserError as e:
         log.error(e)

-    levels = Levels(
+    levels = Levels(src, tb, bar, temp, log)
     try:
         if method == "audio":
-
+            print_arr_gen(iter_audio(src, tb, **obj))
         elif method == "motion":
-
+            print_arr_gen(iter_motion(src, tb, **obj))
         elif method == "subtitle":
             print_arr(levels.subtitle(**obj))
         elif method == "none":
auto_editor/subcommands/repl.py
CHANGED
@@ -6,11 +6,10 @@ from fractions import Fraction

 import auto_editor
 from auto_editor.analyze import FileSetup, Levels
-from auto_editor.ffwrapper import FFmpeg, initFileInfo
+from auto_editor.ffwrapper import initFileInfo
 from auto_editor.lang.palet import ClosingError, Lexer, Parser, env, interpret
 from auto_editor.lib.data_structs import print_str
 from auto_editor.lib.err import MyError
-from auto_editor.output import Ensure
 from auto_editor.utils.bar import Bar
 from auto_editor.utils.func import setup_tempdir
 from auto_editor.utils.log import Log
@@ -48,12 +47,6 @@ def repl_options(parser: ArgumentParser) -> ArgumentParser:
         type=frame_rate,
         help="Set custom timebase",
     )
-    parser.add_argument("--ffmpeg-location", help="Point to your custom ffmpeg file")
-    parser.add_argument(
-        "--my-ffmpeg",
-        flag=True,
-        help="Use the ffmpeg on your PATH instead of the one packaged",
-    )
     parser.add_argument(
         "--temp-dir",
         metavar="PATH",
@@ -68,16 +61,14 @@ def main(sys_args: list[str] = sys.argv[1:]) -> None:
     if args.input:
         temp = setup_tempdir(args.temp_dir, Log())
         log = Log(quiet=True, temp=temp)
-        ffmpeg = FFmpeg(args.ffmpeg_location, args.my_ffmpeg, False)
         strict = len(args.input) < 2
         sources = [initFileInfo(path, log) for path in args.input]
         src = sources[0]
         tb = src.get_fps() if args.timebase is None else args.timebase
         bar = Bar("modern")
-        ensure = Ensure(ffmpeg, bar, src.get_sr(), temp, log)
         env["timebase"] = tb
-        env["@levels"] = Levels(
-        env["@filesetup"] = FileSetup(src,
+        env["@levels"] = Levels(src, tb, bar, temp, log)
+        env["@filesetup"] = FileSetup(src, strict, tb, bar, temp, log)

     print(f"Auto-Editor {auto_editor.version} ({auto_editor.__version__})")
     text = None
auto_editor/subcommands/subdump.py
CHANGED
@@ -1,7 +1,7 @@
 import sys

 import av
-from av.subtitles.subtitle import AssSubtitle, TextSubtitle
+from av.subtitles.subtitle import AssSubtitle


 def main(sys_args: list[str] = sys.argv[1:]) -> None:
@@ -9,13 +9,10 @@ def main(sys_args: list[str] = sys.argv[1:]) -> None:
     with av.open(input_file) as container:
         for s in range(len(container.streams.subtitles)):
             print(f"file: {input_file} ({s}:{container.streams.subtitles[s].name})")
-            for
-            for
-
-
-                    print(sub.ass.decode("utf-8", errors="ignore"))
-                elif isinstance(sub, TextSubtitle):
-                    print(sub.text.decode("utf-8", errors="ignore"))
+            for subset in container.decode(subtitles=s):
+                for sub in subset:
+                    if isinstance(sub, AssSubtitle):
+                        print(sub.ass.decode("utf-8", errors="ignore"))
             print("------")

auto_editor/utils/container.py
CHANGED
@@ -1,338 +1,96 @@
 from __future__ import annotations

-from dataclasses import dataclass, field
+from dataclasses import dataclass
 from typing import TypedDict

+import av
+from av.codec import Codec
+

 class DictContainer(TypedDict, total=False):
-    allow_video: bool
-    allow_audio: bool
-    allow_subtitle: bool
-    allow_image: bool
     max_videos: int | None
     max_audios: int | None
     max_subtitles: int | None
-    vcodecs: list[str] | None
-    acodecs: list[str] | None
-    scodecs: list[str] | None
-    vstrict: bool
-    sstrict: bool
-    disallow_v: list[str]
     samplerate: list[int] | None


 @dataclass(slots=True)
 class Container:
-
-
-
-
+    allow_image: bool
+    vcodecs: set[str]
+    acodecs: set[str]
+    scodecs: set[str]
+    default_vid: str
+    default_aud: str
+    default_sub: str
     max_videos: int | None = None
     max_audios: int | None = None
     max_subtitles: int | None = None
-    vcodecs: list[str] | None = None
-    acodecs: list[str] | None = None
-    scodecs: list[str] | None = None
-    vstrict: bool = False
-    sstrict: bool = False
-    disallow_v: list[str] = field(default_factory=list)
     samplerate: list[int] | None = None  # Any samplerate is allowed


-
-"
-"
-"
-"
-"
-"
-"
-"
-
-
-
-
-
-opus_en = ["opus", "libopus"]
-
-h265: DictContainer = {
-    "allow_video": True,
-    "vcodecs": hevc_en + ["mpeg4"] + h264_en,
-}
-h264: DictContainer = {
-    "allow_video": True,
-    "vcodecs": h264_en + ["mpeg4"] + hevc_en,
-}
-aac: DictContainer = {
-    "allow_audio": True,
-    "max_audios": 1,
-    "acodecs": aac_en,
-}
-ass: DictContainer = {
-    "allow_subtitle": True,
-    "scodecs": ["ass", "ssa"],
-    "max_subtitles": 1,
-    "sstrict": True,
-}
-mp4: DictContainer = {
-    "allow_video": True,
-    "allow_audio": True,
-    "allow_subtitle": True,
-    "allow_image": True,
-    "vcodecs": h264_en + hevc_en + av1_en + ["vp9", "mpeg4", "mpeg2video", "mjpeg"],
-    "acodecs": aac_en + opus_en + ["mp3", "flac", "vorbis", "libvorbis", "ac3", "mp2"],
-    "vstrict": True,
-}
-ogg: DictContainer = {
-    "allow_video": True,
-    "allow_audio": True,
-    "allow_subtitle": True,
-    "vcodecs": ["libtheora", "theora"],
-    "acodecs": opus_en + ["libvorbis", "vorbis", "flac", "speex"],
-    "vstrict": True,
+containers: dict[str, DictContainer] = {
+    "aac": {"max_audios": 1},
+    "adts": {"max_audios": 1},
+    "ass": {"max_subtitles": 1},
+    "ssa": {"max_subtitles": 1},
+    "apng": {"max_videos": 1},
+    "gif": {"max_videos": 1},
+    "wav": {"max_audios": 1},
+    "ast": {"max_audios": 1},
+    "mp3": {"max_audios": 1},
+    "flac": {"max_audios": 1},
+    "srt": {"max_subtitles": 1},
+    "vtt": {"max_subtitles": 1},
+    "swf": {"samplerate": [44100, 22050, 11025]},
 }

-mka_audio = (
-    ["libvorbis", "vorbis"]
-    + aac_en
-    + opus_en
-    + [
-        "mp3",
-        "flac",
-        "ac3",
-        "mp2",
-        "wmav2",
-        "pcm_s16le",
-        "pcm_alaw",
-        "pcm_f32le",
-        "pcm_f64le",
-        "pcm_mulaw",
-        "pcm_s16be",
-        "pcm_s24be",
-        "pcm_s24le",
-        "pcm_s32be",
-        "pcm_s32le",
-        "pcm_u8",
-    ]
-)

-
-
-
-"
-
-    "ssa":
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-"
-
-
-
-
-
-            "pcm_u8",
-        ],
-    },
-    "ast": {
-        "allow_audio": True,
-        "max_audios": 1,
-        "acodecs": ["pcm_s16be_planar"],
-    },
-    "mp3": {
-        "allow_audio": True,
-        "max_audios": 1,
-        "acodecs": ["mp3"],
-    },
-    "opus": {
-        "allow_audio": True,
-        "acodecs": opus_en + ["flac", "libvorbis", "vorbis", "speex"],
-    },
-    "oga": {
-        "allow_audio": True,
-        "acodecs": opus_en + ["flac", "libvorbis", "vorbis", "speex"],
-    },
-    "flac": {
-        "allow_audio": True,
-        "max_audios": 1,
-        "acodecs": ["flac"],
-    },
-    "webm": {
-        "allow_video": True,
-        "allow_audio": True,
-        "allow_subtitle": True,
-        "vcodecs": ["vp9", "vp8"] + av1_en,
-        "acodecs": opus_en + ["vorbis", "libvorbis"],
-        "scodecs": ["webvtt"],
-        "vstrict": True,
-        "sstrict": True,
-    },
-    "srt": {
-        "allow_subtitle": True,
-        "scodecs": ["srt"],
-        "max_subtitles": 1,
-        "sstrict": True,
-    },
-    "vtt": {
-        "allow_subtitle": True,
-        "scodecs": ["webvtt"],
-        "max_subtitles": 1,
-        "sstrict": True,
-    },
-    "avi": {
-        "allow_video": True,
-        "allow_audio": True,
-        "vcodecs": ["mpeg4"] + h264_en + ["prores", "mjpeg", "mpeg2video", "rawvideo"],
-        "acodecs": ["mp3"]
-        + aac_en
-        + [
-            "flac",
-            "vorbis",
-            "libvorbis",
-            "mp2",
-            "wmav2",
-            "pcm_s16le",
-            "pcm_alaw",
-            "pcm_f32le",
-            "pcm_f64le",
-            "pcm_mulaw",
-            "pcm_s24le",
-            "pcm_s32le",
-            "pcm_u8",
-        ],
-        "disallow_v": hevc_en + ["apng", "gif"],
-    },
-    "wmv": {
-        "allow_video": True,
-        "allow_audio": True,
-        "vcodecs": ["msmpeg4v3"]
-        + h264_en
-        + ["mpeg4", "mpeg2video", "mjpeg", "rawvideo"],
-        "acodecs": ["wmav2"]
-        + aac_en
-        + [
-            "mp3",
-            "flac",
-            "vorbis",
-            "libvorbis",
-            "ac3",
-            "mp2",
-            "pcm_s16le",
-            "pcm_alaw",
-            "pcm_f32le",
-            "pcm_f64le",
-            "pcm_mulaw",
-            "pcm_s24le",
-            "pcm_s32le",
-            "pcm_u8",
-        ],
-        "vstrict": True,
-    },
-    "mkv": {
-        "allow_video": True,
-        "allow_audio": True,
-        "allow_subtitle": True,
-        "allow_image": True,
-        "vcodecs": h264_en
-        + hevc_en
-        + prores_en
-        + [
-            "vp9",
-            "vp8",
-            "mpeg4",
-            "mpeg2video",
-            "msmpeg4v3",
-            "mjpeg",
-            "gif",
-            "rawvideo",
-        ],
-        "acodecs": mka_audio,
-        "disallow_v": ["apng"],
-    },
-    "mka": {
-        "allow_audio": True,
-        "acodecs": mka_audio,
-    },
-    "mov": {
-        "allow_video": True,
-        "allow_audio": True,
-        "allow_subtitle": True,
-        "vcodecs": h264_en
-        + hevc_en
-        + prores_en
-        + [
-            "mpeg4",
-            "mpeg2video",
-            "msmpeg4v3",
-            "mjpeg",
-            "gif",
-            "flv1",
-            "dvvideo",
-            "rawvideo",
-        ],
-        "acodecs": aac_en
-        + [
-            "mp3",
-            "vorbis",
-            "libvorbis",
-            "ac3",
-            "mp2",
-            "wmav2",
-            "pcm_s16le",
-            "pcm_alaw",
-            "pcm_f32be",
-            "pcm_f32le",
-            "pcm_f64be",
-            "pcm_f64le",
-            "pcm_mulaw",
-            "pcm_s16be",
-            "pcm_s24be",
-            "pcm_s24le",
-            "pcm_s32be",
-            "pcm_s32le",
-            "pcm_s8",
-            "pcm_u8",
-        ],
-        "disallow_v": ["apng", "vp9", "vp8"],
-    },
-    "swf": {
-        "allow_video": True,
-        "allow_audio": True,
-        "vcodecs": ["flv1", "mjpeg"],
-        "acodecs": ["mp3"],
-        "vstrict": True,
-        "samplerate": [44100, 22050, 11025],
-    },
-}
+def codec_type(x: str) -> str:
+    if x in ("vp9", "vp8", "h264", "hevc", "av1", "gif", "apng"):
+        return "video"
+    if x in ("aac", "flac", "mp3"):
+        return "audio"
+    if x in ("ass", "ssa", "srt"):
+        return "subtitle"
+
+    try:
+        return Codec(x, "r").type
+    except Exception:
+        try:
+            return Codec(x, "w").type
+        except Exception:
+            return ""
+
+
+def container_constructor(ext: str) -> Container:
+    with av.open(f".{ext}", "w") as container:
+        codecs = container.supported_codecs
+        if ext == "webm":
+            vdefault = "vp9"
+        else:
+            vdefault = container.default_video_codec
+        adefault = container.default_audio_codec
+        sdefault = container.default_subtitle_codec
+
+    vcodecs = set()
+    acodecs = set()
+    scodecs = set()
+
+    for codec in codecs:
+        kind = codec_type(codec)
+        if kind == "video":
+            vcodecs.add(codec)
+            if codec == "h264":
+                vcodecs.add("libx264")
+        if kind == "audio":
+            acodecs.add(codec)
+        if kind == "subtitle":
+            scodecs.add(codec)

+    allow_image = ext in ("mp4", "mkv")
+    kwargs = containers[ext] if ext in containers else {}

-
-
-
-    return Container(allow_video=True, allow_audio=True, allow_subtitle=True)
+    return Container(
+        allow_image, vcodecs, acodecs, scodecs, vdefault, adefault, sdefault, **kwargs
+    )
{auto_editor-24.29.1.dist-info → auto_editor-24.30.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-editor
-Version: 24.29.1
+Version: 24.30.1
 Summary: Auto-Editor: Effort free video editing!
 Author-email: WyattBlue <wyattblue@auto-editor.com>
 License: Unlicense
@@ -12,7 +12,7 @@ Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: numpy >=1.23.0
-Requires-Dist: pyav ==12.
+Requires-Dist: pyav ==12.3.*
 Requires-Dist: ae-ffmpeg ==1.2.*

 <p align="center"><img src="https://auto-editor.com/img/auto-editor-banner.webp" title="Auto-Editor" width="700"></p>
{auto_editor-24.29.1.dist-info → auto_editor-24.30.1.dist-info}/RECORD
CHANGED
@@ -1,12 +1,12 @@
-auto_editor/__init__.py,sha256=
+auto_editor/__init__.py,sha256=2eo38rUAsDC2UUJAIJ4AZoh26SJrz93bjEMG1PMKUnQ,43
 auto_editor/__main__.py,sha256=nkb8N_bxF_qld53LWo4c6Y0n9NDRdsPfANpVF1RD1cQ,9863
-auto_editor/analyze.py,sha256=
-auto_editor/edit.py,sha256=
+auto_editor/analyze.py,sha256=6siPLoYwEjvbAzPfKZWDEcqtGXIM8n9qNveyCCYLAgI,11769
+auto_editor/edit.py,sha256=yZFLoWqfC2QFEdibp7AlKXMRB-6jHwNDpTW2SlN-4Is,11325
 auto_editor/ffwrapper.py,sha256=jga7-HbQnC4w21qZk4aY4kwLT7UPqkGn6NJPFM5Qssc,7811
 auto_editor/help.py,sha256=BFiP7vBz42TUzum4-zaQIrV1OY7kHeN0pe0MPE0T5xw,7997
-auto_editor/make_layers.py,sha256=
-auto_editor/output.py,sha256=
-auto_editor/preview.py,sha256=
+auto_editor/make_layers.py,sha256=AzfxRkm9ZFjeVwdh3jFT_5_YvVaUMtphVKDtIaDI5bo,8900
+auto_editor/output.py,sha256=OpZG3SvDRumKoU-fZOA-8-hc9pO_uBdO3WTNhDO9LHM,8065
+auto_editor/preview.py,sha256=CVKtSjcX1dNjbBgsD9PTbRBwRV6dxBLkJ_VIkkBuTUY,3032
 auto_editor/timeline.py,sha256=JwcS-8AS5vsoTL_m03aosYijScQef4AGa2lyutQ8wbI,7069
 auto_editor/validate_input.py,sha256=JQt7J5xOBJDp6lnd2sQptpYhf7Z_hyxNEzLsE9E7LKU,2596
 auto_editor/vanparse.py,sha256=p0u1X2gKM_lWB9bg-Lotqq9-bjfLgsVW9-U4Lwbz0aU,10029
@@ -27,29 +27,29 @@ auto_editor/lib/data_structs.py,sha256=EXNcdMsdmZxMRlpbXmIbRoC-YfGzvPZi7EdBQGwvp
 auto_editor/lib/err.py,sha256=UlszQJdzMZwkbT8x3sY4GkCV_5x9yrd6uVVUzvA8iiI,35
 auto_editor/render/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 auto_editor/render/audio.py,sha256=pUhD4rQZfUnyzKgpuxNxl_2CUGwbkAWo2356HUAW7VM,8835
-auto_editor/render/subtitle.py,sha256=
-auto_editor/render/video.py,sha256=
+auto_editor/render/subtitle.py,sha256=g195kDN0LcwKlZeQMCflXPH5n_74iwCk1RPLSQ5eP34,4373
+auto_editor/render/video.py,sha256=MOhZh72VGe0LVrDE8SzBb-FqXUrnQTIF51zz1qvX2_I,12987
 auto_editor/subcommands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 auto_editor/subcommands/desc.py,sha256=GDrKJYiHMaeTrplZAceXl1JwoqD78XsV2_5lc0Xd7po,869
 auto_editor/subcommands/info.py,sha256=7Sgt9WR0rWxe7juCRKseMxW6gKv3z3voqFcL8-MOVVM,6927
-auto_editor/subcommands/levels.py,sha256=
+auto_editor/subcommands/levels.py,sha256=ymHwAp2l7HScmdTtNAPDEeDfRVY_rRPRigUCDYx2F40,4220
 auto_editor/subcommands/palet.py,sha256=tbQoRWoT4jR3yu0etGApfprM-oQgXIjC-rIY-QG3nM0,655
-auto_editor/subcommands/repl.py,sha256=
-auto_editor/subcommands/subdump.py,sha256=
+auto_editor/subcommands/repl.py,sha256=gEVmZduWwnumMe4GLHi72C_IbmdscYwiz7mjigU97Qk,3300
+auto_editor/subcommands/subdump.py,sha256=af_XBf7kaevqHn1A71z8C-7x8pS5WKD9FE_ugkCw6rk,665
 auto_editor/subcommands/test.py,sha256=UcpJp-PMcoUyz6LDD4y2V6EQ9w1ed66fPZr4GpzJReg,25050
 auto_editor/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 auto_editor/utils/bar.py,sha256=RJqkJ8gNr8op_Z-2hh48ExjSonmTPX-RshctK_itv14,3988
 auto_editor/utils/chunks.py,sha256=J-eGKtEz68gFtRrj1kOSgH4Tj_Yz6prNQ7Xr-d9NQJw,52
 auto_editor/utils/cmdkw.py,sha256=XApxw7FZBOEJV9N4LHhdw1GVfHbFfCjr-zCZ1gJsSvY,6002
-auto_editor/utils/container.py,sha256=
+auto_editor/utils/container.py,sha256=CWB55RRzADBuQmX8lhiPaG8iPayGWaebvtmJjgJ6WRA,2497
 auto_editor/utils/encoder.py,sha256=auNYo7HXbcU4iTUCc0LE5lpwFmSvdWvBm6-5KIaRK8w,2983
 auto_editor/utils/func.py,sha256=H38xO6Wxg1TZILVrx-nCowCzj_mqBUtJuOFp4DV3Hsc,4843
 auto_editor/utils/log.py,sha256=ry-C92PRkJ-c8PQYIs1imk1qigDYfsCoLBYK6CQSP7I,2844
 auto_editor/utils/subtitle_tools.py,sha256=TjjVPiT8bWzZJcrZjF7ddpgfIsVkLE4LyxXzBswHAGU,693
 auto_editor/utils/types.py,sha256=zWbU_VkcdP4yHHzKyaSiXu560n5U53i0x5SPkUDsCZU,11570
-auto_editor-24.29.1.dist-info/LICENSE,sha256=
-auto_editor-24.29.1.dist-info/METADATA,sha256=
-auto_editor-24.29.1.dist-info/WHEEL,sha256=
-auto_editor-24.29.1.dist-info/entry_points.txt,sha256=
-auto_editor-24.29.1.dist-info/top_level.txt,sha256=
-auto_editor-24.29.1.dist-info/RECORD,,
+auto_editor-24.30.1.dist-info/LICENSE,sha256=yiq99pWITHfqS0pbZMp7cy2dnbreTuvBwudsU-njvIM,1210
+auto_editor-24.30.1.dist-info/METADATA,sha256=6PQ4JL2l2YioyyknShGetU4rsQ2Czp8EJHZzMsbo7dA,6138
+auto_editor-24.30.1.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
+auto_editor-24.30.1.dist-info/entry_points.txt,sha256=-H7zdTw4MqnAcwrN5xTNkGIhzZtJMxS9r6lTMeR9-aA,240
+auto_editor-24.30.1.dist-info/top_level.txt,sha256=ky1HUkqq9i034c4CUU_0wBw0xZsxxyGEak1eTbdvpyA,12
+auto_editor-24.30.1.dist-info/RECORD,,
File without changes
|
File without changes
|
File without changes
|