auto-editor 25.3.1__py3-none-any.whl → 26.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- auto_editor/__init__.py +1 -1
- auto_editor/__main__.py +1 -11
- auto_editor/edit.py +156 -44
- auto_editor/ffwrapper.py +2 -43
- auto_editor/help.py +4 -3
- auto_editor/output.py +22 -183
- auto_editor/render/audio.py +65 -55
- auto_editor/render/subtitle.py +69 -10
- auto_editor/render/video.py +166 -180
- auto_editor/subcommands/repl.py +12 -3
- auto_editor/subcommands/test.py +41 -37
- auto_editor/utils/container.py +2 -0
- auto_editor/utils/func.py +1 -1
- auto_editor/utils/types.py +2 -15
- {auto_editor-25.3.1.dist-info → auto_editor-26.0.0.dist-info}/METADATA +1 -1
- {auto_editor-25.3.1.dist-info → auto_editor-26.0.0.dist-info}/RECORD +20 -20
- {auto_editor-25.3.1.dist-info → auto_editor-26.0.0.dist-info}/WHEEL +1 -1
- {auto_editor-25.3.1.dist-info → auto_editor-26.0.0.dist-info}/LICENSE +0 -0
- {auto_editor-25.3.1.dist-info → auto_editor-26.0.0.dist-info}/entry_points.txt +0 -0
- {auto_editor-25.3.1.dist-info → auto_editor-26.0.0.dist-info}/top_level.txt +0 -0
auto_editor/render/video.py
CHANGED
@@ -1,26 +1,22 @@
|
|
1
1
|
from __future__ import annotations
|
2
2
|
|
3
|
-
import os.path
|
4
3
|
from dataclasses import dataclass
|
5
|
-
from subprocess import DEVNULL, PIPE
|
6
|
-
from sys import platform
|
7
4
|
from typing import TYPE_CHECKING
|
8
5
|
|
9
6
|
import av
|
10
7
|
import numpy as np
|
11
8
|
|
12
|
-
from auto_editor.output import
|
9
|
+
from auto_editor.output import parse_bitrate
|
13
10
|
from auto_editor.timeline import TlImage, TlRect, TlVideo
|
14
|
-
from auto_editor.utils.encoder import encoders
|
15
11
|
from auto_editor.utils.types import color
|
16
12
|
|
17
13
|
if TYPE_CHECKING:
|
18
14
|
from collections.abc import Iterator
|
15
|
+
from typing import Any
|
19
16
|
|
20
|
-
from auto_editor.ffwrapper import
|
17
|
+
from auto_editor.ffwrapper import FileInfo
|
21
18
|
from auto_editor.timeline import v3
|
22
19
|
from auto_editor.utils.bar import Bar
|
23
|
-
from auto_editor.utils.container import Container
|
24
20
|
from auto_editor.utils.log import Log
|
25
21
|
from auto_editor.utils.types import Args
|
26
22
|
|
@@ -90,8 +86,10 @@ def make_image_cache(tl: v3) -> dict[tuple[FileInfo, int], np.ndarray]:
|
|
90
86
|
|
91
87
|
|
92
88
|
def render_av(
|
93
|
-
|
94
|
-
) ->
|
89
|
+
output: av.container.OutputContainer, tl: v3, args: Args, bar: Bar, log: Log
|
90
|
+
) -> Any:
|
91
|
+
from_ndarray = av.VideoFrame.from_ndarray
|
92
|
+
|
95
93
|
src = tl.src
|
96
94
|
cns: dict[FileInfo, av.container.InputContainer] = {}
|
97
95
|
decoders: dict[FileInfo, Iterator[av.VideoFrame]] = {}
|
@@ -99,8 +97,8 @@ def render_av(
|
|
99
97
|
tous: dict[FileInfo, int] = {}
|
100
98
|
|
101
99
|
target_pix_fmt = "yuv420p" # Reasonable default
|
100
|
+
target_fps = tl.tb # Always constant
|
102
101
|
img_cache = make_image_cache(tl)
|
103
|
-
temp = log.temp
|
104
102
|
|
105
103
|
first_src: FileInfo | None = None
|
106
104
|
for src in tl.sources:
|
@@ -133,14 +131,27 @@ def render_av(
|
|
133
131
|
log.debug(f"Tous: {tous}")
|
134
132
|
log.debug(f"Clips: {tl.v}")
|
135
133
|
|
136
|
-
|
137
|
-
|
138
|
-
|
139
|
-
|
140
|
-
|
141
|
-
|
134
|
+
if args.video_codec == "gif":
|
135
|
+
_c = av.Codec("gif", "w")
|
136
|
+
if _c.video_formats is not None and target_pix_fmt in (
|
137
|
+
f.name for f in _c.video_formats
|
138
|
+
):
|
139
|
+
target_pix_fmt = target_pix_fmt
|
140
|
+
else:
|
141
|
+
target_pix_fmt = "rgb8"
|
142
|
+
del _c
|
143
|
+
else:
|
144
|
+
target_pix_fmt = (
|
145
|
+
target_pix_fmt if target_pix_fmt in allowed_pix_fmt else "yuv420p"
|
146
|
+
)
|
142
147
|
|
143
|
-
|
148
|
+
ops = {"mov_flags": "faststart"}
|
149
|
+
output_stream = output.add_stream(args.video_codec, rate=target_fps, options=ops)
|
150
|
+
yield output_stream
|
151
|
+
if not isinstance(output_stream, av.VideoStream):
|
152
|
+
log.error(f"Not a known video codec: {args.video_codec}")
|
153
|
+
if src.videos and src.videos[0].lang is not None:
|
154
|
+
output_stream.metadata["language"] = src.videos[0].lang
|
144
155
|
|
145
156
|
if args.scale == 1.0:
|
146
157
|
target_width, target_height = tl.res
|
@@ -157,44 +168,33 @@ def render_av(
|
|
157
168
|
scale_graph.add("buffersink"),
|
158
169
|
)
|
159
170
|
|
160
|
-
|
161
|
-
|
162
|
-
|
163
|
-
|
164
|
-
|
165
|
-
|
166
|
-
|
167
|
-
|
168
|
-
|
169
|
-
|
170
|
-
|
171
|
-
|
172
|
-
|
173
|
-
|
174
|
-
|
175
|
-
|
176
|
-
|
177
|
-
|
178
|
-
|
179
|
-
|
180
|
-
|
181
|
-
|
182
|
-
# Fix videotoolbox issue with legacy macs
|
183
|
-
cmd += ["-allow_sw", "1"]
|
184
|
-
|
185
|
-
if apply_video_later:
|
186
|
-
cmd += ["-c:v", "mpeg4", "-qscale:v", "1"]
|
171
|
+
output_stream.width = target_width
|
172
|
+
output_stream.height = target_height
|
173
|
+
output_stream.pix_fmt = target_pix_fmt
|
174
|
+
output_stream.framerate = target_fps
|
175
|
+
|
176
|
+
color_range = src.videos[0].color_range
|
177
|
+
colorspace = src.videos[0].color_space
|
178
|
+
color_prim = src.videos[0].color_primaries
|
179
|
+
color_trc = src.videos[0].color_transfer
|
180
|
+
|
181
|
+
if color_range == 1 or color_range == 2:
|
182
|
+
output_stream.color_range = color_range
|
183
|
+
if colorspace in (0, 1) or (colorspace >= 3 and colorspace < 16):
|
184
|
+
output_stream.colorspace = colorspace
|
185
|
+
if color_prim == 1 or (color_prim >= 4 and color_prim < 17):
|
186
|
+
output_stream.color_primaries = color_prim
|
187
|
+
if color_trc == 1 or (color_trc >= 4 and color_trc < 22):
|
188
|
+
output_stream.color_trc = color_trc
|
189
|
+
|
190
|
+
if args.video_bitrate != "auto":
|
191
|
+
output_stream.bit_rate = parse_bitrate(args.video_bitrate, log)
|
192
|
+
log.debug(f"video bitrate: {output_stream.bit_rate}")
|
187
193
|
else:
|
188
|
-
|
194
|
+
log.debug(f"[auto] video bitrate: {output_stream.bit_rate}")
|
189
195
|
|
190
|
-
# Setting SAR requires re-encoding so we do it here.
|
191
196
|
if src is not None and src.videos and (sar := src.videos[0].sar) is not None:
|
192
|
-
|
193
|
-
|
194
|
-
cmd.append(spedup)
|
195
|
-
|
196
|
-
process2 = ffmpeg.Popen(cmd, stdin=PIPE, stdout=DEVNULL, stderr=DEVNULL)
|
197
|
-
assert process2.stdin is not None
|
197
|
+
output_stream.sample_aspect_ratio = sar
|
198
198
|
|
199
199
|
# First few frames can have an abnormal keyframe count, so never seek there.
|
200
200
|
seek = 10
|
@@ -206,142 +206,128 @@ def render_av(
|
|
206
206
|
bg = color(args.background)
|
207
207
|
null_frame = make_solid(target_width, target_height, target_pix_fmt, bg)
|
208
208
|
frame_index = -1
|
209
|
-
try:
|
210
|
-
for index in range(tl.end):
|
211
|
-
obj_list: list[VideoFrame | TlRect | TlImage] = []
|
212
|
-
for layer in tl.v:
|
213
|
-
for lobj in layer:
|
214
|
-
if isinstance(lobj, TlVideo):
|
215
|
-
if index >= lobj.start and index < (lobj.start + lobj.dur):
|
216
|
-
_i = round((lobj.offset + index - lobj.start) * lobj.speed)
|
217
|
-
obj_list.append(VideoFrame(_i, lobj.src))
|
218
|
-
elif index >= lobj.start and index < lobj.start + lobj.dur:
|
219
|
-
obj_list.append(lobj)
|
220
209
|
|
210
|
+
for index in range(tl.end):
|
211
|
+
obj_list: list[VideoFrame | TlRect | TlImage] = []
|
212
|
+
for layer in tl.v:
|
213
|
+
for lobj in layer:
|
214
|
+
if isinstance(lobj, TlVideo):
|
215
|
+
if index >= lobj.start and index < (lobj.start + lobj.dur):
|
216
|
+
_i = round((lobj.offset + index - lobj.start) * lobj.speed)
|
217
|
+
obj_list.append(VideoFrame(_i, lobj.src))
|
218
|
+
elif index >= lobj.start and index < lobj.start + lobj.dur:
|
219
|
+
obj_list.append(lobj)
|
220
|
+
|
221
|
+
if tl.v1 is not None:
|
222
|
+
# When there can be valid gaps in the timeline.
|
221
223
|
frame = null_frame
|
222
|
-
|
223
|
-
|
224
|
-
|
225
|
-
|
226
|
-
|
227
|
-
|
228
|
-
|
229
|
-
|
230
|
-
|
231
|
-
|
232
|
-
|
233
|
-
|
234
|
-
|
235
|
-
|
236
|
-
|
237
|
-
|
238
|
-
|
239
|
-
|
240
|
-
|
241
|
-
|
242
|
-
|
243
|
-
|
244
|
-
|
245
|
-
|
246
|
-
|
247
|
-
|
248
|
-
|
249
|
-
|
250
|
-
|
251
|
-
|
252
|
-
|
253
|
-
|
254
|
-
|
255
|
-
|
256
|
-
|
257
|
-
|
258
|
-
|
259
|
-
|
260
|
-
|
261
|
-
|
262
|
-
|
263
|
-
|
264
|
-
|
265
|
-
if (frame.width, frame.height) != tl.res:
|
266
|
-
width, height = tl.res
|
267
|
-
graph = av.filter.Graph()
|
268
|
-
graph.link_nodes(
|
269
|
-
graph.add_buffer(template=my_stream),
|
270
|
-
graph.add(
|
271
|
-
"scale",
|
272
|
-
f"{width}:{height}:force_original_aspect_ratio=decrease:eval=frame",
|
273
|
-
),
|
274
|
-
graph.add("pad", f"{width}:{height}:-1:-1:color={bg}"),
|
275
|
-
graph.add("buffersink"),
|
276
|
-
).vpush(frame)
|
277
|
-
frame = graph.vpull()
|
278
|
-
elif isinstance(obj, TlRect):
|
224
|
+
# else, use the last frame
|
225
|
+
|
226
|
+
for obj in obj_list:
|
227
|
+
if isinstance(obj, VideoFrame):
|
228
|
+
my_stream = cns[obj.src].streams.video[0]
|
229
|
+
if frame_index > obj.index:
|
230
|
+
log.debug(f"Seek: {frame_index} -> 0")
|
231
|
+
cns[obj.src].seek(0)
|
232
|
+
try:
|
233
|
+
frame = next(decoders[obj.src])
|
234
|
+
frame_index = round(frame.time * tl.tb)
|
235
|
+
except StopIteration:
|
236
|
+
pass
|
237
|
+
|
238
|
+
while frame_index < obj.index:
|
239
|
+
# Check if skipping ahead is worth it.
|
240
|
+
if (
|
241
|
+
obj.index - frame_index > seek_cost[obj.src]
|
242
|
+
and frame_index > seek
|
243
|
+
):
|
244
|
+
seek = frame_index + (seek_cost[obj.src] // 2)
|
245
|
+
seek_frame = frame_index
|
246
|
+
log.debug(f"Seek: {frame_index} -> {obj.index}")
|
247
|
+
cns[obj.src].seek(obj.index * tous[obj.src], stream=my_stream)
|
248
|
+
|
249
|
+
try:
|
250
|
+
frame = next(decoders[obj.src])
|
251
|
+
frame_index = round(frame.time * tl.tb)
|
252
|
+
except StopIteration:
|
253
|
+
log.debug(f"No source frame at {index=}. Using null frame")
|
254
|
+
frame = null_frame
|
255
|
+
break
|
256
|
+
|
257
|
+
if seek_frame is not None:
|
258
|
+
log.debug(f"Skipped {frame_index - seek_frame} frame indexes")
|
259
|
+
frames_saved += frame_index - seek_frame
|
260
|
+
seek_frame = None
|
261
|
+
if frame.key_frame:
|
262
|
+
log.debug(f"Keyframe {frame_index} {frame.pts}")
|
263
|
+
|
264
|
+
if (frame.width, frame.height) != tl.res:
|
265
|
+
width, height = tl.res
|
279
266
|
graph = av.filter.Graph()
|
280
|
-
x, y = obj.x, obj.y
|
281
267
|
graph.link_nodes(
|
282
268
|
graph.add_buffer(template=my_stream),
|
283
269
|
graph.add(
|
284
|
-
"
|
285
|
-
f"
|
270
|
+
"scale",
|
271
|
+
f"{width}:{height}:force_original_aspect_ratio=decrease:eval=frame",
|
286
272
|
),
|
273
|
+
graph.add("pad", f"{width}:{height}:-1:-1:color={bg}"),
|
287
274
|
graph.add("buffersink"),
|
288
275
|
).vpush(frame)
|
289
276
|
frame = graph.vpull()
|
290
|
-
|
291
|
-
|
292
|
-
|
293
|
-
|
294
|
-
|
295
|
-
|
296
|
-
|
297
|
-
|
298
|
-
|
299
|
-
|
300
|
-
|
301
|
-
|
302
|
-
|
303
|
-
|
304
|
-
|
305
|
-
|
306
|
-
|
307
|
-
|
308
|
-
|
309
|
-
|
310
|
-
|
311
|
-
|
312
|
-
|
313
|
-
|
314
|
-
|
315
|
-
|
316
|
-
|
317
|
-
|
318
|
-
|
319
|
-
|
320
|
-
|
321
|
-
|
322
|
-
|
323
|
-
|
324
|
-
|
325
|
-
|
326
|
-
|
327
|
-
|
328
|
-
|
329
|
-
|
330
|
-
|
331
|
-
|
332
|
-
|
333
|
-
|
334
|
-
|
335
|
-
|
336
|
-
|
337
|
-
|
338
|
-
|
339
|
-
|
340
|
-
|
341
|
-
|
342
|
-
|
343
|
-
|
344
|
-
|
277
|
+
elif isinstance(obj, TlRect):
|
278
|
+
graph = av.filter.Graph()
|
279
|
+
x, y = obj.x, obj.y
|
280
|
+
graph.link_nodes(
|
281
|
+
graph.add_buffer(template=my_stream),
|
282
|
+
graph.add(
|
283
|
+
"drawbox",
|
284
|
+
f"x={x}:y={y}:w={obj.width}:h={obj.height}:color={obj.fill}:t=fill",
|
285
|
+
),
|
286
|
+
graph.add("buffersink"),
|
287
|
+
).vpush(frame)
|
288
|
+
frame = graph.vpull()
|
289
|
+
elif isinstance(obj, TlImage):
|
290
|
+
img = img_cache[(obj.src, obj.width)]
|
291
|
+
array = frame.to_ndarray(format="rgb24")
|
292
|
+
|
293
|
+
overlay_h, overlay_w, _ = img.shape
|
294
|
+
x_pos, y_pos = obj.x, obj.y
|
295
|
+
|
296
|
+
x_start = max(x_pos, 0)
|
297
|
+
y_start = max(y_pos, 0)
|
298
|
+
x_end = min(x_pos + overlay_w, frame.width)
|
299
|
+
y_end = min(y_pos + overlay_h, frame.height)
|
300
|
+
|
301
|
+
# Clip the overlay image to fit into the frame
|
302
|
+
overlay_x_start = max(-x_pos, 0)
|
303
|
+
overlay_y_start = max(-y_pos, 0)
|
304
|
+
overlay_x_end = overlay_w - max((x_pos + overlay_w) - frame.width, 0)
|
305
|
+
overlay_y_end = overlay_h - max((y_pos + overlay_h) - frame.height, 0)
|
306
|
+
clipped_overlay = img[
|
307
|
+
overlay_y_start:overlay_y_end, overlay_x_start:overlay_x_end
|
308
|
+
]
|
309
|
+
|
310
|
+
# Create a region of interest (ROI) on the video frame
|
311
|
+
roi = array[y_start:y_end, x_start:x_end]
|
312
|
+
|
313
|
+
# Blend the overlay image with the ROI based on the opacity
|
314
|
+
roi = (1 - obj.opacity) * roi + obj.opacity * clipped_overlay
|
315
|
+
array[y_start:y_end, x_start:x_end] = roi
|
316
|
+
array = np.clip(array, 0, 255).astype(np.uint8)
|
317
|
+
|
318
|
+
frame = from_ndarray(array, format="rgb24")
|
319
|
+
|
320
|
+
if scale_graph is not None and frame.width != target_width:
|
321
|
+
scale_graph.vpush(frame)
|
322
|
+
frame = scale_graph.vpull()
|
323
|
+
|
324
|
+
if frame.format.name != target_pix_fmt:
|
325
|
+
frame = frame.reformat(format=target_pix_fmt)
|
326
|
+
bar.tick(index)
|
327
|
+
elif index % 3 == 0:
|
328
|
+
bar.tick(index)
|
329
|
+
|
330
|
+
yield from_ndarray(frame.to_ndarray(), format=frame.format.name)
|
331
|
+
|
332
|
+
bar.end()
|
345
333
|
log.debug(f"Total frames saved seeking: {frames_saved}")
|
346
|
-
|
347
|
-
return spedup, apply_video_later
|
auto_editor/subcommands/repl.py
CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
|
|
3
3
|
import sys
|
4
4
|
from dataclasses import dataclass, field
|
5
5
|
from fractions import Fraction
|
6
|
+
from os import environ
|
6
7
|
|
7
8
|
import auto_editor
|
8
9
|
from auto_editor.analyze import Levels
|
@@ -71,11 +72,19 @@ def main(sys_args: list[str] = sys.argv[1:]) -> None:
|
|
71
72
|
print(f"Auto-Editor {auto_editor.__version__}")
|
72
73
|
text = None
|
73
74
|
|
75
|
+
no_color = bool(environ.get("NO_COLOR") or environ.get("AV_LOG_FORCE_NOCOLOR"))
|
76
|
+
if no_color:
|
77
|
+
bold_pink = bold_red = reset = ""
|
78
|
+
else:
|
79
|
+
bold_pink = "\033[1;95m"
|
80
|
+
bold_red = "\033[1;31m"
|
81
|
+
reset = "\033[0m"
|
82
|
+
|
74
83
|
try:
|
75
84
|
while True:
|
76
85
|
try:
|
77
86
|
if text is None:
|
78
|
-
text = input("> ")
|
87
|
+
text = input(f"{bold_pink}>{reset} ")
|
79
88
|
else:
|
80
89
|
text += "\n" + input(" ")
|
81
90
|
except KeyboardInterrupt as e:
|
@@ -97,8 +106,8 @@ def main(sys_args: list[str] = sys.argv[1:]) -> None:
|
|
97
106
|
|
98
107
|
except ClosingError:
|
99
108
|
continue # Allow user to continue adding text
|
100
|
-
except
|
101
|
-
print(f"error: {e}")
|
109
|
+
except MyError as e:
|
110
|
+
print(f"{bold_red}error{reset}: {e}")
|
102
111
|
|
103
112
|
text = None
|
104
113
|
|
auto_editor/subcommands/test.py
CHANGED
@@ -141,7 +141,10 @@ def run_tests(tests: list[Callable], args: TestArgs) -> None:
|
|
141
141
|
except Exception as e:
|
142
142
|
dur = perf_counter() - start
|
143
143
|
total_time += dur
|
144
|
-
print(
|
144
|
+
print(
|
145
|
+
f"{name:<24} ({index}/{total}) {round(dur, 2):<4} secs \033[1;31m[FAILED]\033[0m",
|
146
|
+
flush=True,
|
147
|
+
)
|
145
148
|
if args.no_fail_fast:
|
146
149
|
print(f"\n{e}")
|
147
150
|
else:
|
@@ -150,8 +153,10 @@ def run_tests(tests: list[Callable], args: TestArgs) -> None:
|
|
150
153
|
raise e
|
151
154
|
else:
|
152
155
|
passed += 1
|
153
|
-
print(
|
154
|
-
|
156
|
+
print(
|
157
|
+
f"{name:<24} ({index}/{total}) {round(dur, 2):<4} secs [\033[1;32mPASSED\033[0m]",
|
158
|
+
flush=True,
|
159
|
+
)
|
155
160
|
if outputs is not None:
|
156
161
|
if isinstance(outputs, str):
|
157
162
|
outputs = [outputs]
|
@@ -234,7 +239,7 @@ def main(sys_args: list[str] | None = None):
|
|
234
239
|
video = cn.videos[0]
|
235
240
|
|
236
241
|
assert video.fps == 30
|
237
|
-
assert video.time_base == Fraction(1, 30)
|
242
|
+
# assert video.time_base == Fraction(1, 30)
|
238
243
|
assert video.width == 1280
|
239
244
|
assert video.height == 720
|
240
245
|
assert video.codec == "h264"
|
@@ -339,7 +344,10 @@ def main(sys_args: list[str] | None = None):
|
|
339
344
|
)
|
340
345
|
|
341
346
|
def track_tests():
|
342
|
-
|
347
|
+
out = run.main(["resources/multi-track.mov"], ["--keep_tracks_seperate"])
|
348
|
+
assert len(fileinfo(out).audios) == 2
|
349
|
+
|
350
|
+
return out
|
343
351
|
|
344
352
|
def export_json_tests():
|
345
353
|
out = run.main(["example.mp4"], ["--export_as_json"])
|
@@ -358,11 +366,11 @@ def main(sys_args: list[str] | None = None):
|
|
358
366
|
run.main(["example.mp4"], ["--export", 'premiere:name="Foo Bar"'])
|
359
367
|
|
360
368
|
def export_subtitles():
|
361
|
-
cn = fileinfo(run.main(["resources/mov_text.mp4"], []))
|
369
|
+
# cn = fileinfo(run.main(["resources/mov_text.mp4"], []))
|
362
370
|
|
363
|
-
assert len(cn.videos) == 1
|
364
|
-
assert len(cn.audios) == 1
|
365
|
-
assert len(cn.subtitles) == 1
|
371
|
+
# assert len(cn.videos) == 1
|
372
|
+
# assert len(cn.audios) == 1
|
373
|
+
# assert len(cn.subtitles) == 1
|
366
374
|
|
367
375
|
cn = fileinfo(run.main(["resources/webvtt.mkv"], []))
|
368
376
|
|
@@ -465,47 +473,44 @@ def main(sys_args: list[str] | None = None):
|
|
465
473
|
def frame_rate():
|
466
474
|
cn = fileinfo(run.main(["example.mp4"], ["-r", "15", "--no-seek"]))
|
467
475
|
video = cn.videos[0]
|
468
|
-
assert video.fps == 15
|
469
|
-
assert video.
|
470
|
-
assert float(video.duration) - 17.33333333333333333333333 < 3
|
476
|
+
assert video.fps == 15, video.fps
|
477
|
+
assert video.duration - 17.33333333333333333333333 < 3, video.duration
|
471
478
|
|
472
479
|
cn = fileinfo(run.main(["example.mp4"], ["-r", "20"]))
|
473
480
|
video = cn.videos[0]
|
474
|
-
assert video.fps == 20
|
475
|
-
assert video.
|
476
|
-
assert float(video.duration) - 17.33333333333333333333333 < 2
|
481
|
+
assert video.fps == 20, video.fps
|
482
|
+
assert video.duration - 17.33333333333333333333333 < 2
|
477
483
|
|
478
484
|
cn = fileinfo(out := run.main(["example.mp4"], ["-r", "60"]))
|
479
485
|
video = cn.videos[0]
|
480
486
|
|
481
|
-
assert video.fps == 60
|
482
|
-
assert video.
|
483
|
-
assert float(video.duration) - 17.33333333333333333333333 < 0.3
|
487
|
+
assert video.fps == 60, video.fps
|
488
|
+
assert video.duration - 17.33333333333333333333333 < 0.3
|
484
489
|
|
485
490
|
return out
|
486
491
|
|
487
|
-
def embedded_image():
|
488
|
-
|
489
|
-
|
490
|
-
|
491
|
-
|
492
|
+
# def embedded_image():
|
493
|
+
# out1 = run.main(["resources/embedded-image/h264-png.mp4"], [])
|
494
|
+
# cn = fileinfo(out1)
|
495
|
+
# assert cn.videos[0].codec == "h264"
|
496
|
+
# assert cn.videos[1].codec == "png"
|
492
497
|
|
493
|
-
|
494
|
-
|
495
|
-
|
496
|
-
|
498
|
+
# out2 = run.main(["resources/embedded-image/h264-mjpeg.mp4"], [])
|
499
|
+
# cn = fileinfo(out2)
|
500
|
+
# assert cn.videos[0].codec == "h264"
|
501
|
+
# assert cn.videos[1].codec == "mjpeg"
|
497
502
|
|
498
|
-
|
499
|
-
|
500
|
-
|
501
|
-
|
503
|
+
# out3 = run.main(["resources/embedded-image/h264-png.mkv"], [])
|
504
|
+
# cn = fileinfo(out3)
|
505
|
+
# assert cn.videos[0].codec == "h264"
|
506
|
+
# assert cn.videos[1].codec == "png"
|
502
507
|
|
503
|
-
|
504
|
-
|
505
|
-
|
506
|
-
|
508
|
+
# out4 = run.main(["resources/embedded-image/h264-mjpeg.mkv"], [])
|
509
|
+
# cn = fileinfo(out4)
|
510
|
+
# assert cn.videos[0].codec == "h264"
|
511
|
+
# assert cn.videos[1].codec == "mjpeg"
|
507
512
|
|
508
|
-
|
513
|
+
# return out1, out2, out3, out4
|
509
514
|
|
510
515
|
def motion():
|
511
516
|
out = run.main(
|
@@ -751,7 +756,6 @@ def main(sys_args: list[str] | None = None):
|
|
751
756
|
sr_units,
|
752
757
|
backwards_range,
|
753
758
|
cut_out,
|
754
|
-
embedded_image,
|
755
759
|
gif,
|
756
760
|
margin_tests,
|
757
761
|
input_extension,
|
auto_editor/utils/container.py
CHANGED
auto_editor/utils/func.py
CHANGED
@@ -41,7 +41,7 @@ def to_timecode(secs: float | Fraction, fmt: str) -> str:
|
|
41
41
|
if h == 0:
|
42
42
|
return f"{sign}{m:02d}:{s:06.3f}"
|
43
43
|
return f"{sign}{h:02d}:{m:02d}:{s:06.3f}"
|
44
|
-
if fmt == "mov_text":
|
44
|
+
if fmt == "srt" or fmt == "mov_text":
|
45
45
|
return f"{sign}{h:02d}:{m:02d}:" + f"{s:06.3f}".replace(".", ",", 1)
|
46
46
|
if fmt == "standard":
|
47
47
|
return f"{sign}{h:02d}:{m:02d}:{s:06.3f}"
|
auto_editor/utils/types.py
CHANGED
@@ -111,17 +111,6 @@ def sample_rate(val: str) -> int:
|
|
111
111
|
return natural(num)
|
112
112
|
|
113
113
|
|
114
|
-
def bitrate(val: str) -> str:
|
115
|
-
if val == "unset":
|
116
|
-
return val
|
117
|
-
_num, unit = _split_num_str(val)
|
118
|
-
num = int(_num) if _num.is_integer() else _num
|
119
|
-
if unit not in ("", "k", "K", "M"):
|
120
|
-
extra = f". Did you mean `{num}M`?" if unit == "m" else ""
|
121
|
-
raise CoerceError(f"`{val}` is not a valid bitrate format{extra}")
|
122
|
-
return val
|
123
|
-
|
124
|
-
|
125
114
|
def time(val: str, tb: Fraction) -> int:
|
126
115
|
if ":" in val:
|
127
116
|
boxes = val.split(":")
|
@@ -218,11 +207,9 @@ class Args:
|
|
218
207
|
yt_dlp_extras: str | None = None
|
219
208
|
video_codec: str = "auto"
|
220
209
|
audio_codec: str = "auto"
|
221
|
-
video_bitrate: str = "
|
222
|
-
audio_bitrate: str = "
|
223
|
-
video_quality_scale: str = "unset"
|
210
|
+
video_bitrate: str = "auto"
|
211
|
+
audio_bitrate: str = "auto"
|
224
212
|
scale: float = 1.0
|
225
|
-
extras: str | None = None
|
226
213
|
sn: bool = False
|
227
214
|
dn: bool = False
|
228
215
|
no_seek: bool = False
|