auto-editor 25.3.0__py3-none-any.whl → 26.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- auto_editor/__init__.py +1 -1
- auto_editor/__main__.py +67 -16
- auto_editor/analyze.py +11 -14
- auto_editor/edit.py +177 -52
- auto_editor/ffwrapper.py +36 -114
- auto_editor/help.py +4 -3
- auto_editor/output.py +22 -183
- auto_editor/render/audio.py +66 -57
- auto_editor/render/subtitle.py +74 -13
- auto_editor/render/video.py +166 -180
- auto_editor/subcommands/repl.py +12 -3
- auto_editor/subcommands/test.py +47 -36
- auto_editor/utils/container.py +2 -0
- auto_editor/utils/func.py +1 -27
- auto_editor/utils/types.py +2 -15
- {auto_editor-25.3.0.dist-info → auto_editor-26.0.0.dist-info}/METADATA +2 -2
- {auto_editor-25.3.0.dist-info → auto_editor-26.0.0.dist-info}/RECORD +22 -24
- {auto_editor-25.3.0.dist-info → auto_editor-26.0.0.dist-info}/WHEEL +1 -1
- docs/build.py +1 -0
- auto_editor/utils/subtitle_tools.py +0 -29
- auto_editor/validate_input.py +0 -88
- {auto_editor-25.3.0.dist-info → auto_editor-26.0.0.dist-info}/LICENSE +0 -0
- {auto_editor-25.3.0.dist-info → auto_editor-26.0.0.dist-info}/entry_points.txt +0 -0
- {auto_editor-25.3.0.dist-info → auto_editor-26.0.0.dist-info}/top_level.txt +0 -0
auto_editor/render/subtitle.py
CHANGED
@@ -1,18 +1,23 @@
 from __future__ import annotations
 
+import io
 import os
 import re
 from dataclasses import dataclass
 from typing import TYPE_CHECKING
 
+import av
+
 from auto_editor.utils.func import to_timecode
 
 if TYPE_CHECKING:
     from fractions import Fraction
 
-    from auto_editor.output import Ensure
     from auto_editor.timeline import v3
     from auto_editor.utils.chunks import Chunks
+    from auto_editor.utils.log import Log
+
+Input = av.container.InputContainer
 
 
 @dataclass(slots=True)
@@ -26,7 +31,6 @@ class SerialSub:
 
 class SubtitleParser:
     def __init__(self, tb: Fraction) -> None:
-        self.supported_codecs = ("ass", "webvtt", "mov_text")
         self.tb = tb
         self.contents: list[SerialSub] = []
         self.header = ""
@@ -112,35 +116,92 @@ class SubtitleParser:
         self.contents = new_content
 
     def write(self, file_path: str) -> None:
+        codec = self.codec
         with open(file_path, "w", encoding="utf-8") as file:
             file.write(self.header)
             for c in self.contents:
                 file.write(
-                    f"{c.before}{to_timecode(c.start / self.tb,
-                    f"{c.middle}{to_timecode(c.end / self.tb,
-
+                    f"{c.before}{to_timecode(c.start / self.tb, codec)}"
+                    + f"{c.middle}{to_timecode(c.end / self.tb, codec)}"
+                    + c.after
+                    + ("\n" if codec == "webvtt" else "")
                 )
             file.write(self.footer)
 
 
-def
+def make_srt(input_: Input, stream: int) -> str:
+    output_bytes = io.StringIO()
+    input_stream = input_.streams.subtitles[stream]
+    assert input_stream.time_base is not None
+    s = 1
+    for packet in input_.demux(input_stream):
+        if packet.dts is None or packet.pts is None or packet.duration is None:
+            continue
+
+        start = packet.pts * input_stream.time_base
+        end = start + packet.duration * input_stream.time_base
+
+        for subset in packet.decode():
+            start_time = to_timecode(start, "srt")
+            end_time = to_timecode(end, "srt")
+
+            sub = subset[0]
+            assert len(subset) == 1
+            assert isinstance(sub, av.subtitles.subtitle.AssSubtitle)
+
+            output_bytes.write(f"{s}\n{start_time} --> {end_time}\n")
+            output_bytes.write(sub.dialogue.decode("utf-8", errors="ignore") + "\n\n")
+            s += 1
+
+    output_bytes.seek(0)
+    return output_bytes.getvalue()
+
+
+def _ensure(input_: Input, format: str, stream: int, log: Log) -> str:
+    output_bytes = io.BytesIO()
+    output = av.open(output_bytes, "w", format=format)
+
+    in_stream = input_.streams.subtitles[stream]
+    out_stream = output.add_stream(template=in_stream)
+
+    for packet in input_.demux(in_stream):
+        if packet.dts is None:
+            continue
+        packet.stream = out_stream
+        output.mux(packet)
+
+    output.close()
+    output_bytes.seek(0)
+    return output_bytes.getvalue().decode("utf-8", errors="ignore")
+
+
+def make_new_subtitles(tl: v3, log: Log) -> list[str]:
     if tl.v1 is None:
         return []
 
+    input_ = av.open(tl.v1.source.path)
     new_paths = []
 
     for s, sub in enumerate(tl.v1.source.subtitles):
-
-
+        if sub.codec == "mov_text":
+            continue
 
-
-
-
-
-
+        parser = SubtitleParser(tl.tb)
+        if sub.codec in ("webvtt", "ass", "ssa"):
+            format = sub.codec
+        else:
+            log.error(f"Unknown subtitle codec: {sub.codec}")
 
+        if sub.codec == "mov_text":
+            ret = make_srt(input_, s)
+        else:
+            ret = _ensure(input_, format, s, log)
+        parser.parse(ret, sub.codec)
         parser.edit(tl.v1.chunks)
+
+        new_path = os.path.join(log.temp, f"new{s}s.{sub.ext}")
         parser.write(new_path)
         new_paths.append(new_path)
 
+    input_.close()
     return new_paths
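The new make_srt and _ensure helpers read subtitle streams with PyAV instead of shelling out to ffmpeg. Below is a minimal standalone sketch of that demux pattern; the file name and stream index are placeholders for illustration, not values from this diff:

import av

# Sketch only: iterate a subtitle stream's packets and recover their timing,
# the same PyAV calls make_srt() builds on.
with av.open("input.mkv") as container:
    stream = container.streams.subtitles[0]
    assert stream.time_base is not None
    for packet in container.demux(stream):
        if packet.pts is None or packet.duration is None:
            continue
        start = packet.pts * stream.time_base            # presentation time (Fraction)
        end = start + packet.duration * stream.time_base
        for subset in packet.decode():                   # one SubtitleSet per packet
            for sub in subset:
                print(float(start), float(end), sub)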
auto_editor/render/video.py
CHANGED
@@ -1,26 +1,22 @@
 from __future__ import annotations
 
-import os.path
 from dataclasses import dataclass
-from subprocess import DEVNULL, PIPE
-from sys import platform
 from typing import TYPE_CHECKING
 
 import av
 import numpy as np
 
-from auto_editor.output import
+from auto_editor.output import parse_bitrate
 from auto_editor.timeline import TlImage, TlRect, TlVideo
-from auto_editor.utils.encoder import encoders
 from auto_editor.utils.types import color
 
 if TYPE_CHECKING:
     from collections.abc import Iterator
+    from typing import Any
 
-    from auto_editor.ffwrapper import
+    from auto_editor.ffwrapper import FileInfo
     from auto_editor.timeline import v3
     from auto_editor.utils.bar import Bar
-    from auto_editor.utils.container import Container
     from auto_editor.utils.log import Log
     from auto_editor.utils.types import Args
 
@@ -90,8 +86,10 @@ def make_image_cache(tl: v3) -> dict[tuple[FileInfo, int], np.ndarray]:
 
 
 def render_av(
-
-) ->
+    output: av.container.OutputContainer, tl: v3, args: Args, bar: Bar, log: Log
+) -> Any:
+    from_ndarray = av.VideoFrame.from_ndarray
+
     src = tl.src
     cns: dict[FileInfo, av.container.InputContainer] = {}
     decoders: dict[FileInfo, Iterator[av.VideoFrame]] = {}
@@ -99,8 +97,8 @@ def render_av(
     tous: dict[FileInfo, int] = {}
 
     target_pix_fmt = "yuv420p" # Reasonable default
+    target_fps = tl.tb # Always constant
     img_cache = make_image_cache(tl)
-    temp = log.temp
 
     first_src: FileInfo | None = None
     for src in tl.sources:
@@ -133,14 +131,27 @@ def render_av(
     log.debug(f"Tous: {tous}")
     log.debug(f"Clips: {tl.v}")
 
-
-
-
-
-
+    if args.video_codec == "gif":
+        _c = av.Codec("gif", "w")
+        if _c.video_formats is not None and target_pix_fmt in (
+            f.name for f in _c.video_formats
+        ):
+            target_pix_fmt = target_pix_fmt
+        else:
+            target_pix_fmt = "rgb8"
+        del _c
+    else:
+        target_pix_fmt = (
+            target_pix_fmt if target_pix_fmt in allowed_pix_fmt else "yuv420p"
+        )
 
-
+    ops = {"mov_flags": "faststart"}
+    output_stream = output.add_stream(args.video_codec, rate=target_fps, options=ops)
+    yield output_stream
+    if not isinstance(output_stream, av.VideoStream):
+        log.error(f"Not a known video codec: {args.video_codec}")
+    if src.videos and src.videos[0].lang is not None:
+        output_stream.metadata["language"] = src.videos[0].lang
 
     if args.scale == 1.0:
         target_width, target_height = tl.res
@@ -157,44 +168,33 @@ def render_av(
         scale_graph.add("buffersink"),
     )
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        # Fix videotoolbox issue with legacy macs
-        cmd += ["-allow_sw", "1"]
-
-    if apply_video_later:
-        cmd += ["-c:v", "mpeg4", "-qscale:v", "1"]
+    output_stream.width = target_width
+    output_stream.height = target_height
+    output_stream.pix_fmt = target_pix_fmt
+    output_stream.framerate = target_fps
+
+    color_range = src.videos[0].color_range
+    colorspace = src.videos[0].color_space
+    color_prim = src.videos[0].color_primaries
+    color_trc = src.videos[0].color_transfer
+
+    if color_range == 1 or color_range == 2:
+        output_stream.color_range = color_range
+    if colorspace in (0, 1) or (colorspace >= 3 and colorspace < 16):
+        output_stream.colorspace = colorspace
+    if color_prim == 1 or (color_prim >= 4 and color_prim < 17):
+        output_stream.color_primaries = color_prim
+    if color_trc == 1 or (color_trc >= 4 and color_trc < 22):
+        output_stream.color_trc = color_trc
+
+    if args.video_bitrate != "auto":
+        output_stream.bit_rate = parse_bitrate(args.video_bitrate, log)
+        log.debug(f"video bitrate: {output_stream.bit_rate}")
     else:
-
+        log.debug(f"[auto] video bitrate: {output_stream.bit_rate}")
 
-    # Setting SAR requires re-encoding so we do it here.
     if src is not None and src.videos and (sar := src.videos[0].sar) is not None:
-
-
-    cmd.append(spedup)
-
-    process2 = ffmpeg.Popen(cmd, stdin=PIPE, stdout=DEVNULL, stderr=DEVNULL)
-    assert process2.stdin is not None
+        output_stream.sample_aspect_ratio = sar
 
     # First few frames can have an abnormal keyframe count, so never seek there.
     seek = 10
@@ -206,142 +206,128 @@ def render_av(
     bg = color(args.background)
     null_frame = make_solid(target_width, target_height, target_pix_fmt, bg)
     frame_index = -1
-    try:
-        for index in range(tl.end):
-            obj_list: list[VideoFrame | TlRect | TlImage] = []
-            for layer in tl.v:
-                for lobj in layer:
-                    if isinstance(lobj, TlVideo):
-                        if index >= lobj.start and index < (lobj.start + lobj.dur):
-                            _i = round((lobj.offset + index - lobj.start) * lobj.speed)
-                            obj_list.append(VideoFrame(_i, lobj.src))
-                    elif index >= lobj.start and index < lobj.start + lobj.dur:
-                        obj_list.append(lobj)
 
+    for index in range(tl.end):
+        obj_list: list[VideoFrame | TlRect | TlImage] = []
+        for layer in tl.v:
+            for lobj in layer:
+                if isinstance(lobj, TlVideo):
+                    if index >= lobj.start and index < (lobj.start + lobj.dur):
+                        _i = round((lobj.offset + index - lobj.start) * lobj.speed)
+                        obj_list.append(VideoFrame(_i, lobj.src))
+                elif index >= lobj.start and index < lobj.start + lobj.dur:
+                    obj_list.append(lobj)
+
+        if tl.v1 is not None:
+            # When there can be valid gaps in the timeline.
             frame = null_frame
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    if (frame.width, frame.height) != tl.res:
-                        width, height = tl.res
-                        graph = av.filter.Graph()
-                        graph.link_nodes(
-                            graph.add_buffer(template=my_stream),
-                            graph.add(
-                                "scale",
-                                f"{width}:{height}:force_original_aspect_ratio=decrease:eval=frame",
-                            ),
-                            graph.add("pad", f"{width}:{height}:-1:-1:color={bg}"),
-                            graph.add("buffersink"),
-                        ).vpush(frame)
-                        frame = graph.vpull()
-            elif isinstance(obj, TlRect):
+        # else, use the last frame
+
+        for obj in obj_list:
+            if isinstance(obj, VideoFrame):
+                my_stream = cns[obj.src].streams.video[0]
+                if frame_index > obj.index:
+                    log.debug(f"Seek: {frame_index} -> 0")
+                    cns[obj.src].seek(0)
+                    try:
+                        frame = next(decoders[obj.src])
+                        frame_index = round(frame.time * tl.tb)
+                    except StopIteration:
+                        pass
+
+                while frame_index < obj.index:
+                    # Check if skipping ahead is worth it.
+                    if (
+                        obj.index - frame_index > seek_cost[obj.src]
+                        and frame_index > seek
+                    ):
+                        seek = frame_index + (seek_cost[obj.src] // 2)
+                        seek_frame = frame_index
+                        log.debug(f"Seek: {frame_index} -> {obj.index}")
+                        cns[obj.src].seek(obj.index * tous[obj.src], stream=my_stream)
+
+                    try:
+                        frame = next(decoders[obj.src])
+                        frame_index = round(frame.time * tl.tb)
+                    except StopIteration:
+                        log.debug(f"No source frame at {index=}. Using null frame")
+                        frame = null_frame
+                        break
+
+                    if seek_frame is not None:
+                        log.debug(f"Skipped {frame_index - seek_frame} frame indexes")
+                        frames_saved += frame_index - seek_frame
+                        seek_frame = None
+                    if frame.key_frame:
+                        log.debug(f"Keyframe {frame_index} {frame.pts}")
+
+                if (frame.width, frame.height) != tl.res:
+                    width, height = tl.res
                     graph = av.filter.Graph()
-                    x, y = obj.x, obj.y
                     graph.link_nodes(
                         graph.add_buffer(template=my_stream),
                         graph.add(
-                            "
-                            f"
+                            "scale",
+                            f"{width}:{height}:force_original_aspect_ratio=decrease:eval=frame",
                         ),
+                        graph.add("pad", f"{width}:{height}:-1:-1:color={bg}"),
                         graph.add("buffersink"),
                     ).vpush(frame)
                     frame = graph.vpull()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            elif isinstance(obj, TlRect):
+                graph = av.filter.Graph()
+                x, y = obj.x, obj.y
+                graph.link_nodes(
+                    graph.add_buffer(template=my_stream),
+                    graph.add(
+                        "drawbox",
+                        f"x={x}:y={y}:w={obj.width}:h={obj.height}:color={obj.fill}:t=fill",
+                    ),
+                    graph.add("buffersink"),
+                ).vpush(frame)
+                frame = graph.vpull()
+            elif isinstance(obj, TlImage):
+                img = img_cache[(obj.src, obj.width)]
+                array = frame.to_ndarray(format="rgb24")
+
+                overlay_h, overlay_w, _ = img.shape
+                x_pos, y_pos = obj.x, obj.y
+
+                x_start = max(x_pos, 0)
+                y_start = max(y_pos, 0)
+                x_end = min(x_pos + overlay_w, frame.width)
+                y_end = min(y_pos + overlay_h, frame.height)
+
+                # Clip the overlay image to fit into the frame
+                overlay_x_start = max(-x_pos, 0)
+                overlay_y_start = max(-y_pos, 0)
+                overlay_x_end = overlay_w - max((x_pos + overlay_w) - frame.width, 0)
+                overlay_y_end = overlay_h - max((y_pos + overlay_h) - frame.height, 0)
+                clipped_overlay = img[
+                    overlay_y_start:overlay_y_end, overlay_x_start:overlay_x_end
+                ]
+
+                # Create a region of interest (ROI) on the video frame
+                roi = array[y_start:y_end, x_start:x_end]
+
+                # Blend the overlay image with the ROI based on the opacity
+                roi = (1 - obj.opacity) * roi + obj.opacity * clipped_overlay
+                array[y_start:y_end, x_start:x_end] = roi
+                array = np.clip(array, 0, 255).astype(np.uint8)
+
+                frame = from_ndarray(array, format="rgb24")
+
+        if scale_graph is not None and frame.width != target_width:
+            scale_graph.vpush(frame)
+            frame = scale_graph.vpull()
+
+        if frame.format.name != target_pix_fmt:
+            frame = frame.reformat(format=target_pix_fmt)
+            bar.tick(index)
+        elif index % 3 == 0:
+            bar.tick(index)
+
+        yield from_ndarray(frame.to_ndarray(), format=frame.format.name)
+
+    bar.end()
     log.debug(f"Total frames saved seeking: {frames_saved}")
-
-    return spedup, apply_video_later
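These hunks drop the raw-frame pipe into an ffmpeg subprocess and instead configure an av.VideoStream on the PyAV output container. Below is a minimal sketch of that encode path, independent of auto-editor; the codec, resolution, frame rate, and output name are assumptions for illustration:

import av
import numpy as np

# Sketch only: encode frames directly with PyAV instead of piping to ffmpeg.
output = av.open("out.mp4", "w")
stream = output.add_stream("h264", rate=30, options={"mov_flags": "faststart"})
stream.width = 640
stream.height = 360
stream.pix_fmt = "yuv420p"

for _ in range(90):  # three seconds of solid gray frames at 30 fps
    array = np.full((360, 640, 3), 128, dtype=np.uint8)
    frame = av.VideoFrame.from_ndarray(array, format="rgb24")
    output.mux(stream.encode(frame))  # PyAV converts rgb24 to yuv420p on encode

output.mux(stream.encode())  # flush buffered packets
output.close()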
auto_editor/subcommands/repl.py
CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
 import sys
 from dataclasses import dataclass, field
 from fractions import Fraction
+from os import environ
 
 import auto_editor
 from auto_editor.analyze import Levels
@@ -71,11 +72,19 @@ def main(sys_args: list[str] = sys.argv[1:]) -> None:
     print(f"Auto-Editor {auto_editor.__version__}")
     text = None
 
+    no_color = bool(environ.get("NO_COLOR") or environ.get("AV_LOG_FORCE_NOCOLOR"))
+    if no_color:
+        bold_pink = bold_red = reset = ""
+    else:
+        bold_pink = "\033[1;95m"
+        bold_red = "\033[1;31m"
+        reset = "\033[0m"
+
     try:
         while True:
             try:
                 if text is None:
-                    text = input("> ")
+                    text = input(f"{bold_pink}>{reset} ")
                 else:
                     text += "\n" + input(" ")
             except KeyboardInterrupt as e:
@@ -97,8 +106,8 @@ def main(sys_args: list[str] = sys.argv[1:]) -> None:
 
             except ClosingError:
                 continue # Allow user to continue adding text
-            except
-                print(f"error: {e}")
+            except MyError as e:
+                print(f"{bold_red}error{reset}: {e}")
 
             text = None
 
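The REPL prompt and error text now honor the NO_COLOR convention: a non-empty value disables ANSI escapes. Below is a standalone sketch of the same check; the printed strings are illustrative only:

from os import environ

# Sketch only: skip colors when NO_COLOR or AV_LOG_FORCE_NOCOLOR is set.
no_color = bool(environ.get("NO_COLOR") or environ.get("AV_LOG_FORCE_NOCOLOR"))
if no_color:
    bold_pink = bold_red = reset = ""
else:
    bold_pink, bold_red, reset = "\033[1;95m", "\033[1;31m", "\033[0m"

print(f"{bold_pink}>{reset} ready")
print(f"{bold_red}error{reset}: example message")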