auto-editor 28.1.0__py3-none-any.whl → 29.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- auto_editor/__init__.py +3 -1
- auto_editor/__main__.py +31 -497
- auto_editor/cli.py +12 -0
- {auto_editor-28.1.0.dist-info → auto_editor-29.0.1.dist-info}/METADATA +5 -6
- auto_editor-29.0.1.dist-info/RECORD +9 -0
- auto_editor-29.0.1.dist-info/entry_points.txt +2 -0
- {auto_editor-28.1.0.dist-info → auto_editor-29.0.1.dist-info}/top_level.txt +0 -1
- auto_editor/analyze.py +0 -393
- auto_editor/cmds/__init__.py +0 -0
- auto_editor/cmds/cache.py +0 -69
- auto_editor/cmds/desc.py +0 -32
- auto_editor/cmds/info.py +0 -213
- auto_editor/cmds/levels.py +0 -199
- auto_editor/cmds/palet.py +0 -29
- auto_editor/cmds/repl.py +0 -113
- auto_editor/cmds/subdump.py +0 -72
- auto_editor/cmds/test.py +0 -816
- auto_editor/edit.py +0 -560
- auto_editor/exports/__init__.py +0 -0
- auto_editor/exports/fcp11.py +0 -195
- auto_editor/exports/fcp7.py +0 -313
- auto_editor/exports/json.py +0 -63
- auto_editor/exports/kdenlive.py +0 -322
- auto_editor/exports/shotcut.py +0 -147
- auto_editor/ffwrapper.py +0 -187
- auto_editor/help.py +0 -224
- auto_editor/imports/__init__.py +0 -0
- auto_editor/imports/fcp7.py +0 -275
- auto_editor/imports/json.py +0 -234
- auto_editor/json.py +0 -297
- auto_editor/lang/__init__.py +0 -0
- auto_editor/lang/libintrospection.py +0 -10
- auto_editor/lang/libmath.py +0 -23
- auto_editor/lang/palet.py +0 -724
- auto_editor/lang/stdenv.py +0 -1179
- auto_editor/lib/__init__.py +0 -0
- auto_editor/lib/contracts.py +0 -235
- auto_editor/lib/data_structs.py +0 -278
- auto_editor/lib/err.py +0 -2
- auto_editor/make_layers.py +0 -315
- auto_editor/preview.py +0 -93
- auto_editor/render/__init__.py +0 -0
- auto_editor/render/audio.py +0 -517
- auto_editor/render/subtitle.py +0 -205
- auto_editor/render/video.py +0 -307
- auto_editor/timeline.py +0 -331
- auto_editor/utils/__init__.py +0 -0
- auto_editor/utils/bar.py +0 -142
- auto_editor/utils/chunks.py +0 -2
- auto_editor/utils/cmdkw.py +0 -206
- auto_editor/utils/container.py +0 -101
- auto_editor/utils/func.py +0 -128
- auto_editor/utils/log.py +0 -126
- auto_editor/utils/types.py +0 -277
- auto_editor/vanparse.py +0 -313
- auto_editor-28.1.0.dist-info/RECORD +0 -57
- auto_editor-28.1.0.dist-info/entry_points.txt +0 -6
- docs/build.py +0 -70
- {auto_editor-28.1.0.dist-info → auto_editor-29.0.1.dist-info}/WHEEL +0 -0
- {auto_editor-28.1.0.dist-info → auto_editor-29.0.1.dist-info}/licenses/LICENSE +0 -0
auto_editor/render/subtitle.py
DELETED
@@ -1,205 +0,0 @@
|
|
1
|
-
from __future__ import annotations
|
2
|
-
|
3
|
-
import io
|
4
|
-
import os
|
5
|
-
import re
|
6
|
-
from dataclasses import dataclass
|
7
|
-
from typing import TYPE_CHECKING
|
8
|
-
|
9
|
-
import av
|
10
|
-
|
11
|
-
from auto_editor.utils.func import to_timecode
|
12
|
-
|
13
|
-
if TYPE_CHECKING:
|
14
|
-
from fractions import Fraction
|
15
|
-
|
16
|
-
from auto_editor.timeline import v3
|
17
|
-
from auto_editor.utils.chunks import Chunks
|
18
|
-
from auto_editor.utils.log import Log
|
19
|
-
|
20
|
-
Input = av.container.InputContainer
|
21
|
-
|
22
|
-
|
23
|
-
@dataclass(slots=True)
class SerialSub:
    # One parsed subtitle event, with times expressed in timebase "ticks".
    # `before` / `middle` / `after` keep the literal text surrounding the two
    # timecodes so the file can be re-serialized without losing formatting.
    start: int
    end: int
    before: str
    middle: str
    after: str
30
|
-
|
31
|
-
|
32
|
-
class SubtitleParser:
    """Parse, edit, and re-serialize text-based subtitle files.

    Supports ass/ssa, webvtt, and mov_text-style SRT text. Events are stored
    as `SerialSub` entries with times in ticks of the timebase `tb`.
    """

    def __init__(self, tb: Fraction) -> None:
        self.tb = tb
        self.contents: list[SerialSub] = []
        self.header = ""
        self.footer = ""

    @staticmethod
    def to_tick(text: str, codec: str, tb: Fraction) -> int:
        # Convert "HH:MM:SS.ms" (or shorter) into integer ticks of `tb`.
        # NOTE: `codec` is accepted but not used by this implementation.
        boxes = text.replace(",", ".").split(":")
        assert len(boxes) < 4

        # Reverse so seconds align with multiplier 1, minutes with 60, etc.
        boxes.reverse()
        multiply = (1, 60, 3600)
        seconds = 0.0
        for box, mul in zip(boxes, multiply):
            seconds += float(box) * mul

        return round(seconds * tb)

    def parse(self, text: str, codec: str) -> None:
        """Split `text` into header, timed events, and footer for `codec`."""
        self.codec = codec
        self.contents = []

        # Each pattern captures: (prefix)(start)(separator)(end)(suffix).
        if codec in {"ass", "ssa"}:
            time_code = re.compile(r"(.*)(\d+:\d+:[\d.]+)(.*)(\d+:\d+:[\d.]+)(.*)")
        elif codec == "webvtt":
            time_code = re.compile(r"()(\d+:[\d.]+)( --> )(\d+:[\d.]+)(\n.*)")
        elif codec == "mov_text":
            time_code = re.compile(r"()(\d+:\d+:[\d,]+)( --> )(\d+:\d+:[\d,]+)(\n.*)")
        else:
            raise ValueError(f"codec {codec} not supported.")

        i = 0
        for reg in re.finditer(time_code, text):
            i += 1
            if i == 1:
                # Everything before the first match is a passthrough header.
                self.header = text[: reg.span()[0]]

            self.contents.append(
                SerialSub(
                    self.to_tick(reg.group(2), self.codec, self.tb),
                    self.to_tick(reg.group(4), self.codec, self.tb),
                    reg.group(1),
                    reg.group(3),
                    f"{reg.group(5)}\n",
                )
            )

        if i == 0:
            self.header = ""
            self.footer = ""
        else:
            # Everything after the last match is a passthrough footer.
            self.footer = text[reg.span()[1] :]

    def edit(self, chunks: Chunks) -> None:
        """Shift/trim subtitle events to follow the cuts described by `chunks`.

        Chunks are processed in reverse so earlier shifts don't invalidate
        later positions. A chunk is (start_tick, end_tick, speed); speed 0 or
        >= 99999 means the section is removed entirely (factor 1).
        """
        for cut in reversed(chunks):
            the_speed = cut[2]
            speed_factor = (
                1 if (the_speed == 0 or the_speed >= 99999) else 1 - (1 / the_speed)
            )

            new_content = []
            for content in self.contents:
                if cut[0] <= content.end and cut[1] > content.start:
                    # Event overlaps the cut: remove the overlapped duration.
                    diff = int(
                        (min(cut[1], content.end) - max(cut[0], content.start))
                        * speed_factor
                    )
                    if content.start > cut[0]:
                        content.start -= diff
                        content.end -= diff

                    # NOTE(review): when content.start > cut[0], `end` is
                    # reduced by `diff` twice (here and just above) — this
                    # matches the original code but looks suspicious; verify.
                    content.end -= diff

                elif content.start >= cut[0]:
                    # Event is entirely after the cut: shift it left.
                    diff = int((cut[1] - cut[0]) * speed_factor)

                    content.start -= diff
                    content.end -= diff

                # Drop events whose duration collapsed to zero.
                if content.start != content.end:
                    new_content.append(content)

            self.contents = new_content

    def write(self, file_path: str) -> None:
        """Serialize header, events, and footer back to a subtitle file."""
        codec = self.codec
        with open(file_path, "w", encoding="utf-8") as file:
            file.write(self.header)
            for c in self.contents:
                file.write(
                    f"{c.before}{to_timecode(c.start / self.tb, codec)}"
                    + f"{c.middle}{to_timecode(c.end / self.tb, codec)}"
                    + c.after
                    + ("\n" if codec == "webvtt" else "")
                )
            file.write(self.footer)
|
130
|
-
|
131
|
-
|
132
|
-
def make_srt(input_: Input, stream: int) -> str:
    """Decode subtitle stream `stream` of `input_` and render it as SRT text."""
    buf = io.StringIO()
    sub_stream = input_.streams.subtitles[stream]
    assert sub_stream.time_base is not None

    counter = 1
    for packet in input_.demux(sub_stream):
        # Packets without full timing information can't be placed in the SRT.
        if packet.dts is None or packet.pts is None or packet.duration is None:
            continue

        begin = packet.pts * sub_stream.time_base
        start = to_timecode(begin, "srt")
        end = to_timecode(begin + packet.duration * sub_stream.time_base, "srt")

        for sub in packet.decode():
            assert isinstance(sub, av.subtitles.subtitle.AssSubtitle)

            buf.write(f"{counter}\n{start} --> {end}\n")
            buf.write(sub.dialogue.decode("utf-8", errors="ignore") + "\n\n")
            counter += 1

    buf.seek(0)
    return buf.getvalue()
|
154
|
-
|
155
|
-
|
156
|
-
def _ensure(input_: Input, format: str, stream: int) -> str:
    """Remux subtitle stream `stream` into container `format`, returning text."""
    sink = io.BytesIO()
    container = av.open(sink, "w", format=format)

    src_stream = input_.streams.subtitles[stream]
    dst_stream = container.add_stream_from_template(src_stream)

    for packet in input_.demux(src_stream):
        # Skip packets that lack a decode timestamp; they cannot be muxed.
        if packet.dts is None:
            continue
        packet.stream = dst_stream
        container.mux(packet)

    container.close()
    sink.seek(0)
    return sink.getvalue().decode("utf-8", errors="ignore")
|
172
|
-
|
173
|
-
|
174
|
-
def make_new_subtitles(tl: v3, log: Log) -> list[str]:
    """Extract, edit, and rewrite every subtitle stream of the v1 source.

    Returns the paths of the new subtitle files written into `log.temp`,
    or an empty list when the timeline has no v1 source.
    """
    if tl.v1 is None:
        return []

    input_ = av.open(tl.v1.source.path)
    new_paths = []

    for s, sub in enumerate(tl.v1.source.subtitles):
        # mov_text streams are skipped entirely. (The original code had a
        # later `make_srt` branch for mov_text, but it was unreachable dead
        # code because of this `continue` — it has been removed.)
        if sub.codec == "mov_text":
            continue

        parser = SubtitleParser(tl.tb)
        # `sub_format` avoids shadowing the `format` builtin.
        if sub.codec == "ssa":
            sub_format = "ass"
        elif sub.codec in {"webvtt", "ass"}:
            sub_format = sub.codec
        else:
            # log.error is expected not to return (raises/exits); otherwise
            # sub_format would be unbound below — same as the original code.
            log.error(f"Unknown subtitle codec: {sub.codec}")

        ret = _ensure(input_, sub_format, s)
        parser.parse(ret, sub_format)
        parser.edit(tl.v1.chunks)

        new_path = os.path.join(log.temp, f"new{s}s.{sub.ext}")
        parser.write(new_path)
        new_paths.append(new_path)

    input_.close()
    return new_paths
|
auto_editor/render/video.py
DELETED
@@ -1,307 +0,0 @@
|
|
1
|
-
from __future__ import annotations
|
2
|
-
|
3
|
-
from dataclasses import dataclass
|
4
|
-
from typing import TYPE_CHECKING
|
5
|
-
|
6
|
-
import av
|
7
|
-
import numpy as np
|
8
|
-
|
9
|
-
from auto_editor.timeline import Clip, TlImage, TlRect
|
10
|
-
from auto_editor.utils.func import parse_bitrate
|
11
|
-
|
12
|
-
if TYPE_CHECKING:
|
13
|
-
from collections.abc import Iterator
|
14
|
-
|
15
|
-
from auto_editor.__main__ import Args
|
16
|
-
from auto_editor.ffwrapper import FileInfo
|
17
|
-
from auto_editor.timeline import v3
|
18
|
-
from auto_editor.utils.log import Log
|
19
|
-
|
20
|
-
|
21
|
-
@dataclass(slots=True)
class VideoFrame:
    # A lazy reference to source frame number `index` inside file `src`;
    # render_av resolves it to a decoded frame via the per-source decoders.
    index: int
    src: FileInfo
|
25
|
-
|
26
|
-
|
27
|
-
def make_solid(width: int, height: int, pix_fmt: str, bg: str) -> av.VideoFrame:
    """Build a solid-color frame of the given size in pixel format `pix_fmt`.

    `bg` is a hex color string such as "#1e1e1e" (leading "#" optional).
    """
    hex_str = bg.lstrip("#").upper()
    r, g, b = (int(hex_str[i : i + 2], 16) for i in (0, 2, 4))

    pixels = np.full((height, width, 3), (r, g, b), dtype=np.uint8)
    frame = av.VideoFrame.from_ndarray(pixels, format="rgb24")
    return frame.reformat(format=pix_fmt)
|
34
|
-
|
35
|
-
|
36
|
-
def make_image_cache(tl: v3) -> dict[tuple[FileInfo, int], np.ndarray]:
    """Decode the first frame of every image object used on the timeline.

    Keys are (source file, requested width); a width of 0 keeps the native
    size. Values are RGB24 ndarrays ready to be blended onto video frames.
    """
    img_cache = {}
    for clip in tl.v:
        for obj in clip:
            if isinstance(obj, TlImage) and (obj.src, obj.width) not in img_cache:
                with av.open(obj.src.path) as cn:
                    my_stream = cn.streams.video[0]
                    for frame in cn.decode(my_stream):
                        if obj.width != 0:
                            # Scale to the requested width; -1 keeps aspect.
                            graph = av.filter.Graph()
                            graph.link_nodes(
                                graph.add_buffer(template=my_stream),
                                graph.add("scale", f"{obj.width}:-1"),
                                graph.add("buffersink"),
                            ).vpush(frame)
                            frame = graph.vpull()
                        img_cache[(obj.src, obj.width)] = frame.to_ndarray(
                            format="rgb24"
                        )
                        # Only the first decoded frame is needed per image.
                        break
    return img_cache
|
57
|
-
|
58
|
-
|
59
|
-
def render_av(
    output: av.container.OutputContainer, tl: v3, args: Args, log: Log
) -> Iterator[tuple[int, av.VideoFrame]]:
    """Generator that renders the timeline's video track frame by frame.

    Protocol: the first `yield` hands the newly created output stream back to
    the caller (before width/height/pix_fmt are finalized); every subsequent
    yield is an (index, frame) pair for timeline frame `index`.
    """
    from_ndarray = av.VideoFrame.from_ndarray

    cns: dict[FileInfo, av.container.InputContainer] = {}
    decoders: dict[FileInfo, Iterator[av.VideoFrame]] = {}
    seek_cost: dict[FileInfo, int] = {}   # min frame gap worth a seek
    tous: dict[FileInfo, int] = {}        # time-base units per frame

    pix_fmt = "yuv420p"  # Reasonable default
    target_fps = tl.tb  # Always constant
    img_cache = make_image_cache(tl)

    first_src: FileInfo | None = None
    for src in tl.sources:
        if first_src is None:
            first_src = src

        if src not in cns:
            cns[src] = av.open(f"{src.path}")

    for src, cn in cns.items():
        if len(cn.streams.video) > 0:
            stream = cn.streams.video[0]
            stream.thread_type = "AUTO"

            if args.no_seek or stream.average_rate is None or stream.time_base is None:
                # Effectively disable seeking by making its cost enormous.
                sc_val = 4294967295  # 2 ** 32 - 1
                tou = 0
            else:
                # Keyframes are usually spread out every 5 seconds or less.
                sc_val = int(stream.average_rate * 5)
                tou = int(stream.time_base.denominator / stream.average_rate)

            seek_cost[src] = sc_val
            tous[src] = tou
            decoders[src] = cn.decode(stream)

            if src == first_src and stream.pix_fmt is not None:
                pix_fmt = stream.pix_fmt

    log.debug(f"Tous: {tous}")
    log.debug(f"Clips: {tl.v}")

    output_stream = output.add_stream(args.video_codec, rate=target_fps)
    output_stream.options = {"x265-params": "log-level=error"}

    # If the codec doesn't support the source pix_fmt, fall back per codec.
    need_valid_fmt = not (
        output_stream.codec.video_formats
        and any(pix_fmt == vfmt.name for vfmt in output_stream.codec.video_formats)
    )
    if need_valid_fmt:
        match output_stream.codec.canonical_name:
            case "gif":
                pix_fmt = "rgb8"
            case "prores":
                pix_fmt = "yuv422p10le"
            case _:
                pix_fmt = "yuv420p"

    cc = output_stream.codec_context
    if args.vprofile is not None:
        if args.vprofile.title() not in cc.profiles:
            b = " ".join([f'"{x.lower()}"' for x in cc.profiles])
            log.error(
                f"`{args.vprofile}` is not a valid profile.\nprofiles supported: {b}"
            )

        cc.profile = args.vprofile.title()

    # Hand the stream to the caller before finalizing its parameters.
    yield output_stream  # type: ignore
    if not isinstance(output_stream, av.VideoStream):
        log.error(f"Not a known video codec: {args.video_codec}")
    # NOTE(review): `src` below is the *last* source from the loop above —
    # assumes tl.sources is non-empty and the last one has video; verify.
    if src.videos and src.videos[0].lang is not None:
        output_stream.metadata["language"] = src.videos[0].lang

    if args.scale == 1.0:
        target_width, target_height = tl.res
        scale_graph = None
    else:
        # Keep dimensions >= 2 so encoders with even-size requirements work.
        target_width = max(round(tl.res[0] * args.scale), 2)
        target_height = max(round(tl.res[1] * args.scale), 2)
        scale_graph = av.filter.Graph()
        scale_graph.link_nodes(
            scale_graph.add(
                "buffer", video_size="1x1", time_base="1/1", pix_fmt=pix_fmt
            ),
            scale_graph.add("scale", f"{target_width}:{target_height}"),
            scale_graph.add("buffersink"),
        )

    output_stream.width = target_width
    output_stream.height = target_height
    output_stream.pix_fmt = pix_fmt
    output_stream.framerate = target_fps

    # Propagate color metadata from the (last) source's first video stream.
    color_range = src.videos[0].color_range
    colorspace = src.videos[0].color_space
    color_prim = src.videos[0].color_primaries
    color_trc = src.videos[0].color_transfer

    if color_range in {1, 2}:
        output_stream.color_range = color_range
    if colorspace in {0, 1} or (colorspace >= 3 and colorspace < 16):
        output_stream.colorspace = colorspace
    if color_prim == 1 or (color_prim >= 4 and color_prim < 17):
        output_stream.color_primaries = color_prim
    if color_trc == 1 or (color_trc >= 4 and color_trc < 22):
        output_stream.color_trc = color_trc

    if args.video_bitrate != "auto":
        output_stream.bit_rate = parse_bitrate(args.video_bitrate, log)
        log.debug(f"video bitrate: {output_stream.bit_rate}")
    else:
        log.debug(f"[auto] video bitrate: {output_stream.bit_rate}")

    if src is not None and src.videos and (sar := src.videos[0].sar) is not None:
        output_stream.sample_aspect_ratio = sar

    # First few frames can have an abnormal keyframe count, so never seek there.
    seek = 10
    seek_frame = None
    frames_saved = 0

    bg = args.background
    null_frame = make_solid(target_width, target_height, pix_fmt, bg)
    frame_index = -1

    for index in range(tl.end):
        # Collect every object visible at this timeline index, layer order.
        obj_list: list[VideoFrame | TlRect | TlImage] = []
        for layer in tl.v:
            for lobj in layer:
                if isinstance(lobj, Clip):
                    if index >= lobj.start and index < (lobj.start + lobj.dur):
                        _i = round((lobj.offset + index - lobj.start) * lobj.speed)
                        obj_list.append(VideoFrame(_i, lobj.src))
                elif index >= lobj.start and index < lobj.start + lobj.dur:
                    obj_list.append(lobj)

        if tl.v1 is not None:
            # When there can be valid gaps in the timeline.
            frame = null_frame
        # else, use the last frame

        for obj in obj_list:
            if isinstance(obj, VideoFrame):
                my_stream = cns[obj.src].streams.video[0]
                # Decoders only move forward; rewind by seeking back to 0.
                if frame_index > obj.index:
                    log.debug(f"Seek: {frame_index} -> 0")
                    cns[obj.src].seek(0)
                    try:
                        frame = next(decoders[obj.src])
                        frame_index = round(frame.time * tl.tb)
                    except StopIteration:
                        pass

                while frame_index < obj.index:
                    # Check if skipping ahead is worth it.
                    if (
                        obj.index - frame_index > seek_cost[obj.src]
                        and frame_index > seek
                    ):
                        seek = frame_index + (seek_cost[obj.src] // 2)
                        seek_frame = frame_index
                        log.debug(f"Seek: {frame_index} -> {obj.index}")
                        cns[obj.src].seek(obj.index * tous[obj.src], stream=my_stream)

                    try:
                        frame = next(decoders[obj.src])
                        frame_index = round(frame.time * tl.tb)
                    except StopIteration:
                        log.debug(f"No source frame at {index=}. Using null frame")
                        frame = null_frame
                        break

                    if seek_frame is not None:
                        log.debug(f"Skipped {frame_index - seek_frame} frame indexes")
                        frames_saved += frame_index - seek_frame
                        seek_frame = None
                    if frame.key_frame:
                        log.debug(f"Keyframe {frame_index} {frame.pts}")

                # Letterbox/pillarbox source frames that don't match tl.res.
                if (frame.width, frame.height) != tl.res:
                    width, height = tl.res
                    graph = av.filter.Graph()
                    graph.link_nodes(
                        graph.add_buffer(template=my_stream),
                        graph.add(
                            "scale",
                            f"{width}:{height}:force_original_aspect_ratio=decrease:eval=frame",
                        ),
                        graph.add("pad", f"{width}:{height}:-1:-1:color={bg}"),
                        graph.add("buffersink"),
                    ).vpush(frame)
                    frame = graph.vpull()
            elif isinstance(obj, TlRect):
                graph = av.filter.Graph()
                x, y = obj.x, obj.y
                graph.link_nodes(
                    graph.add_buffer(template=my_stream),
                    graph.add(
                        "drawbox",
                        f"x={x}:y={y}:w={obj.width}:h={obj.height}:color={obj.fill}:t=fill",
                    ),
                    graph.add("buffersink"),
                ).vpush(frame)
                frame = graph.vpull()
            elif isinstance(obj, TlImage):
                img = img_cache[(obj.src, obj.width)]
                array = frame.to_ndarray(format="rgb24")

                overlay_h, overlay_w, _ = img.shape
                x_pos, y_pos = obj.x, obj.y

                x_start = max(x_pos, 0)
                y_start = max(y_pos, 0)
                x_end = min(x_pos + overlay_w, frame.width)
                y_end = min(y_pos + overlay_h, frame.height)

                # Clip the overlay image to fit into the frame
                overlay_x_start = max(-x_pos, 0)
                overlay_y_start = max(-y_pos, 0)
                overlay_x_end = overlay_w - max((x_pos + overlay_w) - frame.width, 0)
                overlay_y_end = overlay_h - max((y_pos + overlay_h) - frame.height, 0)
                clipped_overlay = img[
                    overlay_y_start:overlay_y_end, overlay_x_start:overlay_x_end
                ]

                # Create a region of interest (ROI) on the video frame
                roi = array[y_start:y_end, x_start:x_end]

                # Blend the overlay image with the ROI based on the opacity
                roi = (1 - obj.opacity) * roi + obj.opacity * clipped_overlay  # type: ignore
                array[y_start:y_end, x_start:x_end] = roi
                array = np.clip(array, 0, 255).astype(np.uint8)

                frame = from_ndarray(array, format="rgb24")

        if scale_graph is not None and frame.width != target_width:
            scale_graph.vpush(frame)
            frame = scale_graph.vpull()

        frame = frame.reformat(format=pix_fmt)
        # Let the muxer assign timestamps; clear decoder-provided timing.
        frame.pts = None  # type: ignore
        frame.time_base = 0  # type: ignore
        yield (index, frame)

    log.debug(f"Total frames saved seeking: {frames_saved}")
|