auto-editor 24.27.1__tar.gz → 24.30.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {auto_editor-24.27.1/auto_editor.egg-info → auto_editor-24.30.1}/PKG-INFO +3 -6
- {auto_editor-24.27.1 → auto_editor-24.30.1}/README.md +0 -3
- auto_editor-24.30.1/auto_editor/__init__.py +2 -0
- auto_editor-24.30.1/auto_editor/analyze.py +377 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/edit.py +14 -21
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/lang/palet.py +29 -9
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/lib/contracts.py +12 -6
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/make_layers.py +4 -25
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/output.py +4 -4
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/preview.py +2 -3
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/render/subtitle.py +1 -1
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/render/video.py +8 -18
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/subcommands/levels.py +52 -37
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/subcommands/repl.py +4 -13
- auto_editor-24.30.1/auto_editor/subcommands/subdump.py +20 -0
- auto_editor-24.30.1/auto_editor/utils/container.py +96 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1/auto_editor.egg-info}/PKG-INFO +3 -6
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor.egg-info/SOURCES.txt +0 -3
- auto_editor-24.30.1/auto_editor.egg-info/requires.txt +3 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor.egg-info/top_level.txt +0 -1
- {auto_editor-24.27.1 → auto_editor-24.30.1}/pyproject.toml +2 -2
- auto_editor-24.27.1/ae-ffmpeg/ae_ffmpeg/__init__.py +0 -16
- auto_editor-24.27.1/ae-ffmpeg/ae_ffmpeg/py.typed +0 -0
- auto_editor-24.27.1/ae-ffmpeg/setup.py +0 -65
- auto_editor-24.27.1/auto_editor/__init__.py +0 -2
- auto_editor-24.27.1/auto_editor/analyze.py +0 -436
- auto_editor-24.27.1/auto_editor/subcommands/subdump.py +0 -23
- auto_editor-24.27.1/auto_editor/utils/container.py +0 -338
- auto_editor-24.27.1/auto_editor.egg-info/requires.txt +0 -3
- {auto_editor-24.27.1 → auto_editor-24.30.1}/LICENSE +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/__main__.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/ffwrapper.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/formats/__init__.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/formats/fcp11.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/formats/fcp7.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/formats/json.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/formats/shotcut.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/formats/utils.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/help.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/lang/__init__.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/lang/json.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/lang/libmath.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/lib/__init__.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/lib/data_structs.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/lib/err.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/render/__init__.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/render/audio.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/subcommands/__init__.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/subcommands/desc.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/subcommands/info.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/subcommands/palet.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/subcommands/test.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/timeline.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/utils/__init__.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/utils/bar.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/utils/chunks.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/utils/cmdkw.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/utils/encoder.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/utils/func.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/utils/log.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/utils/subtitle_tools.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/utils/types.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/validate_input.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/vanparse.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/wavfile.py +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor.egg-info/dependency_links.txt +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor.egg-info/entry_points.txt +0 -0
- {auto_editor-24.27.1 → auto_editor-24.30.1}/setup.cfg +0 -0
{auto_editor-24.27.1/auto_editor.egg-info → auto_editor-24.30.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-editor
-Version: 24.27.1
+Version: 24.30.1
 Summary: Auto-Editor: Effort free video editing!
 Author-email: WyattBlue <wyattblue@auto-editor.com>
 License: Unlicense
@@ -11,8 +11,8 @@ Keywords: video,audio,media,editor,editing,processing,nonlinear,automatic,silenc
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: numpy>=1.
-Requires-Dist: pyav==12.
+Requires-Dist: numpy>=1.23.0
+Requires-Dist: pyav==12.3.*
 Requires-Dist: ae-ffmpeg==1.2.*
 
 <p align="center"><img src="https://auto-editor.com/img/auto-editor-banner.webp" title="Auto-Editor" width="700"></p>
@@ -182,6 +182,3 @@ auto-editor --margin --help
 
 ## Copyright
 Auto-Editor is under the [Public Domain](https://github.com/WyattBlue/auto-editor/blob/master/LICENSE) and includes all directories besides the ones listed below. Auto-Editor was created by [these people.](https://auto-editor.com/blog/thank-you-early-testers)
-
-ae-ffmpeg is under the [LGPLv3 License](https://github.com/WyattBlue/auto-editor/blob/master/ae-ffmpeg/LICENSE.txt). The ffmpeg and ffprobe programs were created by the FFmpeg team.
-
{auto_editor-24.27.1 → auto_editor-24.30.1}/README.md

@@ -165,6 +165,3 @@ auto-editor --margin --help
 
 ## Copyright
 Auto-Editor is under the [Public Domain](https://github.com/WyattBlue/auto-editor/blob/master/LICENSE) and includes all directories besides the ones listed below. Auto-Editor was created by [these people.](https://auto-editor.com/blog/thank-you-early-testers)
-
-ae-ffmpeg is under the [LGPLv3 License](https://github.com/WyattBlue/auto-editor/blob/master/ae-ffmpeg/LICENSE.txt). The ffmpeg and ffprobe programs were created by the FFmpeg team.
-
auto_editor-24.30.1/auto_editor/analyze.py

@@ -0,0 +1,377 @@
+from __future__ import annotations
+
+import os
+import re
+from dataclasses import dataclass
+from fractions import Fraction
+from math import ceil
+from typing import TYPE_CHECKING
+
+import av
+import numpy as np
+from av.audio.fifo import AudioFifo
+from av.subtitles.subtitle import AssSubtitle
+
+from auto_editor import version
+from auto_editor.utils.subtitle_tools import convert_ass_to_text
+
+if TYPE_CHECKING:
+    from collections.abc import Iterator
+    from fractions import Fraction
+    from typing import Any
+
+    from numpy.typing import NDArray
+
+    from auto_editor.ffwrapper import FileInfo
+    from auto_editor.utils.bar import Bar
+    from auto_editor.utils.log import Log
+
+
+@dataclass(slots=True)
+class FileSetup:
+    src: FileInfo
+    strict: bool
+    tb: Fraction
+    bar: Bar
+    temp: str
+    log: Log
+
+
+class LevelError(Exception):
+    pass
+
+
+def mut_remove_small(
+    arr: NDArray[np.bool_], lim: int, replace: int, with_: int
+) -> None:
+    start_p = 0
+    active = False
+    for j, item in enumerate(arr):
+        if item == replace:
+            if not active:
+                start_p = j
+                active = True
+
+            if j == len(arr) - 1 and j - start_p < lim:
+                arr[start_p:] = with_
+        elif active:
+            if j - start_p < lim:
+                arr[start_p:j] = with_
+            active = False
+
+
+def mut_remove_large(
+    arr: NDArray[np.bool_], lim: int, replace: int, with_: int
+) -> None:
+    start_p = 0
+    active = False
+    for j, item in enumerate(arr):
+        if item == replace:
+            if not active:
+                start_p = j
+                active = True
+
+            if j == len(arr) - 1 and j - start_p >= lim:
+                arr[start_p:] = with_
+        elif active:
+            if j - start_p > lim:
+                arr[start_p:j] = with_
+            active = False
+
+
+def obj_tag(tag: str, tb: Fraction, obj: dict[str, Any]) -> str:
+    key = f"{tag}:{tb}:"
+    for k, v in obj.items():
+        key += f"{k}={v},"
+
+    key = key[:-1]  # remove unnecessary char
+    return key
+
+
+def iter_audio(src, tb: Fraction, stream: int = 0) -> Iterator[float]:
+    fifo = AudioFifo()
+    try:
+        container = av.open(src.path, "r")
+        audio_stream = container.streams.audio[stream]
+        sample_rate = audio_stream.rate
+
+        exact_size = (1 / tb) * sample_rate
+        accumulated_error = 0
+
+        # Resample so that audio data is between [-1, 1]
+        resampler = av.AudioResampler(
+            av.AudioFormat("flt"), audio_stream.layout, sample_rate
+        )
+
+        for frame in container.decode(audio=stream):
+            frame.pts = None  # Skip time checks
+
+            for reframe in resampler.resample(frame):
+                fifo.write(reframe)
+
+            while fifo.samples >= ceil(exact_size):
+                size_with_error = exact_size + accumulated_error
+                current_size = round(size_with_error)
+                accumulated_error = size_with_error - current_size
+
+                audio_chunk = fifo.read(current_size)
+                assert audio_chunk is not None
+                arr = audio_chunk.to_ndarray().flatten()
+                yield float(np.max(np.abs(arr)))
+
+    finally:
+        container.close()
+
+
+def iter_motion(src, tb, stream: int, blur: int, width: int) -> Iterator[float]:
+    container = av.open(src.path, "r")
+
+    video = container.streams.video[stream]
+    video.thread_type = "AUTO"
+
+    prev_frame = None
+    current_frame = None
+    total_pixels = src.videos[0].width * src.videos[0].height
+    index = 0
+    prev_index = -1
+
+    graph = av.filter.Graph()
+    graph.link_nodes(
+        graph.add_buffer(template=video),
+        graph.add("scale", f"{width}:-1"),
+        graph.add("format", "gray"),
+        graph.add("gblur", f"sigma={blur}"),
+        graph.add("buffersink"),
+    ).configure()
+
+    for unframe in container.decode(video):
+        if unframe.pts is None:
+            continue
+
+        graph.push(unframe)
+        frame = graph.pull()
+        assert frame.time is not None
+        index = round(frame.time * tb)
+
+        current_frame = frame.to_ndarray()
+        if prev_frame is None:
+            value = 0.0
+        else:
+            # Use `int16` to avoid underflow with `uint8` datatype
+            diff = np.abs(prev_frame.astype(np.int16) - current_frame.astype(np.int16))
+            value = np.count_nonzero(diff) / total_pixels
+
+        for _ in range(index - prev_index):
+            yield value
+
+        prev_frame = current_frame
+        prev_index = index
+
+    container.close()
+
+
+@dataclass(slots=True)
+class Levels:
+    src: FileInfo
+    tb: Fraction
+    bar: Bar
+    temp: str
+    log: Log
+
+    @property
+    def media_length(self) -> int:
+        if self.src.audios:
+            if (arr := self.read_cache("audio", {"stream": 0})) is not None:
+                return len(arr)
+
+            result = sum(1 for _ in iter_audio(self.src, self.tb, 0))
+            self.log.debug(f"Audio Length: {result}")
+            return result
+
+        # If there's no audio, get length in video metadata.
+        with av.open(self.src.path) as container:
+            if len(container.streams.video) == 0:
+                self.log.error("Could not get media duration")
+
+            video = container.streams.video[0]
+
+            if video.duration is None or video.time_base is None:
+                dur = 0
+            else:
+                dur = int(video.duration * video.time_base * self.tb)
+            self.log.debug(f"Video duration: {dur}")
+
+        return dur
+
+    def none(self) -> NDArray[np.bool_]:
+        return np.ones(self.media_length, dtype=np.bool_)
+
+    def all(self) -> NDArray[np.bool_]:
+        return np.zeros(self.media_length, dtype=np.bool_)
+
+    def read_cache(self, tag: str, obj: dict[str, Any]) -> None | np.ndarray:
+        workfile = os.path.join(
+            os.path.dirname(self.temp), f"ae-{version}", "cache.npz"
+        )
+
+        try:
+            npzfile = np.load(workfile, allow_pickle=False)
+        except Exception as e:
+            self.log.debug(e)
+            return None
+
+        key = f"{self.src.path}:{obj_tag(tag, self.tb, obj)}"
+        if key not in npzfile.files:
+            return None
+
+        self.log.debug("Using cache")
+        return npzfile[key]
+
+    def cache(self, tag: str, obj: dict[str, Any], arr: np.ndarray) -> np.ndarray:
+        workdur = os.path.join(os.path.dirname(self.temp), f"ae-{version}")
+        if not os.path.exists(workdur):
+            os.mkdir(workdur)
+
+        tag = obj_tag(tag, self.tb, obj)
+        np.savez(os.path.join(workdur, "cache.npz"), **{f"{self.src.path}:{tag}": arr})
+
+        return arr
+
+    def audio(self, stream: int) -> NDArray[np.float64]:
+        if stream >= len(self.src.audios):
+            raise LevelError(f"audio: audio stream '{stream}' does not exist.")
+
+        if (arr := self.read_cache("audio", {"stream": stream})) is not None:
+            return arr
+
+        with av.open(self.src.path, "r") as container:
+            audio = container.streams.audio[stream]
+            if audio.duration is not None and audio.time_base is not None:
+                inaccurate_dur = int(audio.duration * audio.time_base * self.tb)
+            elif container.duration is not None:
+                inaccurate_dur = int(container.duration / av.time_base * self.tb)
+            else:
+                inaccurate_dur = 1024
+
+        bar = self.bar
+        bar.start(inaccurate_dur, "Analyzing audio volume")
+
+        result = np.zeros((inaccurate_dur), dtype=np.float64)
+        index = 0
+        for value in iter_audio(self.src, self.tb, stream):
+            if index > len(result) - 1:
+                result = np.concatenate(
+                    (result, np.zeros((len(result)), dtype=np.float64))
+                )
+            result[index] = value
+            bar.tick(index)
+            index += 1
+
+        bar.end()
+        return self.cache("audio", {"stream": stream}, result[:index])
+
+    def motion(self, stream: int, blur: int, width: int) -> NDArray[np.float64]:
+        if stream >= len(self.src.videos):
+            raise LevelError(f"motion: video stream '{stream}' does not exist.")
+
+        mobj = {"stream": stream, "width": width, "blur": blur}
+        if (arr := self.read_cache("motion", mobj)) is not None:
+            return arr
+
+        with av.open(self.src.path, "r") as container:
+            video = container.streams.video[stream]
+            inaccurate_dur = (
+                1024
+                if video.duration is None or video.time_base is None
+                else int(video.duration * video.time_base * self.tb)
+            )
+
+        bar = self.bar
+        bar.start(inaccurate_dur, "Analyzing motion")
+
+        result = np.zeros((inaccurate_dur), dtype=np.float64)
+        index = 0
+        for value in iter_motion(self.src, self.tb, stream, blur, width):
+            if index > len(result) - 1:
+                result = np.concatenate(
+                    (result, np.zeros((len(result)), dtype=np.float64))
+                )
+            result[index] = value
+            bar.tick(index)
+            index += 1
+
+        bar.end()
+        return self.cache("motion", mobj, result[:index])
+
+    def subtitle(
+        self,
+        pattern: str,
+        stream: int,
+        ignore_case: bool,
+        max_count: int | None,
+    ) -> NDArray[np.bool_]:
+        if stream >= len(self.src.subtitles):
+            raise LevelError(f"subtitle: subtitle stream '{stream}' does not exist.")
+
+        try:
+            flags = re.IGNORECASE if ignore_case else 0
+            re_pattern = re.compile(pattern, flags)
+        except re.error as e:
+            self.log.error(e)
+
+        try:
+            container = av.open(self.src.path, "r")
+            subtitle_stream = container.streams.subtitles[stream]
+            assert isinstance(subtitle_stream.time_base, Fraction)
+        except Exception as e:
+            self.log.error(e)
+
+        # Get the length of the subtitle stream.
+        sub_length = 0
+        for packet in container.demux(subtitle_stream):
+            if packet.pts is None or packet.duration is None:
+                continue
+            for subset in packet.decode():
+                # See definition of `AVSubtitle`
+                # in: https://ffmpeg.org/doxygen/trunk/avcodec_8h_source.html
+                start = float(packet.pts * subtitle_stream.time_base)
+                dur = float(packet.duration * subtitle_stream.time_base)
+
+                end = round((start + dur) * self.tb)
+                sub_length = max(sub_length, end)
+
+        result = np.zeros((sub_length), dtype=np.bool_)
+        del sub_length
+
+        count = 0
+        early_exit = False
+        container.seek(0)
+        for packet in container.demux(subtitle_stream):
+            if packet.pts is None or packet.duration is None:
+                continue
+            if early_exit:
+                break
+            for subset in packet.decode():
+                if max_count is not None and count >= max_count:
+                    early_exit = True
+                    break
+
+                start = float(packet.pts * subtitle_stream.time_base)
+                dur = float(packet.duration * subtitle_stream.time_base)
+
+                san_start = round(start * self.tb)
+                san_end = round((start + dur) * self.tb)
+
+                for sub in subset:
+                    if isinstance(sub, AssSubtitle):
+                        line = convert_ass_to_text(sub.ass.decode(errors="ignore"))
+                    else:
+                        continue
+
+                    if line and re.search(re_pattern, line):
+                        result[san_start:san_end] = 1
+                        count += 1
+
+        container.close()
+
+        return result
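For orientation, here is a minimal standalone sketch of how the new `mut_remove_small` helper behaves. The function body is copied from the hunk above; the example array and the chosen `lim` value are made up for illustration.

```python
# Runs of `replace` values shorter than `lim` (including a short trailing run)
# are flipped to `with_`, which is how brief "keep"/"cut" stretches get smoothed.
import numpy as np
from numpy.typing import NDArray


def mut_remove_small(
    arr: NDArray[np.bool_], lim: int, replace: int, with_: int
) -> None:
    start_p = 0
    active = False
    for j, item in enumerate(arr):
        if item == replace:
            if not active:
                start_p = j
                active = True

            if j == len(arr) - 1 and j - start_p < lim:
                arr[start_p:] = with_
        elif active:
            if j - start_p < lim:
                arr[start_p:j] = with_
            active = False


keep = np.array([1, 0, 0, 1, 1, 1, 0, 1], dtype=np.bool_)
mut_remove_small(keep, lim=3, replace=1, with_=0)  # drop "keep" runs shorter than 3
print(keep)  # [False False False  True  True  True False False]
```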
{auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/edit.py

@@ -68,12 +68,8 @@ def set_video_codec(
 ) -> str:
     if codec == "auto":
         codec = "h264" if (src is None or not src.videos) else src.videos[0].codec
-        if ctr.vcodecs
-
-            return ctr.vcodecs[0]
-
-        if codec in ctr.disallow_v:
-            return ctr.vcodecs[0]
+        if codec not in ctr.vcodecs and ctr.default_vid != "none":
+            return ctr.default_vid
         return codec
 
     if codec == "copy":
@@ -83,12 +79,7 @@ def set_video_codec(
         log.error("Input file does not have a video stream to copy codec from.")
         codec = src.videos[0].codec
 
-    if ctr.
-        assert ctr.vcodecs is not None
-        if codec not in ctr.vcodecs:
-            log.error(codec_error.format(codec, out_ext))
-
-        if codec in ctr.disallow_v:
+    if ctr.vcodecs is not None and codec not in ctr.vcodecs:
         log.error(codec_error.format(codec, out_ext))
 
     return codec
@@ -99,8 +90,10 @@ def set_audio_codec(
 ) -> str:
     if codec == "auto":
         codec = "aac" if (src is None or not src.audios) else src.audios[0].codec
-        if
-            return ctr.
+        if codec not in ctr.acodecs and ctr.default_aud != "none":
+            return ctr.default_aud
+        if codec == "mp3float":
+            return "mp3"
         return codec
 
     if codec == "copy":
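The two codec hunks above replace the old `vcodecs`/`disallow_v` checks with one rule: if the auto-detected codec is not accepted by the output container and the container declares a default, fall back to that default. Below is a standalone sketch of that rule only; `SketchContainer` and its example values are hypothetical stand-ins for the real `Container` objects in `auto_editor/utils/container.py`, not auto-editor's API.

```python
from dataclasses import dataclass


@dataclass
class SketchContainer:
    # Hypothetical stand-in: codecs the output container accepts, plus its default.
    vcodecs: tuple[str, ...] = ("h264", "hevc", "av1")
    default_vid: str = "h264"


def pick_video_codec(src_codec: str, ctr: SketchContainer) -> str:
    codec = src_codec  # what `codec == "auto"` resolves to from the first video stream
    if codec not in ctr.vcodecs and ctr.default_vid != "none":
        return ctr.default_vid  # fall back to the container's default
    return codec


print(pick_video_codec("vp9", SketchContainer()))   # vp9 not accepted -> "h264"
print(pick_video_codec("hevc", SketchContainer()))  # already accepted -> "hevc"
```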
@@ -209,10 +202,8 @@ def edit_media(
     else:
         samplerate = args.sample_rate
 
-    ensure = Ensure(ffmpeg, bar, samplerate, temp, log)
-
     if tl is None:
-        tl = make_timeline(sources,
+        tl = make_timeline(sources, args, samplerate, bar, temp, log)
 
     if export["export"] == "timeline":
         from auto_editor.formats.json import make_json_timeline
@@ -223,7 +214,7 @@ def edit_media(
     if args.preview:
         from auto_editor.preview import preview
 
-        preview(
+        preview(tl, temp, log)
         return
 
     if export["export"] == "json":
@@ -272,13 +263,15 @@ def edit_media(
     sub_output = []
     apply_later = False
 
-
+    ensure = Ensure(ffmpeg, bar, samplerate, temp, log)
+
+    if ctr.default_sub != "none" and not args.sn:
         sub_output = make_new_subtitles(tl, ensure, temp)
 
-    if ctr.
+    if ctr.default_aud != "none":
         audio_output = make_new_audio(tl, ensure, args, ffmpeg, bar, temp, log)
 
-    if ctr.
+    if ctr.default_vid != "none":
         if tl.v:
             out_path, apply_later = render_av(ffmpeg, tl, args, bar, ctr, temp, log)
             visual_output.append((True, out_path))
{auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/lang/palet.py

@@ -18,12 +18,7 @@ from typing import TYPE_CHECKING
 import numpy as np
 from numpy import logical_and, logical_not, logical_or, logical_xor
 
-from auto_editor.analyze import
-    LevelError,
-    mut_remove_large,
-    mut_remove_small,
-    to_threshold,
-)
+from auto_editor.analyze import LevelError, mut_remove_large, mut_remove_small
 from auto_editor.lib.contracts import *
 from auto_editor.lib.data_structs import *
 from auto_editor.lib.err import MyError
@@ -690,6 +685,9 @@ def palet_map(proc: Proc, seq: Any) -> Any:
         return Quoted(tuple(map(proc, seq.val)))
     if isinstance(seq, list | range):
         return list(map(proc, seq))
+    elif isinstance(seq, np.ndarray):
+        vectorized_proc = np.vectorize(proc)
+        return vectorized_proc(seq)
     return proc(seq)
 
 
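The `palet_map` hunk above adds an ndarray branch built on `np.vectorize`, so a one-argument Palet procedure is applied elementwise to analysis arrays. A tiny standalone check of that numpy behavior (the procedure and array here are made up):

```python
import numpy as np

proc = lambda x: x * 2          # stand-in for a one-argument Palet procedure
seq = np.array([0.1, 0.25, 0.5])

vectorized_proc = np.vectorize(proc)
print(vectorized_proc(seq))     # [0.2 0.5 1. ]
```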
@@ -1469,6 +1467,26 @@ def edit_all() -> np.ndarray:
     return env["@levels"].all()
 
 
+def audio_levels(stream: int) -> np.ndarray:
+    if "@levels" not in env:
+        raise MyError("Can't use `audio` if there's no input media")
+
+    try:
+        return env["@levels"].audio(stream)
+    except LevelError as e:
+        raise MyError(e)
+
+
+def motion_levels(stream: int, blur: int = 9, width: int = 400) -> np.ndarray:
+    if "@levels" not in env:
+        raise MyError("Can't use `motion` if there's no input media")
+
+    try:
+        return env["@levels"].motion(stream, blur, width)
+    except LevelError as e:
+        raise MyError(e)
+
+
 def edit_audio(
     threshold: float = 0.04,
     stream: object = Sym("all"),
@@ -1491,7 +1509,7 @@ def edit_audio(
 
     try:
         for s in stream_range:
-            audio_list =
+            audio_list = levels.audio(s) >= threshold
             if stream_data is None:
                 stream_data = audio_list
             else:
@@ -1521,7 +1539,7 @@ def edit_motion(
     levels = env["@levels"]
     strict = env["@filesetup"].strict
     try:
-        return
+        return levels.motion(stream, blur, width) >= threshold
     except LevelError as e:
         return raise_(e) if strict else levels.all()
 
@@ -1582,7 +1600,7 @@ def my_eval(env: Env, node: object) -> Any:
        return ref(oper, my_eval(env, node[1]))
 
     raise MyError(
-        f"
+        f"{print_str(oper)} is not a function. Tried to run with args: {print_str(node[1:])}"
     )
 
     if type(oper) is Syntax:
@@ -1617,10 +1635,12 @@ env.update({
     # edit procedures
     "none": Proc("none", edit_none, (0, 0)),
     "all/e": Proc("all/e", edit_all, (0, 0)),
+    "audio-levels": Proc("audio-levels", audio_levels, (1, 1), is_nat),
     "audio": Proc("audio", edit_audio, (0, 4),
         is_threshold, orc(is_nat, Sym("all")), is_nat,
         {"threshold": 0, "stream": 1, "minclip": 2, "mincut": 2}
     ),
+    "motion-levels": Proc("motion-levels", motion_levels, (1, 3), is_nat, is_nat1, {"blur": 1, "width": 2}),
     "motion": Proc("motion", edit_motion, (0, 4),
         is_threshold, is_nat, is_nat1,
         {"threshold": 0, "stream": 1, "blur": 1, "width": 2}
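With the new `audio-levels` and `motion-levels` procedures exposing raw float arrays, the boolean edit arrays now come from the `levels.audio(s) >= threshold` and `levels.motion(...) >= threshold` comparisons shown in `edit_audio`/`edit_motion` above. A made-up example of what that comparison yields (the values and threshold are illustrative only, not real analysis output):

```python
import numpy as np

# Hypothetical per-timebase-frame loudness, the kind of array Levels.audio() returns.
levels = np.array([0.01, 0.02, 0.50, 0.60, 0.03, 0.70], dtype=np.float64)
threshold = 0.04

has_loud = levels >= threshold   # what edit_audio computes per stream
print(has_loud)                  # [False False  True  True False  True]
```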
{auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/lib/contracts.py

@@ -5,6 +5,8 @@ from dataclasses import dataclass
 from fractions import Fraction
 from typing import Any
 
+from numpy import float64
+
 from .data_structs import Sym, print_str
 from .err import MyError
 
@@ -41,7 +43,7 @@ def check_contract(c: object, val: object) -> bool:
         return val is True
     if c is False:
         return val is False
-    if type(c) in (int, float, Fraction, complex, str, Sym):
+    if type(c) in (int, float, float64, Fraction, complex, str, Sym):
         return val == c
     raise MyError(f"Invalid contract, got: {print_str(c)}")
 
@@ -163,17 +165,21 @@ is_int = Contract("int?", lambda v: type(v) is int)
 is_nat = Contract("nat?", lambda v: type(v) is int and v > -1)
 is_nat1 = Contract("nat1?", lambda v: type(v) is int and v > 0)
 int_not_zero = Contract("(or/c (not/c 0) int?)", lambda v: v != 0 and is_int(v))
-is_num = Contract(
-
-
+is_num = Contract(
+    "number?", lambda v: type(v) in (int, float, float64, Fraction, complex)
+)
+is_real = Contract("real?", lambda v: type(v) in (int, float, float64, Fraction))
+is_float = Contract("float?", lambda v: type(v) in (float, float64))
 is_frac = Contract("frac?", lambda v: type(v) is Fraction)
 is_str = Contract("string?", lambda v: type(v) is str)
 any_p = Contract("any", lambda v: True)
 is_void = Contract("void?", lambda v: v is None)
-is_int_or_float = Contract(
+is_int_or_float = Contract(
+    "(or/c int? float?)", lambda v: type(v) in (int, float, float64)
+)
 is_threshold = Contract(
     "threshold?",
-    lambda v: type(v) in (int, float) and v >= 0 and v <= 1,  # type: ignore
+    lambda v: type(v) in (int, float, float64) and v >= 0 and v <= 1,  # type: ignore
 )
 is_proc = Contract("procedure?", lambda v: isinstance(v, Proc | Contract))
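Threading numpy's `float64` through these contracts matters because the exact-type checks (`type(v) in (...)`) do not treat a `numpy.float64` scalar as `float`, even though it subclasses `float`. A quick standalone check, presumably the motivation for the change now that level arrays flow into Palet:

```python
import numpy as np

v = np.float64(0.04)  # e.g. one element of an audio-levels array
print(type(v) in (int, float))              # False -> the old exact-type check rejects it
print(type(v) in (int, float, np.float64))  # True  -> the updated contract accepts it
```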
{auto_editor-24.27.1 → auto_editor-24.30.1}/auto_editor/make_layers.py

@@ -18,7 +18,6 @@ from auto_editor.utils.types import Args, CoerceError, time
 if TYPE_CHECKING:
     from numpy.typing import NDArray
 
-    from auto_editor.output import Ensure
     from auto_editor.utils.bar import Bar
     from auto_editor.utils.chunks import Chunks
     from auto_editor.utils.log import Log
@@ -75,7 +74,6 @@ def make_av(src: FileInfo, all_clips: list[list[Clip]]) -> tuple[VSpace, ASpace]
 def run_interpreter_for_edit_option(
     text: str, filesetup: FileSetup
 ) -> NDArray[np.bool_]:
-    ensure = filesetup.ensure
     src = filesetup.src
     tb = filesetup.tb
     bar = filesetup.bar
@@ -87,8 +85,8 @@ def run_interpreter_for_edit_option(
     if log.is_debug:
         log.debug(f"edit: {parser}")
 
-    env["timebase"] =
-    env["@levels"] = Levels(
+    env["timebase"] = tb
+    env["@levels"] = Levels(src, tb, bar, temp, log)
     env["@filesetup"] = filesetup
 
     results = interpret(env, parser)
@@ -139,7 +137,6 @@ def parse_time(val: str, arr: NDArray, tb: Fraction) -> int: # raises: `CoerceE
 
 def make_timeline(
     sources: list[FileInfo],
-    ensure: Ensure,
     args: Args,
     sr: int,
     bar: Bar,
@@ -169,7 +166,7 @@ def make_timeline(
     concat = np.concatenate
 
     for i, src in enumerate(sources):
-        filesetup = FileSetup(src,
+        filesetup = FileSetup(src, len(sources) < 2, tb, bar, temp, log)
 
         edit_result = run_interpreter_for_edit_option(method, filesetup)
         mut_margin(edit_result, start_margin, end_margin)
@@ -296,22 +293,4 @@ def make_timeline(
     else:
         v1_compatiable = None
 
-
-
-    # Additional monotonic check, o(n^2) time complexity so disable by default.
-
-    # if len(sources) != 1:
-    #     return tl
-
-    # last_i = 0
-    # for index in range(tl.end):
-    #     for layer in tl.v:
-    #         for lobj in layer:
-    #             if index >= lobj.start and index < (lobj.start + lobj.dur):
-    #                 _i = round((lobj.offset + index - lobj.start) * lobj.speed)
-    #                 if (_i < last_i):
-    #                     print(_i, last_i)
-    #                     raise ValueError("not monotonic")
-    #             last_i = _i
-
-    return tl
+    return v3(inp, tb, sr, res, args.background, vtl, atl, v1_compatiable)