auto-editor 26.3.2-py3-none-any.whl → 27.0.0-py3-none-any.whl
This diff shows the published contents of two versions of this package as they appear in their public registry. It is provided for informational purposes only.
- auto_editor/__init__.py +1 -1
- auto_editor/__main__.py +155 -40
- auto_editor/analyze.py +30 -36
- auto_editor/cmds/info.py +1 -1
- auto_editor/cmds/levels.py +3 -3
- auto_editor/cmds/subdump.py +62 -8
- auto_editor/cmds/test.py +73 -53
- auto_editor/edit.py +50 -58
- auto_editor/ffwrapper.py +9 -9
- auto_editor/formats/json.py +2 -2
- auto_editor/{lang/json.py → json.py} +39 -43
- auto_editor/lang/palet.py +2 -2
- auto_editor/lang/stdenv.py +12 -0
- auto_editor/make_layers.py +2 -1
- auto_editor/output.py +6 -6
- auto_editor/render/audio.py +30 -25
- auto_editor/render/subtitle.py +10 -14
- auto_editor/render/video.py +41 -45
- auto_editor/timeline.py +8 -1
- auto_editor/utils/container.py +3 -3
- auto_editor/utils/types.py +7 -118
- {auto_editor-26.3.2.dist-info → auto_editor-27.0.0.dist-info}/METADATA +8 -7
- {auto_editor-26.3.2.dist-info → auto_editor-27.0.0.dist-info}/RECORD +28 -28
- {auto_editor-26.3.2.dist-info → auto_editor-27.0.0.dist-info}/WHEEL +1 -1
- docs/build.py +16 -7
- {auto_editor-26.3.2.dist-info → auto_editor-27.0.0.dist-info}/entry_points.txt +0 -0
- {auto_editor-26.3.2.dist-info → auto_editor-27.0.0.dist-info/licenses}/LICENSE +0 -0
- {auto_editor-26.3.2.dist-info → auto_editor-27.0.0.dist-info}/top_level.txt +0 -0
auto_editor/{lang/json.py → json.py}
RENAMED
@@ -11,29 +11,10 @@ if TYPE_CHECKING:
 
     from _typeshed import SupportsWrite
 
-
-class Token:
-    __slots__ = ("type", "value")
-
-    def __init__(self, type: int, value: object):
-        self.type = type
-        self.value = value
-
-    def __str__(self) -> str:
-        return f"{self.type=} {self.value=}"
-
-    __repr__ = __str__
+Token = tuple[int, object]
 
 
 EOF, LCUR, RCUR, LBRAC, RBRAC, COL, COMMA, STR, VAL = range(9)
-table = {
-    "{": LCUR,
-    "}": RCUR,
-    "[": LBRAC,
-    "]": RBRAC,
-    ":": COL,
-    ",": COMMA,
-}
 str_escape = {
     "\\": "\\",
     "/": "/",
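The Token class is replaced by a plain (type, value) tuple: no per-token object allocation, and call sites can unpack or index directly. A minimal sketch of the new access pattern (illustrative only, not part of the diff):

    Token = tuple[int, object]
    EOF, LCUR, RCUR, LBRAC, RBRAC, COL, COMMA, STR, VAL = range(9)

    tok: Token = (VAL, 3.14)  # 26.3.2 would have built Token(VAL, 3.14)
    kind, value = tok         # tuples unpack for free...
    assert tok[0] == VAL      # ...or index, as the parser hunks below now do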
@@ -88,6 +69,12 @@ class Lexer:
             self.char = self.text[self.pos]
             self.column += 1
 
+    def rewind(self) -> None:
+        self.pos = 0
+        self.lineno = 1
+        self.column = 1
+        self.char = self.text[self.pos] if self.text else None
+
     def peek(self) -> str | None:
         peek_pos = self.pos + 1
         return None if peek_pos > len(self.text) - 1 else self.text[peek_pos]
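The new rewind() restores the lexer to its initial state so the same input can be tokenized twice; the load() function added at the bottom of this file uses it to inspect the first token before handing the lexer to the parser. A hedged sketch of that peek-then-rewind pattern (the Lexer(path, text) signature is taken from load() below):

    lexer = Lexer("example.json", '{"a": 1}')
    first = lexer.get_next_token()          # consume one token to inspect it
    lexer.rewind()                          # back to pos 0, line 1, column 1
    assert lexer.get_next_token() == first  # tokenizing starts over from scratch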
@@ -142,7 +129,7 @@ class Lexer:
         result = buf.getvalue()
 
         try:
-            return Token(VAL, float(result) if has_dot else int(result))
+            return (VAL, float(result) if has_dot else int(result))
         except ValueError:
             self.error(f"`{result}` is not a valid JSON Number")
 
@@ -158,7 +145,7 @@ class Lexer:
 
         if self.char == '"':
             self.advance()
-            return Token(STR, self.string())
+            return (STR, self.string())
 
         if self.char == "-":
             _peek = self.peek()
@@ -168,10 +155,11 @@ class Lexer:
         if self.char in "0123456789.":
             return self.number()
 
+        table = {"{": LCUR, "}": RCUR, "[": LBRAC, "]": RBRAC, ":": COL, ",": COMMA}
         if self.char in table:
             key = table[self.char]
             self.advance()
-            return Token(key, None)
+            return (key, None)
 
         keyword = ""
         for i in range(5):  # Longest valid keyword length
@@ -181,14 +169,14 @@ class Lexer:
             self.advance()
 
         if keyword == "true":
-            return Token(VAL, True)
+            return (VAL, True)
         if keyword == "false":
-            return Token(VAL, False)
+            return (VAL, False)
         if keyword == "null":
-            return Token(VAL, None)
+            return (VAL, None)
 
         self.error(f"Invalid keyword: `{keyword}`")
-        return Token(EOF, None)
+        return (EOF, None)
 
 
 class Parser:
@@ -204,49 +192,49 @@ class Parser:
     def expr(self) -> Any:
         self.current_token
 
-        if self.current_token.type in {STR, VAL}:
-            val = self.current_token.value
+        if self.current_token[0] in {STR, VAL}:
+            val = self.current_token[1]
             self.eat()
             return val
 
-        if self.current_token.type == LCUR:
+        if self.current_token[0] == LCUR:
             self.eat()
 
             my_dic = {}
-            while self.current_token.type != RCUR:
-                if self.current_token.type != STR:
-                    if self.current_token.type in {LBRAC, VAL}:
+            while self.current_token[0] != RCUR:
+                if self.current_token[0] != STR:
+                    if self.current_token[0] in {LBRAC, VAL}:
                         self.lexer.error("JSON Objects only allow strings as keys")
                     self.lexer.error("Expected closing `}`")
-                key = self.current_token.value
+                key = self.current_token[1]
                 if key in my_dic:
                     self.lexer.error(f"Object has repeated key `{key}`")
                 self.eat()
-                if self.current_token.type != COL:
+                if self.current_token[0] != COL:
                     self.lexer.error("Expected `:`")
                 self.eat()
 
                 my_dic[key] = self.expr()
-                if self.current_token.type != RCUR:
-                    if self.current_token.type != COMMA:
+                if self.current_token[0] != RCUR:
+                    if self.current_token[0] != COMMA:
                         self.lexer.error("Expected `,` between Object entries")
                     self.eat()
-                    if self.current_token.type == RCUR:
+                    if self.current_token[0] == RCUR:
                         self.lexer.error("Trailing `,` in Object")
 
             self.eat()
             return my_dic
 
-        if self.current_token.type == LBRAC:
+        if self.current_token[0] == LBRAC:
             self.eat()
             my_arr = []
-            while self.current_token.type != RBRAC:
+            while self.current_token[0] != RBRAC:
                 my_arr.append(self.expr())
-                if self.current_token.type != RBRAC:
-                    if self.current_token.type != COMMA:
+                if self.current_token[0] != RBRAC:
+                    if self.current_token[0] != COMMA:
                         self.lexer.error("Expected `,` between array entries")
                     self.eat()
-                    if self.current_token.type == RBRAC:
+                    if self.current_token[0] == RBRAC:
                         self.lexer.error("Trailing `,` in array")
                 self.eat()
             return my_arr
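Aside from swapping .type/.value attribute access for tuple indexing, expr() keeps its recursive-descent shape, and the RCUR/RBRAC/COMMA checks are what reject malformed separators. Assuming lexer.error() raises MyError like the rest of this module, each of these made-up inputs hits one of the error branches above when run through the load() added below:

    load("x", '{"a": 1,}')        # MyError: Trailing `,` in Object
    load("x", '{"a": [1, 2,]}')   # MyError: Trailing `,` in array
    load("x", '{"a": 1 "b": 2}')  # MyError: Expected `,` between Object entries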
@@ -254,6 +242,14 @@ class Parser:
         raise MyError(f"Unknown token: {self.current_token}")
 
 
+def load(path: str, f: str | bytes | TextIOWrapper) -> dict[str, object]:
+    lexer = Lexer(path, f)
+    if lexer.get_next_token()[0] != LCUR:
+        raise MyError("Expected JSON Object")
+    lexer.rewind()
+    return Parser(lexer).expr()
+
+
 def dump(
     data: object, file: SupportsWrite[str], indent: int | None = None, level: int = 0
 ) -> None:
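load() is now the module's public entry point (formats/json.py and render/audio.py import it) and enforces that the top-level value is a JSON object, which is what Lexer.rewind() exists for. A hedged usage sketch; the file name is made up, and the assumption is that the path argument only feeds error messages:

    from auto_editor.json import load  # moved from auto_editor.lang.json

    with open("timeline.json") as f:     # hypothetical input file
        data = load("timeline.json", f)
    assert isinstance(data, dict)
    # A top-level array such as "[1, 2]" raises MyError("Expected JSON Object").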
auto_editor/lang/palet.py
CHANGED
@@ -646,8 +646,8 @@ stack_trace_manager = StackTraceManager()
 
 
 def my_eval(env: Env, node: object) -> Any:
-    def make_trace(sym: Sym) -> str:
-        return f" at {sym.val} ({sym.lineno}:{sym.column})"
+    def make_trace(sym: object) -> str:
+        return f" at {sym.val} ({sym.lineno}:{sym.column})" if type(sym) is Sym else ""
 
     if type(node) is Sym:
         val = env.get(node.val)
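make_trace() now tolerates any node and only formats a location when the node really is a Sym, so building a stack trace can no longer crash on non-symbol nodes; the old Sym-typed body would raise AttributeError if handed anything else. A behavior sketch, where the Sym constructor shown is hypothetical:

    make_trace(Sym("my-proc", lineno=3, column=7))  # " at my-proc (3:7)"
    make_trace(42)                                  # "" instead of AttributeError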
auto_editor/lang/stdenv.py
CHANGED
@@ -2,6 +2,8 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING
 
+import bv
+
 from auto_editor.analyze import mut_remove_large, mut_remove_small
 from auto_editor.lib.contracts import *
 from auto_editor.lib.data_structs import *
@@ -749,6 +751,13 @@ def make_standard_env() -> dict[str, Any]:
             raise MyError("@r: attribute must be an identifier")
 
         base = my_eval(env, node[1])
+
+        if hasattr(base, "__pyx_vtable__"):
+            try:
+                return getattr(base, node[2].val)
+            except AttributeError as e:
+                raise MyError(e)
+
         if type(base) is PaletClass:
             if type(name := node[2]) is not Sym:
                 raise MyError("@r: class attribute must be an identifier")
@@ -1171,6 +1180,9 @@ def make_standard_env() -> dict[str, Any]:
             "string->vector", lambda s: [Char(c) for c in s], (1, 1), is_str
         ),
         "range->vector": Proc("range->vector", list, (1, 1), is_range),
+        # av
+        "encoder": Proc("encoder", lambda x: bv.Codec(x, "w"), (1, 1), is_str),
+        "decoder": Proc("decoder", lambda x: bv.Codec(x), (1, 1), is_str),
         # reflexion
         "var-exists?": Proc("var-exists?", lambda sym: sym.val in env, (1, 1), is_symbol),
         "rename": Syntax(syn_rename),
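The new encoder/decoder procedures expose codec lookup to Palet scripts, presumably as (encoder "libopus") and (decoder "aac"). The Python they wrap, assuming bv keeps PyAV's Codec API where mode "r" (decode) is the default and "w" selects an encoder:

    import bv  # auto-editor's maintained PyAV fork

    dec = bv.Codec("aac")        # what (decoder "aac") evaluates to
    enc = bv.Codec("aac", "w")   # what (encoder "aac") evaluates to
    assert dec.is_decoder and enc.is_encoder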
auto_editor/make_layers.py
CHANGED
@@ -13,11 +13,12 @@ from auto_editor.lib.data_structs import print_str
 from auto_editor.lib.err import MyError
 from auto_editor.timeline import ASpace, TlAudio, TlVideo, VSpace, v1, v3
 from auto_editor.utils.func import mut_margin
-from auto_editor.utils.types import Args, CoerceError, time
+from auto_editor.utils.types import CoerceError, time
 
 if TYPE_CHECKING:
     from numpy.typing import NDArray
 
+    from auto_editor.__main__ import Args
     from auto_editor.utils.bar import Bar
     from auto_editor.utils.chunks import Chunks
     from auto_editor.utils.log import Log
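Args moved from auto_editor.utils.types into auto_editor.__main__ (hence the +155/-118 in those two files), so importing it at module load time would now be circular; the TYPE_CHECKING guard keeps it a type-only import. The general pattern, sketched with a hypothetical function:

    from __future__ import annotations  # annotations stay strings at runtime

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:  # executed by type checkers, never at runtime
        from auto_editor.__main__ import Args

    def make_layers_sketch(args: Args) -> None:  # hypothetical signature
        ...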
auto_editor/output.py
CHANGED
@@ -3,18 +3,18 @@ from __future__ import annotations
 import os.path
 from dataclasses import dataclass, field
 
-import av
-from av.audio.resampler import AudioResampler
+import bv
+from bv.audio.resampler import AudioResampler
 
 from auto_editor.ffwrapper import FileInfo
 from auto_editor.utils.bar import Bar
 from auto_editor.utils.log import Log
-from auto_editor.utils.types import …
+from auto_editor.utils.types import split_num_str
 
 
 def parse_bitrate(input_: str, log: Log) -> int:
     try:
-        val, unit = …
+        val, unit = split_num_str(input_)
     except Exception as e:
         log.error(e)
 
@@ -53,8 +53,8 @@ class Ensure:
         bar = self._bar
         self.log.debug(f"Making external audio: {out_path}")
 
-        in_container = av.open(src.path, "r")
-        out_container = av.open(
+        in_container = bv.open(src.path, "r")
+        out_container = bv.open(
             out_path, "w", format="wav", options={"rf64": "always"}
         )
         astream = in_container.streams.audio[stream]
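parse_bitrate() accepts human-readable rates such as "10M" or "128k". The real split_num_str lives in auto_editor/utils/types.py; this is only a behavioral sketch of the contract parse_bitrate relies on, a numeric prefix split from its unit suffix:

    def split_num_str_sketch(val: str) -> tuple[float, str]:
        # Peel the leading number off strings like "10M" or "128k".
        index = 0
        while index < len(val) and (val[index].isdigit() or val[index] == "."):
            index += 1
        return float(val[:index]), val[index:]

    assert split_num_str_sketch("128k") == (128.0, "k")
    assert split_num_str_sketch("10M") == (10.0, "M")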
auto_editor/render/audio.py
CHANGED
@@ -2,13 +2,14 @@ from __future__ import annotations
 
 import io
 from pathlib import Path
+from typing import TYPE_CHECKING
 
-import av
+import bv
 import numpy as np
-from av.filter.loudnorm import stats
+from bv.filter.loudnorm import stats
 
 from auto_editor.ffwrapper import FileInfo
-from auto_editor.lang.json import load
+from auto_editor.json import load
 from auto_editor.lang.palet import env
 from auto_editor.lib.contracts import andc, between_c, is_int_or_float
 from auto_editor.lib.err import MyError
@@ -18,9 +19,11 @@ from auto_editor.utils.bar import Bar
 from auto_editor.utils.cmdkw import ParserError, parse_with_palet, pAttr, pAttrs
 from auto_editor.utils.container import Container
 from auto_editor.utils.log import Log
-from auto_editor.utils.types import Args
 from auto_editor.wavfile import AudioData, read, write
 
+if TYPE_CHECKING:
+    from auto_editor.__main__ import Args
+
 norm_types = {
     "ebu": pAttrs(
         "ebu",
@@ -58,12 +61,14 @@ def parse_norm(norm: str, log: Log) -> dict | None:
 
 def parse_ebu_bytes(norm: dict, stat: bytes, log: Log) -> tuple[str, str]:
     try:
-        parsed = …
+        parsed = load("loudnorm", stat)
     except MyError:
         log.error(f"Invalid loudnorm stats.\n{stat!r}")
 
     for key in {"input_i", "input_tp", "input_lra", "input_thresh", "target_offset"}:
-        val = float(parsed[key])
+        val_ = parsed[key]
+        assert isinstance(val_, int | float | str | bytes)
+        val = float(val_)
         if val == float("-inf"):
             parsed[key] = -99
         elif val == float("inf"):
@@ -94,14 +99,14 @@ def apply_audio_normalization(
             f"i={norm['i']}:lra={norm['lra']}:tp={norm['tp']}:offset={norm['gain']}"
         )
         log.debug(f"audio norm first pass: {first_pass}")
-        with av.open(f"{pre_master}") as container:
+        with bv.open(f"{pre_master}") as container:
             stats_ = stats(first_pass, container.streams.audio[0])
 
         name, filter_args = parse_ebu_bytes(norm, stats_, log)
     else:
         assert "t" in norm
 
-        def get_peak_level(frame: av.AudioFrame) -> float:
+        def get_peak_level(frame: bv.AudioFrame) -> float:
             # Calculate peak level in dB
             # Should be equivalent to: -af astats=measure_overall=Peak_level:measure_perchannel=0
             max_amplitude = np.abs(frame.to_ndarray()).max()
@@ -109,7 +114,7 @@ def apply_audio_normalization(
                 return -20.0 * np.log10(max_amplitude)
             return -99.0
 
-        with av.open(pre_master) as container:
+        with bv.open(pre_master) as container:
            max_peak_level = -99.0
            assert len(container.streams.video) == 0
            for frame in container.decode(audio=0):
@@ -121,13 +126,13 @@ def apply_audio_normalization(
         log.print(f"peak adjustment: {adjustment:.3f}dB")
         name, filter_args = "volume", f"{adjustment}"
 
-    with av.open(pre_master) as container:
+    with bv.open(pre_master) as container:
         input_stream = container.streams.audio[0]
 
-        output_file = av.open(path, mode="w")
+        output_file = bv.open(path, mode="w")
         output_stream = output_file.add_stream("pcm_s16le", rate=input_stream.rate)
 
-        graph = av.filter.Graph()
+        graph = bv.filter.Graph()
         graph.link_nodes(
             graph.add_abuffer(template=input_stream),
             graph.add(name, filter_args),
@@ -138,9 +143,9 @@ def apply_audio_normalization(
         while True:
             try:
                 aframe = graph.pull()
-                assert isinstance(aframe, av.AudioFrame)
+                assert isinstance(aframe, bv.AudioFrame)
                 output_file.mux(output_stream.encode(aframe))
-            except (av.BlockingIOError, av.EOFError):
+            except (bv.BlockingIOError, bv.EOFError):
                 break
 
         output_file.mux(output_stream.encode(None))
@@ -154,14 +159,14 @@ def process_audio_clip(
     write(input_buffer, sr, samp_list[samp_start:samp_end])
     input_buffer.seek(0)
 
-    input_file = av.open(input_buffer, "r")
+    input_file = bv.open(input_buffer, "r")
     input_stream = input_file.streams.audio[0]
 
     output_bytes = io.BytesIO()
-    output_file = av.open(output_bytes, mode="w", format="wav")
+    output_file = bv.open(output_bytes, mode="w", format="wav")
     output_stream = output_file.add_stream("pcm_s16le", rate=sr)
 
-    graph = av.filter.Graph()
+    graph = bv.filter.Graph()
     args = [graph.add_abuffer(template=input_stream)]
 
     if clip.speed != 1:
@@ -191,9 +196,9 @@ def process_audio_clip(
     while True:
         try:
             aframe = graph.pull()
-            assert isinstance(aframe, av.AudioFrame)
+            assert isinstance(aframe, bv.AudioFrame)
             output_file.mux(output_stream.encode(aframe))
-        except (av.BlockingIOError, av.EOFError):
+        except (bv.BlockingIOError, bv.EOFError):
            break
 
     # Flush the stream
@@ -217,7 +222,7 @@ def mix_audio_files(sr: int, audio_paths: list[str], output_path: str) -> None:
 
     # First pass: determine the maximum length
    for path in audio_paths:
-        container = av.open(path)
+        container = bv.open(path)
         stream = container.streams.audio[0]
 
         # Calculate duration in samples
@@ -229,10 +234,10 @@ def mix_audio_files(sr: int, audio_paths: list[str], output_path: str) -> None:
 
     # Second pass: read and mix audio
     for path in audio_paths:
-        container = av.open(path)
+        container = bv.open(path)
         stream = container.streams.audio[0]
 
-        resampler = av.audio.resampler.AudioResampler(
+        resampler = bv.audio.resampler.AudioResampler(
             format="s16", layout="mono", rate=sr
         )
 
@@ -265,7 +270,7 @@ def mix_audio_files(sr: int, audio_paths: list[str], output_path: str) -> None:
     mixed_audio = mixed_audio * (32767 / max_val)
     mixed_audio = mixed_audio.astype(np.int16)  # type: ignore
 
-    output_container = av.open(output_path, mode="w")
+    output_container = bv.open(output_path, mode="w")
     output_stream = output_container.add_stream("pcm_s16le", rate=sr)
 
     chunk_size = sr  # Process 1 second at a time
@@ -273,7 +278,7 @@ def mix_audio_files(sr: int, audio_paths: list[str], output_path: str) -> None:
         # Shape becomes (1, samples) for mono
         chunk = np.array([mixed_audio[i : i + chunk_size]])
 
-        frame = av.AudioFrame.from_ndarray(chunk, format="s16", layout="mono")
+        frame = bv.AudioFrame.from_ndarray(chunk, format="s16", layout="mono")
         frame.rate = sr
         frame.pts = i  # Set presentation timestamp
 
@@ -367,7 +372,7 @@ def make_new_audio(
     except PermissionError:
         pass
 
-    if …
+    if args.mix_audio_streams and len(output) > 1:
         new_a_file = f"{Path(temp, 'new_audio.wav')}"
         mix_audio_files(sr, output, new_a_file)
         return [new_a_file]
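Every av → bv rename in this file sits around the same drain loop: feed the filter graph, then pull() until it raises BlockingIOError (needs more input) or EOFError (fully flushed). A condensed, self-contained sketch of the pattern, assuming bv keeps PyAV's Graph semantics; the file names and the volume filter are made up:

    import bv

    with bv.open("in.wav") as src:
        in_stream = src.streams.audio[0]
        out = bv.open("out.wav", mode="w")
        out_stream = out.add_stream("pcm_s16le", rate=in_stream.rate)

        graph = bv.filter.Graph()
        graph.link_nodes(
            graph.add_abuffer(template=in_stream),
            graph.add("volume", "3dB"),  # stand-in for loudnorm/atempo/etc.
            graph.add("abuffersink"),
        ).configure()

        for frame in src.decode(in_stream):
            graph.push(frame)
            while True:
                try:
                    out.mux(out_stream.encode(graph.pull()))
                except (bv.BlockingIOError, bv.EOFError):
                    break  # graph needs more input, or is done

        out.mux(out_stream.encode(None))  # flush the encoder
        out.close()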
auto_editor/render/subtitle.py
CHANGED
@@ -6,7 +6,7 @@ import re
 from dataclasses import dataclass
 from typing import TYPE_CHECKING
 
-import av
+import bv
 
 from auto_editor.utils.func import to_timecode
 
@@ -17,7 +17,7 @@ if TYPE_CHECKING:
     from auto_editor.utils.chunks import Chunks
     from auto_editor.utils.log import Log
 
-    Input = av.container.InputContainer
+    Input = bv.container.InputContainer
 
 
 @dataclass(slots=True)
@@ -138,18 +138,14 @@ def make_srt(input_: Input, stream: int) -> str:
         if packet.dts is None or packet.pts is None or packet.duration is None:
             continue
 
-        start = packet.pts * input_stream.time_base
-        end = start + packet.duration * input_stream.time_base
+        start_num = packet.pts * input_stream.time_base
+        start = to_timecode(start_num, "srt")
+        end = to_timecode(start_num + packet.duration * input_stream.time_base, "srt")
 
-        for subset in packet.decode():
-            start_time = to_timecode(start, "srt")
-            end_time = to_timecode(end, "srt")
+        for sub in packet.decode():
+            assert isinstance(sub, bv.subtitles.subtitle.AssSubtitle)
 
-            sub = subset[0]
-            assert len(subset) == 1
-            assert isinstance(sub, av.subtitles.subtitle.AssSubtitle)
-
-            output_bytes.write(f"{s}\n{start_time} --> {end_time}\n")
+            output_bytes.write(f"{s}\n{start} --> {end}\n")
             output_bytes.write(sub.dialogue.decode("utf-8", errors="ignore") + "\n\n")
             s += 1
 
@@ -159,7 +155,7 @@ def make_srt(input_: Input, stream: int) -> str:
 
 def _ensure(input_: Input, format: str, stream: int) -> str:
     output_bytes = io.BytesIO()
-    output = av.open(output_bytes, "w", format=format)
+    output = bv.open(output_bytes, "w", format=format)
 
     in_stream = input_.streams.subtitles[stream]
     out_stream = output.add_stream_from_template(in_stream)
@@ -179,7 +175,7 @@ def make_new_subtitles(tl: v3, log: Log) -> list[str]:
     if tl.v1 is None:
         return []
 
-    input_ = av.open(tl.v1.source.path)
+    input_ = bv.open(tl.v1.source.path)
     new_paths = []
 
     for s, sub in enumerate(tl.v1.source.subtitles):
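make_srt() now derives both SRT timestamps from a single exact Fraction (start_num) instead of carrying separate start/end values through the loop. A worked example of the arithmetic with made-up packet values:

    from fractions import Fraction

    time_base = Fraction(1, 1000)  # hypothetical subtitle stream time_base
    pts, duration = 90_500, 2_000  # hypothetical packet fields

    start_num = pts * time_base                 # Fraction(181, 2), i.e. 90.5s
    end_num = start_num + duration * time_base  # 92.5s
    assert (float(start_num), float(end_num)) == (90.5, 92.5)
    # to_timecode(x, "srt") would render these as "00:01:30,500" and
    # "00:01:32,500" in SRT's HH:MM:SS,mmm convention.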
|