auto-editor 28.0.1__py3-none-any.whl → 28.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- auto_editor/__init__.py +1 -1
- auto_editor/__main__.py +4 -3
- auto_editor/analyze.py +13 -13
- auto_editor/cmds/desc.py +2 -2
- auto_editor/cmds/levels.py +20 -12
- auto_editor/cmds/subdump.py +4 -4
- auto_editor/cmds/test.py +31 -27
- auto_editor/edit.py +64 -48
- auto_editor/exports/fcp11.py +40 -14
- auto_editor/exports/json.py +39 -8
- auto_editor/exports/kdenlive.py +322 -0
- auto_editor/exports/shotcut.py +1 -2
- auto_editor/ffwrapper.py +20 -9
- auto_editor/help.py +1 -0
- auto_editor/lang/stdenv.py +0 -5
- auto_editor/make_layers.py +3 -3
- auto_editor/preview.py +12 -21
- auto_editor/render/audio.py +42 -42
- auto_editor/render/subtitle.py +5 -5
- auto_editor/render/video.py +28 -33
- auto_editor/timeline.py +0 -35
- auto_editor/utils/container.py +2 -3
- auto_editor/utils/log.py +3 -1
- {auto_editor-28.0.1.dist-info → auto_editor-28.1.0.dist-info}/METADATA +2 -2
- {auto_editor-28.0.1.dist-info → auto_editor-28.1.0.dist-info}/RECORD +29 -28
- {auto_editor-28.0.1.dist-info → auto_editor-28.1.0.dist-info}/WHEEL +0 -0
- {auto_editor-28.0.1.dist-info → auto_editor-28.1.0.dist-info}/entry_points.txt +0 -0
- {auto_editor-28.0.1.dist-info → auto_editor-28.1.0.dist-info}/licenses/LICENSE +0 -0
- {auto_editor-28.0.1.dist-info → auto_editor-28.1.0.dist-info}/top_level.txt +0 -0
auto_editor/exports/fcp11.py
CHANGED
@@ -1,17 +1,10 @@
-from __future__ import annotations
-
 import xml.etree.ElementTree as ET
-from
+from fractions import Fraction
 from xml.etree.ElementTree import Element, ElementTree, SubElement, indent

+from auto_editor.ffwrapper import FileInfo
 from auto_editor.timeline import Clip, v3
-
-if TYPE_CHECKING:
-    from fractions import Fraction
-
-    from auto_editor.ffwrapper import FileInfo
-    from auto_editor.utils.log import Log
-
+from auto_editor.utils.log import Log

 """
 Export a FCPXML 11 file readable with Final Cut Pro 10.6.8 or later.
@@ -52,6 +45,35 @@ def make_name(src: FileInfo, tb: Fraction) -> str:
     return "FFVideoFormatRateUndefined"


+def parseSMPTE(val: str, fps: Fraction, log: Log) -> int:
+    if len(val) == 0:
+        return 0
+    try:
+        parts = val.split(":")
+        if len(parts) != 4:
+            raise ValueError(f"Invalid SMPTE format: {val}")
+
+        hours, minutes, seconds, frames = map(int, parts)
+
+        if (
+            hours < 0
+            or minutes < 0
+            or minutes >= 60
+            or seconds < 0
+            or seconds >= 60
+            or frames < 0
+        ):
+            raise ValueError(f"Invalid SMPTE values: {val}")
+
+        if frames >= fps:
+            raise ValueError(f"Frame count {frames} exceeds fps {fps}")
+
+        total_frames = (hours * 3600 + minutes * 60 + seconds) * fps + frames
+        return int(round(total_frames))
+    except (ValueError, ZeroDivisionError) as e:
+        log.error(f"Cannot parse SMPTE timecode '{val}': {e}")
+
+
 def fcp11_write_xml(
     group_name: str, version: int, output: str, resolve: bool, tl: v3, log: Log
 ) -> None:
@@ -90,12 +112,14 @@ def fcp11_write_xml(
             height=f"{tl.res[1]}",
             colorSpace=get_colorspace(one_src),
         )
+
+        startPoint = parseSMPTE(one_src.timecode, tl.tb, log)
         r2 = SubElement(
             resources,
             "asset",
             id=f"r{i * 2 + 2}",
             name=one_src.path.stem,
-            start=
+            start=fraction(startPoint),
             hasVideo="1" if one_src.videos else "0",
             format=f"r{i * 2 + 1}",
             hasAudio="1" if one_src.audios else "0",
@@ -122,12 +146,14 @@ def fcp11_write_xml(
     spine = SubElement(sequence, "spine")

     def make_clip(ref: str, clip: Clip) -> None:
+        startPoint = parseSMPTE(clip.src.timecode, tl.tb, log)
+
         clip_properties = {
             "name": proj_name,
             "ref": ref,
-            "offset": fraction(clip.start),
+            "offset": fraction(clip.start + startPoint),
             "duration": fraction(clip.dur),
-            "start": fraction(clip.offset),
+            "start": fraction(clip.offset + startPoint),
             "tcFormat": "NDF",
         }
         asset = SubElement(spine, "asset-clip", clip_properties)
@@ -146,7 +172,7 @@ def fcp11_write_xml(
         )

     if tl.v and tl.v[0]:
-        clips =
+        clips = [clip for clip in tl.v[0] if isinstance(clip, Clip)]
     elif tl.a and tl.a[0]:
         clips = tl.a[0]
     else:
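The new parseSMPTE helper converts the container's embedded SMPTE timecode into a frame count, and fcp11_write_xml feeds the result into the asset's start and each clip's offset/start so the FCPXML lines up with the source's real start time. A minimal standalone sketch of that arithmetic (the name smpte_to_frames is hypothetical; the real helper above additionally validates each field and reports failures through Log):

    from fractions import Fraction

    def smpte_to_frames(val: str, fps: Fraction) -> int:
        # HH:MM:SS:FF -> total frames, same math as parseSMPTE above
        hours, minutes, seconds, frames = map(int, val.split(":"))
        return int(round((hours * 3600 + minutes * 60 + seconds) * fps + frames))

    print(smpte_to_frames("00:01:30:12", Fraction(30)))  # 2712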
auto_editor/exports/json.py
CHANGED
@@ -1,13 +1,44 @@
-from __future__ import annotations
-
 import sys
-from typing import TYPE_CHECKING

 from auto_editor.json import dump
-from auto_editor.timeline import v3
-
-
-
+from auto_editor.timeline import Clip, v3
+from auto_editor.utils.log import Log
+
+
+def as_dict(self: v3) -> dict:
+    def aclip_to_dict(self: Clip) -> dict:
+        return {
+            "name": "audio",
+            "src": self.src,
+            "start": self.start,
+            "dur": self.dur,
+            "offset": self.offset,
+            "speed": self.speed,
+            "volume": self.volume,
+            "stream": self.stream,
+        }
+
+    v = []
+    a = []
+    for vlayer in self.v:
+        vb = [vobj.as_dict() for vobj in vlayer]
+        if vb:
+            v.append(vb)
+    for layer in self.a:
+        ab = [aclip_to_dict(clip) for clip in layer]
+        if ab:
+            a.append(ab)
+
+    return {
+        "version": "3",
+        "timebase": f"{self.tb.numerator}/{self.tb.denominator}",
+        "background": self.background,
+        "resolution": self.T.res,
+        "samplerate": self.T.sr,
+        "layout": self.T.layout,
+        "v": v,
+        "a": a,
+    }


 def make_json_timeline(ver: str, out: str, tl: v3, log: Log) -> None:
@@ -20,7 +51,7 @@ def make_json_timeline(ver: str, out: str, tl: v3, log: Log) -> None:
         outfile = open(out, "w")

     if ver == "v3":
-        dump(
+        dump(as_dict(tl), outfile, indent=2)
     else:
         if tl.v1 is None:
             log.error("Timeline can't be converted to v1 format")
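With as_dict now living in the exporter, the v3 export writes a JSON document whose top-level keys mirror the dict built above. An illustrative shape only (every value here is made up; the real src, resolution, and layer contents come from the loaded timeline):

    example_v3 = {
        "version": "3",
        "timebase": "30/1",  # f"{tb.numerator}/{tb.denominator}"
        "background": "#000000",
        "resolution": [1920, 1080],
        "samplerate": 48000,
        "layout": "stereo",
        "v": [],  # video layers, one list of clip dicts per layer
        "a": [],  # audio layers, built by aclip_to_dict above
    }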
auto_editor/exports/kdenlive.py
ADDED
@@ -0,0 +1,322 @@
+import json
+import xml.etree.ElementTree as ET
+from os import getcwd
+from uuid import uuid4
+
+from auto_editor.timeline import Clip, v3
+from auto_editor.utils.func import aspect_ratio, to_timecode
+
+"""
+kdenlive uses the MLT timeline format
+
+See docs here:
+https://mltframework.org/docs/mltxml/
+
+kdenlive specifics:
+https://github.com/KDE/kdenlive/blob/master/dev-docs/fileformat.md
+"""
+
+
+def kdenlive_write(output: str, tl: v3) -> None:
+    mlt = ET.Element(
+        "mlt",
+        attrib={
+            "LC_NUMERIC": "C",
+            "version": "7.22.0",
+            "producer": "main_bin",
+            "root": f"{getcwd()}",
+        },
+    )
+
+    width, height = tl.res
+    num, den = aspect_ratio(width, height)
+    tb = tl.tb
+    seq_uuid = uuid4()
+
+    ET.SubElement(
+        mlt,
+        "profile",
+        attrib={
+            "description": "automatic",
+            "width": f"{width}",
+            "height": f"{height}",
+            "progressive": "1",
+            "sample_aspect_num": "1",
+            "sample_aspect_den": "1",
+            "display_aspect_num": f"{num}",
+            "display_aspect_den": f"{den}",
+            "frame_rate_num": f"{tb.numerator}",
+            "frame_rate_den": f"{tb.denominator}",
+            "colorspace": "709",
+        },
+    )
+
+    # Reserved producer0
+    global_out = to_timecode(len(tl) / tb, "standard")
+    producer = ET.SubElement(mlt, "producer", id="producer0")
+    ET.SubElement(producer, "property", name="length").text = global_out
+    ET.SubElement(producer, "property", name="eof").text = "continue"
+    ET.SubElement(producer, "property", name="resource").text = "black"
+    ET.SubElement(producer, "property", name="mlt_service").text = "color"
+    ET.SubElement(producer, "property", name="kdenlive:playlistid").text = "black_track"
+    ET.SubElement(producer, "property", name="mlt_image_format").text = "rgba"
+    ET.SubElement(producer, "property", name="aspect_ratio").text = "1"
+
+    # Get all clips
+    if tl.v:
+        clips = [clip for clip in tl.v[0] if isinstance(clip, Clip)]
+    elif tl.a:
+        clips = tl.a[0]
+    else:
+        clips = []
+
+    source_ids = {}
+    source_id = 4
+    clip_playlists = []
+    chains = 0
+    playlists = 0
+    producers = 1
+    a_channels = len(tl.a)
+    v_channels = len(tl.v)
+    warped_clips = [i for i, clip in enumerate(clips) if clip.speed != 1]
+
+    # create all producers for warped clips
+    for clip_idx in warped_clips:
+        for i in range(a_channels + v_channels):
+            clip = clips[clip_idx]
+            path = str(clip.src.path)
+
+            if path not in source_ids:
+                source_ids[path] = str(source_id)
+                source_id += 1
+
+            prod = ET.SubElement(
+                mlt,
+                "producer",
+                attrib={
+                    "id": f"producer{producers}",
+                    "in": "00:00:00.000",
+                    "out": global_out,
+                },
+            )
+            ET.SubElement(
+                prod, "property", name="resource"
+            ).text = f"{clip.speed}:{path}"
+            ET.SubElement(prod, "property", name="warp_speed").text = str(clip.speed)
+            ET.SubElement(prod, "property", name="warp_resource").text = path
+            ET.SubElement(prod, "property", name="warp_pitch").text = "0"
+            ET.SubElement(prod, "property", name="mlt_service").text = "timewarp"
+            ET.SubElement(prod, "property", name="kdenlive:id").text = source_ids[path]
+
+            if i < a_channels:
+                ET.SubElement(prod, "property", name="vstream").text = "0"
+                ET.SubElement(prod, "property", name="astream").text = str(
+                    a_channels - 1 - i
+                )
+                ET.SubElement(prod, "property", name="set.test_audio").text = "0"
+                ET.SubElement(prod, "property", name="set.test_video").text = "1"
+            else:
+                ET.SubElement(prod, "property", name="vstream").text = str(
+                    v_channels - 1 - (i - a_channels)
+                )
+                ET.SubElement(prod, "property", name="astream").text = "0"
+                ET.SubElement(prod, "property", name="set.test_audio").text = "1"
+                ET.SubElement(prod, "property", name="set.test_video").text = "0"
+
+            producers += 1
+
+    # create chains, playlists and tractors for audio channels
+    for i, audio in enumerate(tl.a):
+        path = str(audio[0].src.path)
+
+        if path not in source_ids:
+            source_ids[path] = str(source_id)
+            source_id += 1
+
+        chain = ET.SubElement(mlt, "chain", attrib={"id": f"chain{chains}"})
+        ET.SubElement(chain, "property", name="resource").text = path
+        ET.SubElement(
+            chain, "property", name="mlt_service"
+        ).text = "avformat-novalidate"
+        ET.SubElement(chain, "property", name="vstream").text = "0"
+        ET.SubElement(chain, "property", name="astream").text = str(a_channels - 1 - i)
+        ET.SubElement(chain, "property", name="set.test_audio").text = "0"
+        ET.SubElement(chain, "property", name="set.test_video").text = "1"
+        ET.SubElement(chain, "property", name="kdenlive:id").text = source_ids[path]
+
+        for _i in range(2):
+            playlist = ET.SubElement(mlt, "playlist", id=f"playlist{playlists}")
+            clip_playlists.append(playlist)
+            ET.SubElement(playlist, "property", name="kdenlive:audio_track").text = "1"
+            playlists += 1
+
+        tractor = ET.SubElement(
+            mlt,
+            "tractor",
+            attrib={"id": f"tractor{chains}", "in": "00:00:00.000", "out": global_out},
+        )
+        ET.SubElement(tractor, "property", name="kdenlive:audio_track").text = "1"
+        ET.SubElement(tractor, "property", name="kdenlive:timeline_active").text = "1"
+        ET.SubElement(tractor, "property", name="kdenlive:audio_rec")
+        ET.SubElement(
+            tractor,
+            "track",
+            attrib={"hide": "video", "producer": f"playlist{playlists - 2}"},
+        )
+        ET.SubElement(
+            tractor,
+            "track",
+            attrib={"hide": "video", "producer": f"playlist{playlists - 1}"},
+        )
+        chains += 1
+
+    # create chains, playlists and tractors for video channels
+    for i, video in enumerate(tl.v):
+        path = f"{video[0].src.path}"  # type: ignore
+
+        if path not in source_ids:
+            source_ids[path] = str(source_id)
+            source_id += 1
+
+        chain = ET.SubElement(mlt, "chain", attrib={"id": f"chain{chains}"})
+        ET.SubElement(chain, "property", name="resource").text = path
+        ET.SubElement(
+            chain, "property", name="mlt_service"
+        ).text = "avformat-novalidate"
+        ET.SubElement(chain, "property", name="vstream").text = str(v_channels - 1 - i)
+        ET.SubElement(chain, "property", name="astream").text = "0"
+        ET.SubElement(chain, "property", name="set.test_audio").text = "1"
+        ET.SubElement(chain, "property", name="set.test_video").text = "0"
+        ET.SubElement(chain, "property", name="kdenlive:id").text = source_ids[path]
+
+        for _i in range(2):
+            playlist = ET.SubElement(mlt, "playlist", id=f"playlist{playlists}")
+            clip_playlists.append(playlist)
+            playlists += 1
+
+        tractor = ET.SubElement(
+            mlt,
+            "tractor",
+            attrib={"id": f"tractor{chains}", "in": "00:00:00.000", "out": global_out},
+        )
+        ET.SubElement(tractor, "property", name="kdenlive:timeline_active").text = "1"
+        ET.SubElement(
+            tractor,
+            "track",
+            attrib={"hide": "audio", "producer": f"playlist{playlists - 2}"},
+        )
+        ET.SubElement(
+            tractor,
+            "track",
+            attrib={"hide": "audio", "producer": f"playlist{playlists - 1}"},
+        )
+        chains += 1
+
+    # final chain for the project bin
+    path = str(clips[0].src.path)
+    chain = ET.SubElement(mlt, "chain", attrib={"id": f"chain{chains}"})
+    ET.SubElement(chain, "property", name="resource").text = path
+    ET.SubElement(chain, "property", name="mlt_service").text = "avformat-novalidate"
+    ET.SubElement(chain, "property", name="audio_index").text = "1"
+    ET.SubElement(chain, "property", name="video_index").text = "0"
+    ET.SubElement(chain, "property", name="vstream").text = "0"
+    ET.SubElement(chain, "property", name="astream").text = "0"
+    ET.SubElement(chain, "property", name="kdenlive:id").text = source_ids[path]
+
+    groups = []
+    group_counter = 0
+    producers = 1
+
+    for clip in clips:
+        group_children: list[object] = []
+        _in = to_timecode(clip.offset / tb, "standard")
+        _out = to_timecode((clip.offset + clip.dur) / tb, "standard")
+        path = str(clip.src.path)
+
+        for i, playlist in enumerate(clip_playlists[::2]):
+            # adding 1 extra frame for each previous group to the start time works but feels hacky?
+            group_children.append(
+                {
+                    "data": f"{i}:{clip.start + group_counter}",
+                    "leaf": "clip",
+                    "type": "Leaf",
+                }
+            )
+            clip_prod = ""
+
+            if clip.speed == 1:
+                clip_prod = f"chain{i}"
+            else:
+                clip_prod = f"producer{producers}"
+                producers += 1
+
+            entry = ET.SubElement(
+                playlist,
+                "entry",
+                attrib={"producer": f"{clip_prod}", "in": _in, "out": _out},
+            )
+            ET.SubElement(entry, "property", name="kdenlive:id").text = source_ids[path]
+
+        groups.append({"children": group_children[:], "type": "Normal"})
+        group_counter += 1
+
+    # default sequence tractor
+    sequence = ET.SubElement(
+        mlt,
+        "tractor",
+        attrib={"id": f"{{{seq_uuid}}}", "in": "00:00:00.000", "out": "00:00:00.000"},
+    )
+    ET.SubElement(sequence, "property", name="kdenlive:uuid").text = f"{{{seq_uuid}}}"
+    ET.SubElement(sequence, "property", name="kdenlive:clipname").text = "Sequence 1"
+    ET.SubElement(
+        sequence, "property", name="kdenlive:sequenceproperties.groups"
+    ).text = json.dumps(groups, indent=4)
+    ET.SubElement(sequence, "track", producer="producer0")
+
+    for i in range(chains):
+        ET.SubElement(sequence, "track", producer=f"tractor{i}")
+
+    # main bin
+    playlist_bin = ET.SubElement(mlt, "playlist", id="main_bin")
+    ET.SubElement(
+        playlist_bin, "property", name="kdenlive:docproperties.uuid"
+    ).text = f"{{{seq_uuid}}}"
+    ET.SubElement(
+        playlist_bin, "property", name="kdenlive:docproperties.version"
+    ).text = "1.1"
+    ET.SubElement(playlist_bin, "property", name="xml_retain").text = "1"
+    ET.SubElement(
+        playlist_bin,
+        "entry",
+        attrib={
+            "producer": f"{{{seq_uuid}}}",
+            "in": "00:00:00.000",
+            "out": "00:00:00.000",
+        },
+    )
+    ET.SubElement(
+        playlist_bin,
+        "entry",
+        attrib={"producer": f"chain{chains}", "in": "00:00:00.000"},
+    )
+
+    # reserved last tractor for project
+    tractor = ET.SubElement(
+        mlt,
+        "tractor",
+        attrib={"id": f"tractor{chains}", "in": "00:00:00.000", "out": global_out},
+    )
+    ET.SubElement(tractor, "property", name="kdenlive:projectTractor").text = "1"
+    ET.SubElement(
+        tractor,
+        "track",
+        attrib={"producer": f"{{{seq_uuid}}}", "in": "00:00:00.000", "out": global_out},
+    )
+    tree = ET.ElementTree(mlt)
+
+    ET.indent(tree, space="\t", level=0)
+
+    if output == "-":
+        print(ET.tostring(mlt, encoding="unicode"))
+    else:
+        tree.write(output, xml_declaration=True, encoding="utf-8")
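The exporter writes a plain MLT XML project, so the result can be opened directly in kdenlive. Assuming the existing --export flag simply gains the new value advertised in help.py below, a typical invocation would look like:

    auto-editor input.mp4 --export kdenlive

Speed-changed clips get their own timewarp producer (resource "speed:path"), while normal-speed clips reference the per-track avformat-novalidate chains.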
auto_editor/exports/shotcut.py
CHANGED
@@ -1,5 +1,4 @@
 import xml.etree.ElementTree as ET
-from typing import cast

 from auto_editor.timeline import Clip, v3
 from auto_editor.utils.func import aspect_ratio, to_timecode
@@ -71,7 +70,7 @@ def shotcut_write_mlt(output: str, tl: v3) -> None:
     producers = 0

     if tl.v:
-        clips =
+        clips = [clip for clip in tl.v[0] if isinstance(clip, Clip)]
     elif tl.a:
         clips = tl.a[0]
     else:
auto_editor/ffwrapper.py
CHANGED
@@ -4,14 +4,14 @@ from dataclasses import dataclass
 from fractions import Fraction
 from pathlib import Path

-import
+import av

 from auto_editor.utils.log import Log


 def mux(input: Path, output: Path, stream: int) -> None:
-    input_container =
-    output_container =
+    input_container = av.open(input, "r")
+    output_container = av.open(output, "w")

     input_audio_stream = input_container.streams.audio[stream]
     output_audio_stream = output_container.add_stream("pcm_s16le")
@@ -66,6 +66,7 @@ class FileInfo:
     path: Path
     bitrate: int
     duration: float
+    timecode: str  # in SMPTE
     videos: tuple[VideoStream, ...]
     audios: tuple[AudioStream, ...]
     subtitles: tuple[SubtitleStream, ...]
@@ -88,12 +89,12 @@ class FileInfo:
     @classmethod
     def init(self, path: str, log: Log) -> FileInfo:
         try:
-            cont =
-        except
+            cont = av.open(path, "r")
+        except av.error.FileNotFoundError:
             log.error(f"Input file doesn't exist: {path}")
-        except
+        except av.error.IsADirectoryError:
             log.error(f"Expected a media file, but got a directory: {path}")
-        except
+        except av.error.InvalidDataError:
             log.error(f"Invalid data when processing: {path}")

         videos: tuple[VideoStream, ...] = ()
@@ -165,12 +166,22 @@ class FileInfo:
             ext = sub_exts.get(codec, "vtt")
             subtitles += (SubtitleStream(codec, ext, s.language),)

+        def get_timecode() -> str:
+            for d in cont.streams.data:
+                if (result := d.metadata.get("timecode")) is not None:
+                    return result
+            for v in cont.streams.video:
+                if (result := v.metadata.get("timecode")) is not None:
+                    return result
+            return "00:00:00:00"
+
+        timecode = get_timecode()
         bitrate = 0 if cont.bit_rate is None else cont.bit_rate
-        dur = 0 if cont.duration is None else cont.duration /
+        dur = 0 if cont.duration is None else cont.duration / av.time_base

         cont.close()

-        return FileInfo(Path(path), bitrate, dur, videos, audios, subtitles)
+        return FileInfo(Path(path), bitrate, dur, timecode, videos, audios, subtitles)

     def __repr__(self) -> str:
         return f"@{self.path.name}"
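FileInfo.init now records the container's embedded SMPTE timecode, checking data streams first and then video streams. A rough standalone equivalent of that lookup with PyAV (the file name here is an assumption of this sketch, not part of the diff):

    import av

    tc = "00:00:00:00"  # fallback, same default FileInfo uses
    with av.open("example.mp4") as container:
        for stream in list(container.streams.data) + list(container.streams.video):
            if (found := stream.metadata.get("timecode")) is not None:
                tc = found
                break
    print(tc)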
auto_editor/help.py
CHANGED
@@ -78,6 +78,7 @@ Export Methods:
 - final-cut-pro ; Export as an XML timeline file for Final Cut Pro
 - name : "Auto-Editor Media Group"
 - shotcut ; Export as an XML timeline file for Shotcut
+- kdenlive ; Export as an XML timeline file for kdenlive
 - v3 ; Export as an auto-editor v3 timeline file
 - v1 ; Export as an auto-editor v1 timeline file
 - clip-sequence ; Export as multiple numbered media files
auto_editor/lang/stdenv.py
CHANGED
@@ -3,8 +3,6 @@ from __future__ import annotations
 from dataclasses import dataclass
 from typing import TYPE_CHECKING

-import bv
-
 from auto_editor.analyze import mut_remove_large, mut_remove_small
 from auto_editor.lib.contracts import *
 from auto_editor.lib.data_structs import *
@@ -1169,9 +1167,6 @@ def make_standard_env() -> dict[str, Any]:
             "string->vector", lambda s: [Char(c) for c in s], (1, 1), is_str
         ),
         "range->vector": Proc("range->vector", list, (1, 1), is_range),
-        # av
-        "encoder": Proc("encoder", lambda x: bv.Codec(x, "w"), (1, 1), is_str),
-        "decoder": Proc("decoder", lambda x: bv.Codec(x), (1, 1), is_str),
         # reflexion
         "var-exists?": Proc("var-exists?", lambda sym: sym.val in env, (1, 1), is_symbol),
         "rename": Syntax(syn_rename),
auto_editor/make_layers.py
CHANGED
@@ -299,9 +299,9 @@ def make_timeline(

     if len(sources) == 1 and inp is not None:
         chunks = chunkify(speed_index, speed_hash)
-
+        v1_compatible = v1(inp, chunks)
     else:
-
+        v1_compatible = None

     if len(vtl) == 0 and len(atl) == 0:
         log.error("Timeline is empty, nothing to do.")
@@ -312,4 +312,4 @@ def make_timeline(
     else:
         template = Template.init(inp, sr, args.audio_layout, res)

-    return v3(tb, args.background, template, vtl, atl,
+    return v3(tb, args.background, template, vtl, atl, v1_compatible)
auto_editor/preview.py
CHANGED
@@ -28,23 +28,24 @@ def time_frame(
 def all_cuts(tl: v3, in_len: int) -> list[int]:
     # Calculate cuts
     tb = tl.tb
-
+    clip_spans: list[tuple[int, int]] = []

     for clip in tl.a[0]:
         old_offset = clip.offset * clip.speed
-
+        clip_spans.append((round(old_offset), round(old_offset + clip.dur)))

     cut_lens = []
     i = 0
-    while i < len(
-        if i == 0 and
-            cut_lens.append(
+    while i < len(clip_spans) - 1:
+        if i == 0 and clip_spans[i][0] != 0:
+            cut_lens.append(clip_spans[i][0])

-        cut_lens.append(
+        cut_lens.append(clip_spans[i + 1][0] - clip_spans[i][1])
         i += 1

-    if len(
-        cut_lens.append(in_len -
+    if len(clip_spans) > 0 and clip_spans[-1][1] < round(in_len / tb):
+        cut_lens.append(in_len - clip_spans[-1][1])
+
     return cut_lens


@@ -53,19 +54,9 @@ def preview(tl: v3, log: Log) -> None:
     tb = tl.tb

     # Calculate input videos length
-    all_sources = set()
-    for vlayer in tl.v:
-        for vclip in vlayer:
-            if hasattr(vclip, "src"):
-                all_sources.add(vclip.src)
-    for alayer in tl.a:
-        for aclip in alayer:
-            if hasattr(aclip, "src"):
-                all_sources.add(aclip.src)
-
     in_len = 0
     bar = initBar("none")
-    for src in
+    for src in tl.unique_sources():
         in_len += initLevels(src, tb, bar, False, log).media_length

     out_len = len(tl)
@@ -77,7 +68,7 @@ def preview(tl: v3, log: Log) -> None:
     time_frame(fp, "output", out_len, tb, f"{round((out_len / in_len) * 100, 2)}%")
     time_frame(fp, "diff", diff, tb, f"{round((diff / in_len) * 100, 2)}%")

-    clip_lens = [clip.dur
+    clip_lens = [clip.dur for clip in tl.a[0]]
     log.debug(clip_lens)

     fp.write(f"clips:\n - amount: {len(clip_lens)}\n")
@@ -90,7 +81,7 @@ def preview(tl: v3, log: Log) -> None:

     cut_lens = all_cuts(tl, in_len)
     log.debug(cut_lens)
-    fp.write(f"cuts:\n - amount: {len(
+    fp.write(f"cuts:\n - amount: {len(cut_lens)}\n")
     if len(cut_lens) > 0:
         time_frame(fp, "smallest", min(cut_lens), tb)
         time_frame(fp, "largest", max(cut_lens), tb)
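The rewritten all_cuts builds (start, end) spans for every kept audio clip and measures the gaps between them: a leading gap is counted when the first clip does not start at frame 0, and a trailing gap when the last clip ends before the end of the input. A small self-contained sketch of that gap logic (numbers are invented):

    spans = [(30, 100), (150, 300)]  # (start, end) of kept clips, in frames
    cuts = []
    if spans and spans[0][0] != 0:
        cuts.append(spans[0][0])  # silence removed before the first clip
    for (_, prev_end), (next_start, _) in zip(spans, spans[1:]):
        cuts.append(next_start - prev_end)  # gap between consecutive clips
    print(cuts)  # [30, 50]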