auto-editor 27.1.1-py3-none-any.whl → 28.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -7,9 +7,7 @@ from typing import TYPE_CHECKING
 from xml.etree.ElementTree import Element
 
 from auto_editor.ffwrapper import FileInfo
-from auto_editor.timeline import ASpace, Template, TlAudio, TlVideo, VSpace, v3
-
-from .utils import Validator, show
+from auto_editor.timeline import TlVideo, v3
 
 if TYPE_CHECKING:
     from auto_editor.utils.log import Log
@@ -27,28 +25,6 @@ come back the way they started.
 DEPTH = "16"
 
 
-def uri_to_path(uri: str) -> str:
-    urllib = __import__("urllib.parse", fromlist=["parse"])
-
-    if uri.startswith("file://localhost/"):
-        uri = uri[16:]
-    elif uri.startswith("file://"):
-        # Windows-style paths
-        if len(uri) > 8 and uri[9] == ":":
-            uri = uri[8:]
-        else:
-            uri = uri[7:]
-    else:
-        return uri
-
-    return urllib.parse.unquote(uri)
-
-
-# /Users/wyattblue/projects/auto-editor/example.mp4
-# file:///Users/wyattblue/projects/auto-editor/example.mp4
-# file:///C:/Users/WyattBlue/projects/auto-editor/example.mp4
-# file://localhost/Users/wyattblue/projects/auto-editor/example.mp4
-
-
 def set_tb_ntsc(tb: Fraction) -> tuple[int, str]:
     # See chart: https://developer.apple.com/library/archive/documentation/AppleApplications/Reference/FinalCutPro_XML/FrameRate/FrameRate.html#//apple_ref/doc/uid/TP30001158-TPXREF103
     if tb == Fraction(24000, 1001):
@@ -65,19 +41,6 @@ def set_tb_ntsc(tb: Fraction) -> tuple[int, str]:
     return int(tb), "FALSE"
 
 
-def read_tb_ntsc(tb: int, ntsc: bool) -> Fraction:
-    if ntsc:
-        if tb == 24:
-            return Fraction(24000, 1001)
-        if tb == 30:
-            return Fraction(30000, 1001)
-        if tb == 60:
-            return Fraction(60000, 1001)
-        return tb * Fraction(999, 1000)
-
-    return Fraction(tb)
-
-
 def speedup(speed: float) -> Element:
     fil = Element("filter")
     effect = ET.SubElement(fil, "effect")
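For reference, the NTSC mapping that read_tb_ntsc implements (removed here, re-added unchanged in the new module later in this diff) can be exercised directly. A small illustrative sketch, not part of the package:

from fractions import Fraction

# Standard NTSC timebases map to their exact rational rates:
assert read_tb_ntsc(24, True) == Fraction(24000, 1001)
assert read_tb_ntsc(30, True) == Fraction(30000, 1001)
# Non-NTSC timebases pass through unchanged:
assert read_tb_ntsc(25, False) == Fraction(25)
# Other NTSC timebases fall back to the 999/1000 approximation:
assert read_tb_ntsc(48, True) == Fraction(48) * Fraction(999, 1000)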
@@ -139,167 +102,6 @@ def read_filters(clipitem: Element, log: Log) -> float:
     return 1.0
 
 
-def fcp7_read_xml(path: str, log: Log) -> v3:
-    def xml_bool(val: str) -> bool:
-        if val == "TRUE":
-            return True
-        if val == "FALSE":
-            return False
-        raise TypeError("Value must be 'TRUE' or 'FALSE'")
-
-    try:
-        tree = ET.parse(path)
-    except FileNotFoundError:
-        log.error(f"Could not find '{path}'")
-
-    root = tree.getroot()
-
-    valid = Validator(log)
-
-    valid.check(root, "xmeml")
-    valid.check(root[0], "sequence")
-    result = valid.parse(
-        root[0],
-        {
-            "name": str,
-            "duration": int,
-            "rate": {
-                "timebase": Fraction,
-                "ntsc": xml_bool,
-            },
-            "media": None,
-        },
-    )
-
-    tb = read_tb_ntsc(result["rate"]["timebase"], result["rate"]["ntsc"])
-
-    av = valid.parse(
-        result["media"],
-        {
-            "video": None,
-            "audio": None,
-        },
-    )
-
-    sources: dict[str, FileInfo] = {}
-    vobjs: VSpace = []
-    aobjs: ASpace = []
-
-    vclip_schema = {
-        "format": {
-            "samplecharacteristics": {
-                "width": int,
-                "height": int,
-            },
-        },
-        "track": {
-            "__arr": "",
-            "clipitem": {
-                "__arr": "",
-                "start": int,
-                "end": int,
-                "in": int,
-                "out": int,
-                "file": None,
-                "filter": None,
-            },
-        },
-    }
-
-    aclip_schema = {
-        "format": {"samplecharacteristics": {"samplerate": int}},
-        "track": {
-            "__arr": "",
-            "clipitem": {
-                "__arr": "",
-                "start": int,
-                "end": int,
-                "in": int,
-                "out": int,
-                "file": None,
-                "filter": None,
-            },
-        },
-    }
-
-    sr = 48000
-    res = (1920, 1080)
-
-    if "video" in av:
-        tracks = valid.parse(av["video"], vclip_schema)
-
-        if "format" in tracks:
-            width = tracks["format"]["samplecharacteristics"]["width"]
-            height = tracks["format"]["samplecharacteristics"]["height"]
-            res = width, height
-
-        for t, track in enumerate(tracks["track"]):
-            if len(track["clipitem"]) > 0:
-                vobjs.append([])
-            for i, clipitem in enumerate(track["clipitem"]):
-                file_id = clipitem["file"].attrib["id"]
-                if file_id not in sources:
-                    fileobj = valid.parse(clipitem["file"], {"pathurl": str})
-
-                    if "pathurl" in fileobj:
-                        sources[file_id] = FileInfo.init(
-                            uri_to_path(fileobj["pathurl"]),
-                            log,
-                        )
-                    else:
-                        show(clipitem["file"], 3)
-                        log.error(
-                            f"'pathurl' child element not found in {clipitem['file'].tag}"
-                        )
-
-                if "filter" in clipitem:
-                    speed = read_filters(clipitem["filter"], log)
-                else:
-                    speed = 1.0
-
-                start = clipitem["start"]
-                dur = clipitem["end"] - start
-                offset = clipitem["in"]
-
-                vobjs[t].append(
-                    TlVideo(start, dur, sources[file_id], offset, speed, stream=0)
-                )
-
-    if "audio" in av:
-        tracks = valid.parse(av["audio"], aclip_schema)
-        if "format" in tracks:
-            sr = tracks["format"]["samplecharacteristics"]["samplerate"]
-
-        for t, track in enumerate(tracks["track"]):
-            if len(track["clipitem"]) > 0:
-                aobjs.append([])
-            for i, clipitem in enumerate(track["clipitem"]):
-                file_id = clipitem["file"].attrib["id"]
-                if file_id not in sources:
-                    fileobj = valid.parse(clipitem["file"], {"pathurl": str})
-                    sources[file_id] = FileInfo.init(
-                        uri_to_path(fileobj["pathurl"]), log
-                    )
-
-                if "filter" in clipitem:
-                    speed = read_filters(clipitem["filter"], log)
-                else:
-                    speed = 1.0
-
-                start = clipitem["start"]
-                dur = clipitem["end"] - start
-                offset = clipitem["in"]
-
-                aobjs[t].append(
-                    TlAudio(
-                        start, dur, sources[file_id], offset, speed, volume=1, stream=0
-                    )
-                )
-
-    T = Template.init(sources[next(iter(sources))], sr, res=res)
-    return v3(tb, "#000", T, vobjs, aobjs, v1=None)
-
-
 def media_def(
     filedef: Element, url: str, src: FileInfo, tl: v3, tb: int, ntsc: str
 ) -> None:
@@ -472,7 +274,7 @@ def fcp7_write_xml(name: str, output: str, resolve: bool, tl: v3) -> None:
     sequence = ET.SubElement(xmeml, "sequence", explodedTracks="true")
 
     ET.SubElement(sequence, "name").text = name
-    ET.SubElement(sequence, "duration").text = f"{int(tl.out_len())}"
+    ET.SubElement(sequence, "duration").text = f"{len(tl)}"
     rate = ET.SubElement(sequence, "rate")
     ET.SubElement(rate, "timebase").text = f"{timebase}"
     ET.SubElement(rate, "ntsc").text = ntsc
@@ -542,4 +344,7 @@ def fcp7_write_xml(name: str, output: str, resolve: bool, tl: v3) -> None:
 
     tree = ET.ElementTree(xmeml)
     ET.indent(tree, space=" ", level=0)
-    tree.write(output, xml_declaration=True, encoding="utf-8")
+    if output == "-":
+        print(ET.tostring(xmeml, encoding="unicode"))
+    else:
+        tree.write(output, xml_declaration=True, encoding="utf-8")
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+import sys
+from typing import TYPE_CHECKING
+
+from auto_editor.json import dump
+from auto_editor.timeline import v3
+
+if TYPE_CHECKING:
+    from auto_editor.utils.log import Log
+
+
+def make_json_timeline(ver: str, out: str, tl: v3, log: Log) -> None:
+    if ver not in {"v1", "v3"}:
+        log.error(f"Unknown timeline version: {ver}")
+
+    if out == "-":
+        outfile = sys.stdout
+    else:
+        outfile = open(out, "w")
+
+    if ver == "v3":
+        dump(tl.as_dict(), outfile, indent=2)
+    else:
+        if tl.v1 is None:
+            log.error("Timeline can't be converted to v1 format")
+        dump(tl.v1.as_dict(), outfile, indent=2)
+
+    if out == "-":
+        print("")  # Flush stdout
+    else:
+        outfile.close()
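The new make_json_timeline helper above writes either the v3 or v1 form of a timeline, to stdout when the output argument is "-" and to a file otherwise. A minimal usage sketch, assuming a v3 timeline tl and a Log instance log already produced by auto-editor's own pipeline, and that the helper is importable from this new module:

# Hypothetical driver code, not part of the package.
# `tl` is a v3 timeline and `log` an auto_editor.utils.log.Log instance,
# both assumed to come from auto-editor's pipeline.
make_json_timeline("v3", "-", tl, log)  # print the v3 timeline to stdout
make_json_timeline("v1", "timeline_v1.json", tl, log)  # write the v1 form to a file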
@@ -1,16 +1,12 @@
-from __future__ import annotations
-
 import xml.etree.ElementTree as ET
 from typing import TYPE_CHECKING, Any, cast
 
-from auto_editor.timeline import v3
+from auto_editor.timeline import TlAudio, TlVideo, v3
 from auto_editor.utils.func import aspect_ratio, to_timecode
 
 if TYPE_CHECKING:
     from collections.abc import Sequence
 
-    from auto_editor.timeline import TlAudio, TlVideo
-    from auto_editor.utils.log import Log
 
 """
 Shotcut uses the MLT timeline format
@@ -21,10 +17,6 @@ https://mltframework.org/docs/mltxml/
 """
 
 
-def shotcut_read_mlt(path: str, log: Log) -> v3:
-    raise NotImplementedError
-
-
 def shotcut_write_mlt(output: str, tl: v3) -> None:
     mlt = ET.Element(
         "mlt",
@@ -61,7 +53,7 @@ def shotcut_write_mlt(output: str, tl: v3) -> None:
     playlist_bin = ET.SubElement(mlt, "playlist", id="main_bin")
     ET.SubElement(playlist_bin, "property", name="xml_retain").text = "1"
 
-    global_out = to_timecode(tl.out_len() / tb, "standard")
+    global_out = to_timecode(len(tl) / tb, "standard")
 
     producer = ET.SubElement(mlt, "producer", id="bg")
 
@@ -154,4 +146,7 @@ def shotcut_write_mlt(output: str, tl: v3) -> None:
 
     ET.indent(tree, space="\t", level=0)
 
-    tree.write(output, xml_declaration=True, encoding="utf-8")
+    if output == "-":
+        print(ET.tostring(mlt, encoding="unicode"))
+    else:
+        tree.write(output, xml_declaration=True, encoding="utf-8")
auto_editor/ffwrapper.py CHANGED
@@ -66,7 +66,6 @@ class FileInfo:
     path: Path
     bitrate: int
     duration: float
-    description: str | None
     videos: tuple[VideoStream, ...]
     audios: tuple[AudioStream, ...]
     subtitles: tuple[SubtitleStream, ...]
@@ -166,13 +165,12 @@ class FileInfo:
             ext = sub_exts.get(codec, "vtt")
             subtitles += (SubtitleStream(codec, ext, s.language),)
 
-        desc = cont.metadata.get("description", None)
         bitrate = 0 if cont.bit_rate is None else cont.bit_rate
         dur = 0 if cont.duration is None else cont.duration / bv.time_base
 
         cont.close()
 
-        return FileInfo(Path(path), bitrate, dur, desc, videos, audios, subtitles)
+        return FileInfo(Path(path), bitrate, dur, videos, audios, subtitles)
 
     def __repr__(self) -> str:
         return f"@{self.path.name}"
auto_editor/help.py CHANGED
@@ -71,26 +71,15 @@ This option controls how timelines are exported.
 
 Export Methods:
  - default ; Export as a regular media file
-
  - premiere ; Export as an XML timeline file for Adobe Premiere Pro
-   - name string? : "Auto-Editor Media Group"
-
+   - name : "Auto-Editor Media Group"
  - resolve ; Export as an XML timeline file for DaVinci Resolve
-   - name string? : "Auto-Editor Media Group"
-
+   - name : "Auto-Editor Media Group"
  - final-cut-pro ; Export as an XML timeline file for Final Cut Pro
-   - name string? : "Auto-Editor Media Group"
-
- - shotcut ; Export as an XML timeline file for Shotcut
-
- - json ; Export as an auto-editor JSON timeline file
-   - api string? : "3"
-
- - timeline ; Print the auto-editor timeline to stdout
-   - api string? : "3"
-
- - audio ; Export as a WAV audio file
-
+   - name : "Auto-Editor Media Group"
+ - shotcut ; Export as an XML timeline file for Shotcut
+ - v3 ; Export as an auto-editor v3 timeline file
+ - v1 ; Export as an auto-editor v1 timeline file
 - clip-sequence ; Export as multiple numbered media files
 
 """.strip(),
@@ -0,0 +1,277 @@
+from __future__ import annotations
+
+import xml.etree.ElementTree as ET
+from fractions import Fraction
+from typing import TYPE_CHECKING
+from urllib.parse import unquote
+from xml.etree.ElementTree import Element
+
+from auto_editor.ffwrapper import FileInfo
+from auto_editor.timeline import ASpace, Template, TlAudio, TlVideo, VSpace, v3
+
+if TYPE_CHECKING:
+    from auto_editor.utils.log import Log
+
+
+SUPPORTED_EFFECTS = ("timeremap",)
+
+
+def show(ele: Element, limit: int, depth: int = 0) -> None:
+    print(
+        f"{' ' * (depth * 4)}<{ele.tag} {ele.attrib}> {ele.text.strip() if ele.text is not None else ''}"
+    )
+    for child in ele:
+        if isinstance(child, Element) and depth < limit:
+            show(child, limit, depth + 1)
+
+
+def read_filters(clipitem: Element, log: Log) -> float:
+    for effect_tag in clipitem:
+        if effect_tag.tag in {"enabled", "start", "end"}:
+            continue
+        if len(effect_tag) < 3:
+            log.error("<effect> requires: <effectid> <name> and one <parameter>")
+        for i, effects in enumerate(effect_tag):
+            if i == 0 and effects.tag != "name":
+                log.error("<effect>: <name> must be first tag")
+            if i == 1 and effects.tag != "effectid":
+                log.error("<effect>: <effectid> must be second tag")
+                if effects.text not in SUPPORTED_EFFECTS:
+                    log.error(f"`{effects.text}` is not a supported effect.")
+
+            if i > 1:
+                for j, parms in enumerate(effects):
+                    if j == 0:
+                        if parms.tag != "parameterid":
+                            log.error("<parameter>: <parameterid> must be first tag")
+                        if parms.text != "speed":
+                            break
+
+                    if j > 0 and parms.tag == "value":
+                        if parms.text is None:
+                            log.error("<value>: number required")
+                        return float(parms.text) / 100
+
+    return 1.0
+
+
+def uri_to_path(uri: str) -> str:
+    # Handle inputs like:
+    # /Users/wyattblue/projects/auto-editor/example.mp4
+    # file:///Users/wyattblue/projects/auto-editor/example.mp4
+    # file:///C:/Users/WyattBlue/projects/auto-editor/example.mp4
+    # file://localhost/Users/wyattblue/projects/auto-editor/example.mp4
+
+    if uri.startswith("file://localhost/"):
+        uri = uri[16:]
+    elif uri.startswith("file://"):
+        # Windows-style paths
+        uri = uri[8:] if len(uri) > 8 and uri[9] == ":" else uri[7:]
+    else:
+        return uri
+    return unquote(uri)
+
+
+def read_tb_ntsc(tb: int, ntsc: bool) -> Fraction:
+    if ntsc:
+        if tb == 24:
+            return Fraction(24000, 1001)
+        if tb == 30:
+            return Fraction(30000, 1001)
+        if tb == 60:
+            return Fraction(60000, 1001)
+        return tb * Fraction(999, 1000)
+
+    return Fraction(tb)
+
+
+def fcp7_read_xml(path: str, log: Log) -> v3:
+    def xml_bool(val: str) -> bool:
+        if val == "TRUE":
+            return True
+        if val == "FALSE":
+            return False
+        raise TypeError("Value must be 'TRUE' or 'FALSE'")
+
+    try:
+        tree = ET.parse(path)
+    except FileNotFoundError:
+        log.error(f"Could not find '{path}'")
+
+    root = tree.getroot()
+
+    def parse(ele: Element, schema: dict) -> dict:
+        new: dict = {}
+        for key, val in schema.items():
+            if isinstance(val, dict) and "__arr" in val:
+                new[key] = []
+
+        is_arr = False
+        for child in ele:
+            if child.tag not in schema:
+                continue
+
+            if schema[child.tag] is None:
+                new[child.tag] = child
+                continue
+
+            if isinstance(schema[child.tag], dict):
+                val = parse(child, schema[child.tag])
+                is_arr = "__arr" in schema[child.tag]
+            else:
+                val = schema[child.tag](child.text)
+
+            if child.tag in new:
+                if not is_arr:
+                    log.error(f"<{child.tag}> can only occur once")
+                new[child.tag].append(val)
+            else:
+                new[child.tag] = [val] if is_arr else val
+
+        return new
+
+    def check(ele: Element, tag: str) -> None:
+        if tag != ele.tag:
+            log.error(f"Expected '{tag}' tag, got '{ele.tag}'")
+
+    check(root, "xmeml")
+    check(root[0], "sequence")
+    result = parse(
+        root[0],
+        {
+            "name": str,
+            "duration": int,
+            "rate": {
+                "timebase": Fraction,
+                "ntsc": xml_bool,
+            },
+            "media": None,
+        },
+    )
+
+    tb = read_tb_ntsc(result["rate"]["timebase"], result["rate"]["ntsc"])
+    av = parse(
+        result["media"],
+        {
+            "video": None,
+            "audio": None,
+        },
+    )
+
+    sources: dict[str, FileInfo] = {}
+    vobjs: VSpace = []
+    aobjs: ASpace = []
+
+    vclip_schema = {
+        "format": {
+            "samplecharacteristics": {
+                "width": int,
+                "height": int,
+            },
+        },
+        "track": {
+            "__arr": "",
+            "clipitem": {
+                "__arr": "",
+                "start": int,
+                "end": int,
+                "in": int,
+                "out": int,
+                "file": None,
+                "filter": None,
+            },
+        },
+    }
+
+    aclip_schema = {
+        "format": {"samplecharacteristics": {"samplerate": int}},
+        "track": {
+            "__arr": "",
+            "clipitem": {
+                "__arr": "",
+                "start": int,
+                "end": int,
+                "in": int,
+                "out": int,
+                "file": None,
+                "filter": None,
+            },
+        },
+    }
+
+    sr = 48000
+    res = (1920, 1080)
+
+    if "video" in av:
+        tracks = parse(av["video"], vclip_schema)
+
+        if "format" in tracks:
+            width = tracks["format"]["samplecharacteristics"]["width"]
+            height = tracks["format"]["samplecharacteristics"]["height"]
+            res = width, height
+
+        for t, track in enumerate(tracks["track"]):
+            if len(track["clipitem"]) > 0:
+                vobjs.append([])
+            for i, clipitem in enumerate(track["clipitem"]):
+                file_id = clipitem["file"].attrib["id"]
+                if file_id not in sources:
+                    fileobj = parse(clipitem["file"], {"pathurl": str})
+
+                    if "pathurl" in fileobj:
+                        sources[file_id] = FileInfo.init(
+                            uri_to_path(fileobj["pathurl"]),
+                            log,
+                        )
+                    else:
+                        show(clipitem["file"], 3)
+                        log.error(
+                            f"'pathurl' child element not found in {clipitem['file'].tag}"
+                        )
+
+                if "filter" in clipitem:
+                    speed = read_filters(clipitem["filter"], log)
+                else:
+                    speed = 1.0
+
+                start = clipitem["start"]
+                dur = clipitem["end"] - start
+                offset = clipitem["in"]
+
+                vobjs[t].append(
+                    TlVideo(start, dur, sources[file_id], offset, speed, stream=0)
+                )
+
+    if "audio" in av:
+        tracks = parse(av["audio"], aclip_schema)
+        if "format" in tracks:
+            sr = tracks["format"]["samplecharacteristics"]["samplerate"]
+
+        for t, track in enumerate(tracks["track"]):
+            if len(track["clipitem"]) > 0:
+                aobjs.append([])
+            for i, clipitem in enumerate(track["clipitem"]):
+                file_id = clipitem["file"].attrib["id"]
+                if file_id not in sources:
+                    fileobj = parse(clipitem["file"], {"pathurl": str})
+                    sources[file_id] = FileInfo.init(
+                        uri_to_path(fileobj["pathurl"]), log
+                    )
+
+                if "filter" in clipitem:
+                    speed = read_filters(clipitem["filter"], log)
+                else:
+                    speed = 1.0
+
+                start = clipitem["start"]
+                dur = clipitem["end"] - start
+                offset = clipitem["in"]
+
+                aobjs[t].append(
+                    TlAudio(
+                        start, dur, sources[file_id], offset, speed, volume=1, stream=0
+                    )
+                )
+
+    T = Template.init(sources[next(iter(sources))], sr, res=res)
+    return v3(tb, "#000", T, vobjs, aobjs, v1=None)
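As a quick sanity check, the sample URIs listed in uri_to_path's own comments can be run through the function. A sketch only; it assumes uri_to_path from the new module above is in scope:

# Illustrative check of uri_to_path against the sample inputs in its comments.
samples = [
    "/Users/wyattblue/projects/auto-editor/example.mp4",
    "file:///Users/wyattblue/projects/auto-editor/example.mp4",
    "file:///C:/Users/WyattBlue/projects/auto-editor/example.mp4",
    "file://localhost/Users/wyattblue/projects/auto-editor/example.mp4",
]
for uri in samples:
    print(uri_to_path(uri))
# All four print a plain filesystem path: the first, second, and fourth yield
# /Users/wyattblue/projects/auto-editor/example.mp4, and the third yields
# C:/Users/WyattBlue/projects/auto-editor/example.mp4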