sprocket-systems.coda.sdk 1.3.2__py3-none-any.whl → 2.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- coda/__init__.py +2 -31
- coda/sdk/__init__.py +27 -0
- coda/sdk/constants.py +22 -0
- coda/sdk/enums.py +270 -0
- coda/sdk/essence.py +496 -0
- coda/sdk/job.py +582 -0
- coda/sdk/preset.py +215 -0
- coda/sdk/utils.py +282 -0
- coda/sdk/workflow.py +1402 -0
- {sprocket_systems_coda_sdk-1.3.2.dist-info → sprocket_systems_coda_sdk-2.0.5.dist-info}/METADATA +4 -3
- sprocket_systems_coda_sdk-2.0.5.dist-info/RECORD +15 -0
- {sprocket_systems_coda_sdk-1.3.2.dist-info → sprocket_systems_coda_sdk-2.0.5.dist-info}/WHEEL +1 -1
- coda/sdk.py +0 -1646
- sprocket_systems_coda_sdk-1.3.2.dist-info/RECORD +0 -8
- {sprocket_systems_coda_sdk-1.3.2.dist-info → sprocket_systems_coda_sdk-2.0.5.dist-info}/entry_points.txt +0 -0
- {sprocket_systems_coda_sdk-1.3.2.dist-info → sprocket_systems_coda_sdk-2.0.5.dist-info}/licenses/LICENSE +0 -0
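Version 2.0.5 replaces the monolithic coda/sdk.py module with a coda/sdk/ package split into constants, enums, essence, job, preset, utils, and workflow submodules. The largest addition is coda/sdk/workflow.py, which introduces a fluent WorkflowDefinitionBuilder. A minimal usage sketch based on the listing below (illustrative only — the workflow, block, package, and destination names and the IO location ULID are placeholders; imports follow the new module paths shown above):

    from coda.sdk.enums import VenueType
    from coda.sdk.workflow import WorkflowDefinitionBuilder

    # Build a nearfield process block, attach a multi-mono WAV package to it,
    # and route that package to an existing IO location.
    workflow = (
        WorkflowDefinitionBuilder("example-workflow")
        .with_process_block("nearfield-mix", output_venue=VenueType.NEARFIELD)
        .with_multi_mono_package("wav-masters", process_blocks=["nearfield-mix"])
        .with_destination("delivery", io_location_id="01ARZ3NDEKTSV4RRFFQ69G5FAV")
        .with_packages_sent_to_destination("delivery", ["wav-masters"])
        .build()
    )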
coda/sdk/workflow.py
ADDED
@@ -0,0 +1,1402 @@
import copy
import os
import sys
import requests

from typing import List, Any, Callable, Tuple, Dict
from .enums import Format, FrameRate, InputFilter, InputStemType, PackageType, StemType, PresetType, VenueType
from .preset import Preset
from .utils import is_key_value_comma_string, validate_group_id, make_request, get_channels


_DEFAULT_LOUDNESS_TOLERANCES = {
    "target_program_loudness": [-0.5, 0.4],
    "target_dialog_loudness": [-0.5, 0.4],
    "target_true_peak": [-0.2, 0.0],
}

_IMAX_ENHANCED_FORMAT_MAPPINGS = {
    Format.IMAX12: "5.1.4;mode=imax_enhanced",
    Format.IMAX6: "5.1.1;mode=imax_enhanced",
    Format.IMAX5: "5.1;mode=imax_enhanced",
}

_VALID_IMAX_FORMATS = [
    Format.FIVE_ONE,
    Format.FIVE_ONE_FOUR,
    Format.SEVEN_ONE_FIVE,
    Format.FIVE_ONE_ONE,
    Format.IMAX5,
    Format.IMAX6,
    Format.IMAX12,
]

_DEFAULT_ATMOS_RENDERS = [Format.SEVEN_ONE_FOUR, Format.SEVEN_ONE]

_DME_STEM_MAPPING = {
    "audio/adr": "audio/dx;contents=comp",
    "audio/arch": "audio/dx;contents=comp",
    "audio/audiodescription": "audio/dx;contents=comp",
    "audio/bg": "audio/fx;contents=comp",
    "audio/crd": "audio/fx;contents=comp",
    "audio/dx": "audio/dx;contents=comp",
    # "audio/dx1": "audio/dx;contents=comp",
    # "audio/dx2": "audio/dx;contents=comp",
    # "audio/dxcomp": "audio/dx;contents=comp",
    "audio/fffx": "audio/fx;contents=comp",
    "audio/fix": "audio/fx;contents=comp",
    # "audio/fix1": "audio/fx;contents=comp",
    # "audio/fix2": "audio/fx;contents=comp",
    # "audio/fix3": "audio/fx;contents=comp",
    # "audio/fix4": "audio/fx;contents=comp",
    "audio/fol": "audio/fx;contents=comp",
    # "audio/fol1": "audio/fx;contents=comp",
    # "audio/fol2": "audio/fx;contents=comp",
    "audio/fx": "audio/fx;contents=comp",
    # "audio/fx1": "audio/fx;contents=comp",
    # "audio/fx2": "audio/fx;contents=comp",
    # "audio/fx3": "audio/fx;contents=comp",
    # "audio/fx4": "audio/fx;contents=comp",
    # "audio/fxcomp": "audio/fx;contents=comp",
    "audio/lg": "audio/dx;contents=comp",
    "audio/mnemx": "audio/mx;contents=comp",
    "audio/mx": "audio/mx;contents=comp",
    # "audio/mx1": "audio/mx;contents=comp",
    # "audio/mx2": "audio/mx;contents=comp",
    # "audio/mxcomp": "audio/mx;contents=comp",
    "audio/nar": "audio/dx;contents=comp",
    "audio/pfx": "audio/fx;contents=comp",
    "audio/scr": "audio/mx;contents=comp",
    "audio/sng": "audio/mx;contents=comp",
    "audio/vo": "audio/dx;contents=comp",
    "audio/vox": "audio/dx;contents=comp",
    # "audio/wla": "audio/fx;contents=comp",
}


class WorkflowDefinitionBuilder:
    """Uses the Builder pattern to construct a Coda workflow definition."""

    _SAME_AS_INPUT = "same_as_input"
    _ALL_FROM_ESSENCE = "all_from_essence"

    def __init__(self, name: str) -> None:
        """Initialize the WorkflowDefinitionBuilder.

        This constructor sets up a new builder with the given name and initializes
        the internal attributes that store the workflow definition components.

        Args:
            name (str): The name of the workflow.

        """
        self._name: str = name
        self._packages: dict = {}
        self._process_blocks: dict = {}
        self._destinations: dict = {}
        self._wf_params: dict = {}

    def with_group(self, group: str) -> 'WorkflowDefinitionBuilder':
        """Set the CODA_API_GROUP_ID env var from a group name or ID known to the CODA_API_TOKEN.

        Args:
            group (str): A known group name or group ID that the CODA_API_TOKEN has access to.

        Raises:
            ValueError: If the group ID for the given group name is not found.

        Returns:
            WorkflowDefinitionBuilder: The builder instance for fluent chaining.

        """
        all_groups = Preset.get_presets(PresetType.GROUPS)

        found_group = next((g for g in all_groups if g.get("name") == group or g.get("group_id") == group), None)

        if not found_group or not found_group.get("group_id"):
            raise ValueError(f"Group '{group}' not found.")

        os.environ["CODA_API_GROUP_ID"] = str(found_group["group_id"])

        return self

    def with_parameters(self, params: dict) -> 'WorkflowDefinitionBuilder':
        """Set the workflow-wide parameters.

        Args:
            params (dict): A dictionary of parameters to apply to the workflow.

        Returns:
            WorkflowDefinitionBuilder: The builder instance for fluent chaining.

        """
        self._wf_params = params.copy()
        return self

    def with_process_block(
        self,
        name: str,
        output_venue: VenueType = VenueType.NEARFIELD,
        loudness_preset: dict | str | None = None,
        timecode_preset: dict | str | None = None,
        input_filter: str = InputFilter.ALL_STEMS,
    ) -> 'WorkflowDefinitionBuilder':
        """Add a process block to the workflow.

        Args:
            name (str): The name of the process block.
            output_venue (VenueType, optional): The output venue enum. Defaults to VenueType.NEARFIELD.
            loudness_preset (dict | str, optional): Loudness preset name or definition. Defaults to None.
            timecode_preset (dict | str, optional): Timecode preset name or definition. Defaults to None.
            input_filter (str, optional): The input filter enum to use. Defaults to InputFilter.ALL_STEMS.

        Raises:
            ValueError: If a timecode or loudness preset name is provided but not found.

        Returns:
            WorkflowDefinitionBuilder: The builder instance for fluent chaining.

        """
        if output_venue == VenueType.ALL_FROM_ESSENCE:
            raise ValueError("Output venue cannot be VenueType.ALL_FROM_ESSENCE in process blocks. Use VenueType.SAME_AS_INPUT for dynamic venue selection.")
        if not loudness_preset:
            loudness_preset = {}
        if not timecode_preset:
            timecode_preset = {}

        if isinstance(timecode_preset, str):
            presets = Preset.get_presets(PresetType.TIMECODE)
            pf = [p for p in presets if p["name"] == timecode_preset]
            if not pf:
                raise ValueError(f"Timecode preset '{timecode_preset}' not found.")
            timecode_preset = pf[0]["definition"]

        if isinstance(loudness_preset, str):
            presets = Preset.get_presets(PresetType.LOUDNESS)
            pf = [p for p in presets if p["name"] == loudness_preset]
            if not pf:
                raise ValueError(f"Loudness preset '{loudness_preset}' not found.")
            loudness_preset = pf[0]["definition"]

        if isinstance(loudness_preset, dict) and "tolerances" not in loudness_preset:
            loudness_preset["tolerances"] = _DEFAULT_LOUDNESS_TOLERANCES.copy()
        process_block_config = {
            "name": name,
            "input_filter": input_filter,
            "output_settings": {
                "loudness": loudness_preset,
                "venue": output_venue,
            },
            "output_essences": {},
        }
        if timecode_preset:
            process_block_config["output_settings"]["timecode"] = timecode_preset

        pid = f"my-process-block-{len(self._process_blocks) + 1}"
        self._process_blocks[pid] = process_block_config

        return self

    def with_dcp_package(
        self,
        name: str,
        process_blocks: List[str],
        reels: bool = False,
        naming_convention_preset: str | dict | int | None = None,
        naming_options: str | None = None,
    ) -> 'WorkflowDefinitionBuilder':
        """Add a DCP MXF package to the workflow.

        Args:
            name (str): Name of the package.
            process_blocks (List[str]): List of process block names to connect.
            reels (bool, optional): Enable reel splitting. Defaults to False.
            naming_convention_preset (str | int | dict, optional): Naming convention preset name (str), ID (int),
                or definition (dict). Defaults to None.
            naming_options (str, optional): Comma-separated key-value pairs for the naming convention. Defaults to None.

        Raises:
            ValueError: If venue type is not "theatrical".
            TypeError: If naming_options is not a string.

        Returns:
            WorkflowDefinitionBuilder: The builder instance for fluent chaining.

        """
        if naming_options is not None and \
                (not isinstance(naming_options, str) or not is_key_value_comma_string(naming_options)):
            raise TypeError("naming_options must be a comma-delimited string in KEY=VALUE format.")

        naming_convention_id = None
        naming_convention_dict = None
        if naming_convention_preset:
            naming_convention_id, naming_convention_dict = self._resolve_preset(naming_convention_preset, PresetType.NAMING)

        block_list = self._get_process_block_ids(process_blocks)
        for block_id in block_list:
            block = self._process_blocks[block_id]
            if block["output_settings"]["venue"] != VenueType.THEATRICAL:
                raise ValueError(
                    f"This package type requires a '{VenueType.THEATRICAL}' venue, "
                    f"but the process block '{block['name']}' is set to '{block['output_settings']['venue']}'."
                )
            fps = FrameRate.TWENTY_FOUR
            fmt = [Format.ATMOS]
            typ = [StemType.PRINTMASTER]
            for f in fmt:
                for t in typ:
                    block["output_essences"][f"{t}_{fps}_{f}"] = {
                        "audio_format": f,
                        "frame_rate": fps,
                        "type": t,
                    }

        count = sum(1 for pkg in self._packages.values() if pkg.get("type") == "dcp_mxf")
        pid = f"my-dcp-mxf-package-{count + 1}"

        package_definition = {
            "name": name,
            "process_block_ids": block_list,
            "include_reel_splitting": reels,
        }

        if naming_convention_id:
            package_definition["naming_convention_id"] = naming_convention_id
        if naming_convention_dict:
            package_definition["naming_convention"] = copy.deepcopy(naming_convention_dict)
        if naming_options:
            package_definition["naming_convention_options"] = naming_options

        self._packages[pid] = {"type": PackageType.DCP_MXF, "definition": package_definition}
        return self

    def with_super_session_package(
        self,
        name: str,
        track_definitions: List[Tuple[str, Format, StemType, VenueType]],
        output_frame_rate: FrameRate = FrameRate.ALL_FROM_ESSENCE,
        super_session_preset: str | int | dict[str, Any] | None = None,
        naming_convention_preset: str | int | dict | None = None,
        naming_options: str | None = None,
    ) -> 'WorkflowDefinitionBuilder':
        """Add a Super Session package to the workflow.

        Args:
            name (str): Name of the package.
            track_definitions (List[Tuple[str, Format, StemType, VenueType]]): A list of 4-element tuples,
                where each tuple defines a track configuration as (process_block_name, audio_format, stem_type, venue_type).
                venue_type may only be VenueType.THEATRICAL or VenueType.NEARFIELD.
            output_frame_rate (FrameRate, optional): The output frame rate enum. Defaults to FrameRate.ALL_FROM_ESSENCE.
            super_session_preset (str | int | dict, optional): Super session preset name, ID, or definition. Defaults to None.
            naming_convention_preset (str | int | dict, optional): Naming convention preset name, ID, or definition. Defaults to None.
            naming_options (str, optional): Comma-separated key-value pairs for the naming convention. Defaults to None.

        Raises:
            TypeError: If 'track_definitions' is not a list of 4-element string tuples.
            ValueError: If a process block specified in `track_definitions` is not found,
                or if stem_type or audio_format is set to "all_from_essence", or if venue_type is invalid.

        Returns:
            WorkflowDefinitionBuilder: The builder instance for fluent chaining.

        """
        if naming_options is not None and \
                (not isinstance(naming_options, str) or not is_key_value_comma_string(naming_options)):
            raise TypeError("naming_options must be a comma-delimited string in KEY=VALUE format.")

        if not isinstance(track_definitions, list):
            raise TypeError("The 'track_definitions' parameter must be a list of (process_block_name, audio_format, stem_type) tuples.")

        block_names = []
        for idx, track_tuple in enumerate(track_definitions):
            if not isinstance(track_tuple, tuple) or len(track_tuple) != 4:
                raise TypeError(
                    f"Track configuration at index {idx} must be a tuple of (process_block_name: str, audio_format: Format, stem_type: StemType, venue_type: VenueType), "
                    f"but got {type(track_tuple).__name__} of length {len(track_tuple)}."
                )
            if not all(isinstance(s, str) for s in track_tuple):
                raise TypeError(
                    f"Track configuration at index {idx} must contain only strings."
                )
            block_names.append(track_tuple[0])

        block_list = self._get_process_block_ids(block_names)

        naming_convention_id = None
        naming_convention_dict = None
        super_session_preset_id = None
        super_session_preset_dict = None

        if naming_convention_preset:
            naming_convention_id, naming_convention_dict = self._resolve_preset(naming_convention_preset, PresetType.NAMING)
        if super_session_preset:
            super_session_preset_id, super_session_preset_dict = self._resolve_preset(super_session_preset, PresetType.SUPER_SESSION)

        if super_session_preset_dict is None and super_session_preset_id is None:
            super_session_preset_dict = {
                "session_name_template": "{{TITLE}}_{{FRAME_RATE}}",
                "tracks": [],
            }

        tracks = []
        for idx, (_, audio_format, stem_type, venue_type) in enumerate(track_definitions):
            block_id = block_list[idx]
            block = self._process_blocks[block_id]
            if venue_type not in (VenueType.NEARFIELD, VenueType.THEATRICAL):
                raise ValueError("Venue type must be VenueType.NEARFIELD or VenueType.THEATRICAL only.")
            venue = venue_type
            fr = self._SAME_AS_INPUT if output_frame_rate == self._ALL_FROM_ESSENCE else output_frame_rate
            if audio_format == self._ALL_FROM_ESSENCE:
                raise ValueError(f"Audio format must not be {audio_format}")
            if stem_type == self._ALL_FROM_ESSENCE:
                raise ValueError(f"Stem type must not be {stem_type}")
            block["output_essences"][f"{stem_type}_{fr}_{audio_format}"] = {
                "audio_format": audio_format,
                "frame_rate": fr,
                "type": stem_type,
            }

            track_item = {
                "element": stem_type,
                "format": audio_format,
                "venue": venue
            }

            if venue != VenueType.ALL_FROM_ESSENCE:
                tracks.append(track_item)
            else:
                tracks.append({**track_item, "venue": VenueType.THEATRICAL})
                tracks.append({**track_item, "venue": VenueType.NEARFIELD})

        if isinstance(super_session_preset_dict, dict) and not super_session_preset_dict.get("tracks"):
            expanded_tracks = []
            for track in tracks:
                element = track.get("element")
                if element == StemType.WIDES:
                    stem_list = [InputStemType.DX, InputStemType.FX, InputStemType.MX, InputStemType.VOX, InputStemType.FOL, InputStemType.FIX]
                    for k in stem_list:
                        expanded_tracks.append({"element": k, "format": track["format"], "venue": track["venue"]})
                elif element == StemType.DME:
                    stem_list = [InputStemType.DX, InputStemType.FX, InputStemType.MX]
                    for k in stem_list:
                        expanded_tracks.append({"element": k, "format": track["format"], "venue": track["venue"]})
                elif element == StemType.PRINTMASTER:
                    expanded_tracks.append({"element": InputStemType.PRINTMASTER, "format": track["format"], "venue": track["venue"]})
                else:
                    expanded_tracks.append(track)
            super_session_preset_dict["tracks"] = expanded_tracks

        count = sum(1 for pkg in self._packages.values() if pkg.get("type") == "super_session")
        pid = f"my-super-session-package-{count + 1}"

        package_definition = {
            "name": name,
            "process_block_ids": block_list,
            "frame_rate": output_frame_rate,
        }

        if super_session_preset_id:
            package_definition["super_session_profile_id"] = super_session_preset_id
        if super_session_preset_dict:
            package_definition["super_session_profile"] = super_session_preset_dict
        if naming_convention_id:
            package_definition["naming_convention_id"] = naming_convention_id
        if naming_convention_dict:
            package_definition["naming_convention"] = naming_convention_dict
        if naming_options:
            package_definition["naming_convention_options"] = naming_options

        self._packages[pid] = {"type": PackageType.SUPER_SESSION, "definition": package_definition}
        return self

    def with_multi_mono_reels_package(
        self,
        name: str,
        process_blocks: List[str],
        formats: List[Format] | None = None,
        naming_convention_preset: str | int | dict | None = None,
        naming_options: str | None = None,
    ) -> 'WorkflowDefinitionBuilder':
        """Add a Multi-mono Reels package to the workflow.

        Note:
            You will need to add `with_edits()` in the JobPayloadBuilder to properly set up this package.
            See an example of the dict needed in the JobPayloadBuilder.with_edits() method documentation.

        Args:
            name (str): Name of the package.
            process_blocks (List[str]): List of process block names to connect.
            formats (List[Format] | None, optional): List of formats. Defaults to ["all_from_essence"].
            naming_convention_preset (str | int | dict, optional): Naming convention preset name, ID, or definition. Defaults to None.
            naming_options (str, optional): Comma-separated key-value pairs for the naming convention. Defaults to None.

        Raises:
            ValueError: If the output venue for any connected process block is not 'theatrical'.
            TypeError: If naming_options is not a string.

        Returns:
            WorkflowDefinitionBuilder: The builder instance for fluent chaining.

        """
        if naming_options is not None and \
                (not isinstance(naming_options, str) or not is_key_value_comma_string(naming_options)):
            raise TypeError("naming_options must be a comma-delimited string in KEY=VALUE format.")

        if formats is None:
            formats = [Format.ALL_FROM_ESSENCE]

        naming_convention_id = None
        naming_convention_dict = None
        if naming_convention_preset:
            naming_convention_id, naming_convention_dict = self._resolve_preset(naming_convention_preset, PresetType.NAMING)

        blist = self._get_process_block_ids(process_blocks)
        for block_id in blist:
            block = self._process_blocks[block_id]
            if block["output_settings"]["venue"] != VenueType.THEATRICAL:
                raise ValueError(
                    f"This package type requires a '{VenueType.THEATRICAL}' venue, "
                    f"but the process block '{block['name']}' is set to '{block['output_settings']['venue']}'."
                )
            fps = FrameRate.TWENTY_FOUR
            fmt = formats
            typ = StemType.PRINTMASTER
            for f_original in fmt:
                f = self._SAME_AS_INPUT if f_original == self._ALL_FROM_ESSENCE else f_original
                block["output_essences"][f"{typ}_{fps}_{f}"] = {
                    "audio_format": f,
                    "frame_rate": fps,
                    "type": typ,
                }

        count = sum(1 for pkg in self._packages.values() if pkg.get("type") == "multi_mono_reels")
        pid = f"my-multi-mono-reels-package-{count + 1}"

        package_definition = {
            "name": name,
            "process_block_ids": blist,
            "formats": fmt,
        }

        if naming_convention_id:
            package_definition["naming_convention_id"] = naming_convention_id
        if naming_convention_dict:
            package_definition["naming_convention"] = copy.deepcopy(naming_convention_dict)
        if naming_options:
            package_definition["naming_convention_options"] = naming_options

        self._packages[pid] = {
            "type": PackageType.MULTI_MONO_REELS,
            "definition": package_definition,
        }
        return self

    def with_dolby_encode_package(
        self,
        name: str,
        process_blocks: List[str],
        encoding_preset: str | int | dict,
        output_frame_rate: FrameRate = FrameRate.ALL_FROM_ESSENCE,
        output_format: Format = Format.ALL_FROM_ESSENCE,
        naming_convention_preset: str | int | dict | None = None,
        naming_options: str | None = None,
    ) -> 'WorkflowDefinitionBuilder':
        """Add a Dolby Encode package to the workflow.

        Args:
            name (str): Name of the package.
            process_blocks (List[str]): List of process block names to connect.
            encoding_preset (str | int | dict): Dolby encoding preset name (str), ID (int),
                or definition (dict).
            output_frame_rate (FrameRate, optional): A FrameRate enum defining the output frame rate.
                Defaults to "all_from_essence"
            output_format (Format, optional): A Format enum defining the output format.
                Defaults to "all_from_essence"
            naming_convention_preset (str | int | dict, optional): Naming convention preset name (str), ID (int),
                or definition (dict). Defaults to None.
            naming_options (str, optional): Comma-separated key-value pairs for the naming convention. Defaults to None.

        Raises:
            TypeError: If naming_options is not a comma-delimited string in KEY=VALUE format.
            ValueError: If the provided Dolby encoding preset does not support the required format,
                or if the output venue for any connected process block is not 'nearfield'.

        Returns:
            WorkflowDefinitionBuilder: The builder instance for fluent chaining.

        """
        if naming_options is not None and \
                (not isinstance(naming_options, str) or not is_key_value_comma_string(naming_options)):
            raise TypeError("naming_options must be a comma-delimited string in KEY=VALUE format.")

        def format_filter(preset: dict) -> bool:
            return output_format in preset.get("formats", [])

        encode_profile_id, encode_profile_dict = self._resolve_preset(
            encoding_preset, PresetType.DOLBY, filter_lambda=format_filter
        )

        if encode_profile_dict and output_format not in encode_profile_dict.get("formats", []):
            raise ValueError(f"Provided Dolby encode preset definition does not support format '{output_format}'.")

        naming_convention_id = None
        naming_convention_dict = None
        if naming_convention_preset:
            naming_convention_id, naming_convention_dict = self._resolve_preset(naming_convention_preset, PresetType.NAMING)

        blist = self._get_process_block_ids(process_blocks)
        for block_id in blist:
            block = self._process_blocks[block_id]
            if block["output_settings"]["venue"] != VenueType.NEARFIELD:
                raise ValueError(f"Dolby encode packages require a '{VenueType.NEARFIELD}' venue.")
            fmt, typ = output_format, StemType.PRINTMASTER
            fr = self._SAME_AS_INPUT if output_frame_rate == self._ALL_FROM_ESSENCE else output_frame_rate
            block["output_essences"][f"{typ}_{fr}_{fmt}"] = {"audio_format": fmt, "frame_rate": fr, "type": typ}

        count = sum(1 for pkg in self._packages.values() if pkg.get("type") == "dolby")
        pid = f"my-dolby-package-{count + 1}"

        package_definition = {"name": name, "process_block_ids": blist, "format": output_format, "frame_rate": output_frame_rate}

        if encode_profile_id:
            package_definition["encoding_profile_id"] = encode_profile_id
        if encode_profile_dict:
            package_definition["encoding_profile"] = encode_profile_dict
        if naming_convention_id:
            package_definition["naming_convention_id"] = naming_convention_id
        if naming_convention_dict:
            package_definition["naming_convention"] = naming_convention_dict
        if naming_options:
            package_definition["naming_convention_options"] = naming_options

        self._packages[pid] = {
            "type": PackageType.DOLBY,
            "definition": package_definition,
        }

        return self

    def with_dts_encode_package(
        self,
        name: str,
        process_blocks: List[str],
        encoding_preset: str | int | dict,
        output_frame_rate: FrameRate = FrameRate.ALL_FROM_ESSENCE,
        output_format: Format = Format.ALL_FROM_ESSENCE,
        naming_convention_preset: str | int | dict | None = None,
        naming_options: str | None = None,
        is_imax_enhanced: bool = False,
    ) -> 'WorkflowDefinitionBuilder':
        """Add a DTS or IMAX Enhanced package to the workflow.

        Args:
            name (str): Name of the package.
            process_blocks (List[str]): List of process block names to connect.
            encoding_preset (str | int | dict): DTS encoding preset name (str), ID (int),
                or definition (dict).
            output_frame_rate (FrameRate, optional): A FrameRate enum defining the output frame rate.
                Defaults to "all_from_essence"
            output_format (Format, optional): A Format enum defining the output format.
                Defaults to "all_from_essence"
            naming_convention_preset (str | int | dict, optional): Naming convention preset name (str), ID (int),
                or definition (dict). Defaults to None.
            naming_options (str, optional): Comma-separated key-value pairs for the naming convention. Defaults to None.
            is_imax_enhanced (bool, optional): Flag for IMAX enhanced package type. Defaults to False.

        Raises:
            TypeError: If naming_options is not a string in the correct format.
            ValueError: If the provided DTS encoding preset does not support the required format,
                or if the output venue for any connected process block is not 'nearfield'.

        Returns:
            WorkflowDefinitionBuilder: The builder instance for fluent chaining.

        """
        if naming_options is not None and \
                (not isinstance(naming_options, str) or not is_key_value_comma_string(naming_options)):
            raise TypeError("naming_options must be a comma-delimited string in KEY=VALUE format.")

        def format_filter(preset: dict) -> bool:
            return output_format in preset.get("formats", [])

        encode_profile_id, encode_profile_dict = self._resolve_preset(
            encoding_preset, PresetType.DTS, filter_lambda=format_filter
        )

        t1cc = False
        if encode_profile_dict:
            t1cc = encode_profile_dict.get("t1cc", False)
        elif encode_profile_id:
            presets = Preset.get_presets(PresetType.DTS)
            pf = [p for p in presets if p.get("encoding_preset_id") == encode_profile_id]
            if pf:
                t1cc = pf[0].get("definition", {}).get("t1cc", False)

        naming_convention_id = None
        naming_convention_dict = None
        if naming_convention_preset:
            naming_convention_id, naming_convention_dict = self._resolve_preset(naming_convention_preset, PresetType.NAMING)

        blist = self._get_process_block_ids(process_blocks)

        for block_id in blist:
            block = self._process_blocks[block_id]
            if block["output_settings"]["venue"] != VenueType.NEARFIELD:
                raise ValueError(f"DTS encode packages require a '{VenueType.NEARFIELD}' venue.")

            f_original, st = output_format, StemType.PRINTMASTER
            if t1cc and f_original != Format.ALL_FROM_ESSENCE and "imax" not in f_original:
                f_original += ";mode=imax_enhanced"

            fr = self._SAME_AS_INPUT if output_frame_rate == self._ALL_FROM_ESSENCE else output_frame_rate
            f = self._SAME_AS_INPUT if f_original == self._ALL_FROM_ESSENCE else f_original

            block["output_essences"][f"{st}_{fr}_{f.replace(';', '_').replace('=', '_')}"] = {"audio_format": f, "frame_rate": fr, "type": st}

        packtype = PackageType.IMAX_ENHANCED if is_imax_enhanced or t1cc else PackageType.DTS

        count = sum(1 for pkg in self._packages.values() if pkg.get("type") == packtype)
        pid = f"my-{packtype.replace('_', '-')}-package-{count + 1}"

        pfmt = output_format
        if pfmt in _IMAX_ENHANCED_FORMAT_MAPPINGS:
            pfmt = _IMAX_ENHANCED_FORMAT_MAPPINGS[pfmt]

        package_definition = {"name": name, "process_block_ids": blist, "format": pfmt, "frame_rate": output_frame_rate}

        if encode_profile_id:
            package_definition["encoding_profile_id"] = encode_profile_id
        if encode_profile_dict:
            package_definition["encoding_profile"] = encode_profile_dict
        if naming_convention_id:
            package_definition["naming_convention_id"] = naming_convention_id
        if naming_convention_dict:
            package_definition["naming_convention"] = naming_convention_dict
        if naming_options:
            package_definition["naming_convention_options"] = naming_options

        self._packages[pid] = {
            "type": packtype,
            "definition": package_definition,
        }

        return self

    def with_imax_enhanced_encode_package(
        self,
        name: str,
        process_blocks: List[str],
        encoding_preset: str | int | dict,
        output_frame_rate: FrameRate = FrameRate.ALL_FROM_ESSENCE,
        output_format: Format = Format.ALL_FROM_ESSENCE,
        naming_convention_preset: str | int | dict | None = None,
        naming_options: str | None = None,
    ) -> 'WorkflowDefinitionBuilder':
        """Add an IMAX Enhanced package, a specific type of DTS package.

        Args:
            name (str): Name of the package.
            process_blocks (List[str]): List of process block names to connect.
            encoding_preset (str | int | dict): DTS encoding preset name (str), ID (int),
                or definition (dict).
            output_frame_rate (FrameRate, optional): A FrameRate enum defining the output frame rate.
                Defaults to "all_from_essence"
            output_format (Format, optional): A Format enum defining the output format.
                Defaults to "all_from_essence"
            naming_convention_preset (str | int | dict, optional): Naming convention preset name (str), ID (int),
                or definition (dict). Defaults to None.
            naming_options (str, optional): Comma-separated key-value pairs for the naming convention. Defaults to None.

        Raises:
            TypeError: If naming_options is not a comma-delimited string in KEY=VALUE format.
            ValueError: If output_format is not a valid IMAX Enhanced format.

        Returns:
            WorkflowDefinitionBuilder: The builder instance for fluent chaining.

        """
        if naming_options is not None and \
                (not isinstance(naming_options, str) or not is_key_value_comma_string(naming_options)):
            raise TypeError("naming_options must be a comma-delimited string in KEY=VALUE format.")

        if output_format not in _VALID_IMAX_FORMATS:
            raise ValueError(
                f"Format '{output_format}' is not valid for IMAX Enhanced packages. "
                f"Valid formats are: {', '.join(_VALID_IMAX_FORMATS)}"
            )

        return self.with_dts_encode_package(
            name,
            process_blocks,
            encoding_preset,
            output_frame_rate,
            output_format,
            naming_convention_preset,
            naming_options,
            is_imax_enhanced=True,
        )

    def with_interleaved_package(
        self,
        name: str,
        process_blocks: List[str],
        output_frame_rate: FrameRate = FrameRate.ALL_FROM_ESSENCE,
        output_formats: List[Format] | None = None,
        output_stem_types: List[StemType] | None = None,
        streams: List[Dict[str, str]] | None = None,
        naming_convention_preset: str | int | dict | None = None,
        naming_options: str | None = None,
    ) -> 'WorkflowDefinitionBuilder':
        """Add an Interleaved WAV package to the workflow.

        Args:
            name (str): Name of the package.
            process_blocks (List[str]): List of process block names to connect.
            output_frame_rate (FrameRate, optional): A FrameRate enum defining the output frame rate.
                Defaults to "all_from_essence"
            output_formats (List[Format], optional): A list defining output formats.
                Defaults to "all_from_essence"
            output_stem_types (List[StemType], optional): A list defining output stem types.
                Defaults to "all_from_essence"
            streams (List[Dict[str, str]] | None, optional): A list defining the interleaved channel mapping
                order for the streams. If None, it is auto-generated. Defaults to None.
                Each dict should be structured as `{"format": str, "element": str, "channel": str}`, for example:
                `[{"format": "2.0", "element": "audio/pm", "channel": "L"}, {"format": "2.0", "element": "audio/pm", "channel": "R"}]`.
            naming_convention_preset (str | int | dict, optional): Naming convention preset name (str), ID (int),
                or definition (dict). Defaults to None.
            naming_options (str, optional): Comma-separated key-value pairs for the naming convention.
                Defaults to None.

        Raises:
            TypeError: If naming_options is not a comma-delimited string in KEY=VALUE format.

        Returns:
            WorkflowDefinitionBuilder: The builder instance for fluent chaining.

        """
        if output_formats is None:
            output_formats = [Format.ALL_FROM_ESSENCE]
        if output_stem_types is None:
            output_stem_types = [StemType.ALL_FROM_ESSENCE]
        if naming_options is not None and \
                (not isinstance(naming_options, str) or not is_key_value_comma_string(naming_options)):
            raise TypeError("naming_options must be a comma-delimited string in KEY=VALUE format.")

        naming_convention_id = None
        naming_convention_dict = None
        if naming_convention_preset:
            naming_convention_id, naming_convention_dict = self._resolve_preset(naming_convention_preset, PresetType.NAMING)

        blist = self._get_process_block_ids(process_blocks)
        for block_id in blist:
            block = self._process_blocks[block_id]
            for f_original in output_formats:
                for st_original in output_stem_types:
                    fr = self._SAME_AS_INPUT if output_frame_rate == self._ALL_FROM_ESSENCE else output_frame_rate
                    f = self._SAME_AS_INPUT if f_original == self._ALL_FROM_ESSENCE else f_original
                    st = self._SAME_AS_INPUT if st_original == self._ALL_FROM_ESSENCE else st_original
                    block["output_essences"][f"{st}_{fr}_{f}"] = {
                        "audio_format": f,
                        "frame_rate": fr,
                        "type": st,
                    }

        if not streams:
            streams = []
            for st_original in sorted(output_stem_types):
                for f in output_formats:
                    if st_original == StemType.DME:
                        elements = ["audio/pm"]
                    if st_original == StemType.PRINTMASTER:
                        elements = ['audio/dx', 'audio/fx', 'audio/mx']
                    channels = get_channels(f)
                    if channels:
                        for e in elements:
                            for ch in channels:
                                streams.append({"format": f, "element": e, "channel": ch})

        count = sum(1 for pkg in self._packages.values() if pkg.get("type") == PackageType.INTERLEAVED)
        pid = f"my-interleaved-package-{count + 1}"

        package_definition = {
            "name": name,
            "frame_rate": output_frame_rate,
            "process_block_ids": blist,
            "streams": streams,
        }

        if naming_convention_id:
            package_definition["naming_convention_id"] = naming_convention_id
        if naming_convention_dict:
            package_definition["naming_convention"] = copy.deepcopy(naming_convention_dict)
        if naming_options:
            package_definition["naming_convention_options"] = naming_options

        self._packages[pid] = {"type": PackageType.INTERLEAVED, "definition": package_definition}

        return self

    def with_mov_package(
        self,
        name: str,
        process_blocks: List[str],
        output_frame_rate: FrameRate = FrameRate.ALL_FROM_ESSENCE,
        output_formats: List[Format] | None = None,
        output_stem_types: List[StemType] | None = None,
        streams: List[Dict[str, str]] | None = None,
        naming_convention_preset: str | int | dict | None = None,
        naming_options: str | None = None,
    ) -> 'WorkflowDefinitionBuilder':
        """Add a MOV package to the workflow.

        Args:
            name (str): Name of the package.
            process_blocks (List[str]): List of process block names to connect.
            output_frame_rate (FrameRate, optional): A FrameRate enum defining the output frame rate.
                Defaults to "all_from_essence"
            output_formats (List[Format], optional): A list defining output formats.
                Defaults to "all_from_essence"
            output_stem_types (List[StemType], optional): A list defining output stem types.
                Defaults to "all_from_essence"
            streams (List[Dict[str, str]] | None, optional): A list defining the interleaved channel mapping
                order for the streams. If None, it is auto-generated. Defaults to None.
                Each dict should be structured as `{"format": str, "element": str, "channel": str}`, for example:
                `[{"format": "2.0", "element": "audio/pm", "channel": "L"}, {"format": "2.0", "element": "audio/pm", "channel": "R"}]`.
            naming_convention_preset (str | int | dict, optional): Naming convention preset name (str), ID (int),
                or definition (dict). Defaults to None.
            naming_options (str, optional): Comma-separated key-value pairs for the naming convention.
                Defaults to None.

        Raises:
            TypeError: If naming_options is not a comma-delimited string in KEY=VALUE format.

        Returns:
            WorkflowDefinitionBuilder: The builder instance for fluent chaining.

        """
        if output_formats is None:
            output_formats = [Format.ALL_FROM_ESSENCE]
        if output_stem_types is None:
            output_stem_types = [StemType.ALL_FROM_ESSENCE]
        if naming_options is not None and \
                (not isinstance(naming_options, str) or not is_key_value_comma_string(naming_options)):
            raise TypeError("naming_options must be a comma-delimited string in KEY=VALUE format.")

        naming_convention_id = None
        naming_convention_dict = None
        if naming_convention_preset:
            naming_convention_id, naming_convention_dict = self._resolve_preset(naming_convention_preset, PresetType.NAMING)

        blist = self._get_process_block_ids(process_blocks)
        for block_id in blist:
            block = self._process_blocks[block_id]
            for f_original in output_formats:
                for st_original in output_stem_types:
                    fr = self._SAME_AS_INPUT if output_frame_rate == self._ALL_FROM_ESSENCE else output_frame_rate
                    f = self._SAME_AS_INPUT if f_original == self._ALL_FROM_ESSENCE else f_original
                    st = self._SAME_AS_INPUT if st_original == self._ALL_FROM_ESSENCE else st_original
                    block["output_essences"][f"{st}_{fr}_{f}"] = {
                        "audio_format": f,
                        "frame_rate": fr,
                        "type": st,
                    }

        if not streams:
            streams = []
            for st_original in sorted(output_stem_types):
                for f in output_formats:
                    if st_original == StemType.DME:
                        elements = ["audio/pm"]
                    if st_original == StemType.PRINTMASTER:
                        elements = ['audio/dx', 'audio/fx', 'audio/mx']
                    channels = get_channels(f)
                    if channels:
                        for e in elements:
                            for ch in channels:
                                streams.append({"format": f, "element": e, "channel": ch})

        count = sum(1 for pkg in self._packages.values() if pkg.get("type") == PackageType.MOV)
        pid = f"my-mov-package-{count + 1}"

        package_definition = {
            "name": name,
            "frame_rate": output_frame_rate,
            "process_block_ids": blist,
            "streams": streams,
        }

        if naming_convention_id:
            package_definition["naming_convention_id"] = naming_convention_id
        if naming_convention_dict:
            package_definition["naming_convention"] = copy.deepcopy(naming_convention_dict)
        if naming_options:
            package_definition["naming_convention_options"] = naming_options

        self._packages[pid] = {"type": PackageType.MOV, "definition": package_definition}

        return self

    def with_multi_mono_package(
        self,
        name: str,
        process_blocks: List[str],
        output_frame_rates: List[FrameRate] | None = None,
        output_formats: List[Format] | None = None,
        output_stem_types: List[StemType] | None = None,
        naming_convention_preset: str | int | dict | None = None,
        naming_options: str | None = None,
        include_pt_session: bool = False,
    ) -> 'WorkflowDefinitionBuilder':
        """Add a Multi-mono WAV package to the workflow.

        Args:
            name (str): Name of the package.
            process_blocks (List[str]): List of process block names to connect.
            output_frame_rates (List[FrameRate], optional): A list defining output frame rates.
                Defaults to "all_from_essence"
            output_formats (List[Format], optional): A list defining output formats.
                Defaults to "all_from_essence"
            output_stem_types (List[StemType], optional): A list defining output stem types.
                Defaults to "all_from_essence"
            naming_convention_preset (str | int | dict, optional): Naming convention preset name (str), ID (int),
                or definition (dict). Defaults to None.
            naming_options (str, optional): Comma-separated key-value pairs for the naming convention. Defaults to None.
            include_pt_session (bool, optional): Whether to include a Pro Tools session. Defaults to False.

        Raises:
            TypeError: If the 'naming_options' parameter is set and not a comma-delimited string.

        Returns:
            WorkflowDefinitionBuilder: The builder instance for fluent chaining.

        """
        if output_frame_rates is None:
            output_frame_rates = [FrameRate.ALL_FROM_ESSENCE]
        if output_formats is None:
            output_formats = [Format.ALL_FROM_ESSENCE]
        if output_stem_types is None:
            output_stem_types = [StemType.ALL_FROM_ESSENCE]
        if naming_options is not None and \
                (not isinstance(naming_options, str) or not is_key_value_comma_string(naming_options)):
            raise TypeError("naming_options must be a comma-delimited string in KEY=VALUE format.")

        naming_convention_id = None
        naming_convention_dict = None
        if naming_convention_preset:
            naming_convention_id, naming_convention_dict = self._resolve_preset(naming_convention_preset, PresetType.NAMING)

        blist = self._get_process_block_ids(process_blocks)
        venues = []
        for block_id in blist:
            block = self._process_blocks[block_id]
            venues.append(block["output_settings"]["venue"])
            for fr_original in output_frame_rates:
                for f_original in output_formats:
                    for t_original in output_stem_types:
                        fr = self._SAME_AS_INPUT if fr_original == self._ALL_FROM_ESSENCE else fr_original
                        f = self._SAME_AS_INPUT if f_original == self._ALL_FROM_ESSENCE else f_original
                        t = self._SAME_AS_INPUT if t_original == self._ALL_FROM_ESSENCE else t_original
                        block["output_essences"][f"{t}_{fr}_{f}"] = {
                            "audio_format": f,
                            "frame_rate": fr,
                            "type": t,
                        }

        count = sum(1 for pkg in self._packages.values() if pkg.get("type") == "multi_mono")
        pid = f"my-multi-mono-package-{count + 1}"

        package_definition = {
            "name": name,
            "frame_rates": output_frame_rates,
            "formats": output_formats,
            "elements": output_stem_types,
            "venues": list(set(venues)),
            "process_block_ids": blist,
            "include_pro_tools_session": include_pt_session,
        }

        if naming_convention_id:
            package_definition["naming_convention_id"] = naming_convention_id
        if naming_convention_dict:
            package_definition["naming_convention"] = copy.deepcopy(naming_convention_dict)
        if naming_options:
            package_definition["naming_convention_options"] = naming_options

        self._packages[pid] = {"type": PackageType.MULTI_MONO, "definition": package_definition}
        return self

    def with_adm_package(
        self,
        name: str,
        process_blocks: List[str],
        output_frame_rate: FrameRate = FrameRate.ALL_FROM_ESSENCE,
        output_stem_type: StemType = StemType.ALL_FROM_ESSENCE,
        naming_convention_preset: str | int | dict | None = None,
        naming_options: str | None = None,
    ) -> 'WorkflowDefinitionBuilder':
        """Add an ADM (Atmos) package to the workflow.

        Args:
            name (str): Name of the package.
            process_blocks (List[str]): List of process block names. Must contain exactly one.
            output_frame_rate (FrameRate, optional): A FrameRate enum defining the output frame rate.
                Defaults to "all_from_essence"
            output_stem_type (StemType, optional): A StemType enum defining the output stem type.
                Defaults to "all_from_essence"
            naming_convention_preset (str | int | dict, optional): Naming convention preset name (str), ID (int),
                or definition (dict). Defaults to None.
            naming_options (str, optional): Comma-separated key-value pairs for the naming convention. Defaults to None.

        Raises:
            TypeError: If naming_options is not a comma-delimited string in KEY=VALUE format.
            ValueError: If the number of process blocks provided is not exactly one.

        Returns:
            WorkflowDefinitionBuilder: The builder instance for fluent chaining.

        """
        if naming_options is not None and \
                (not isinstance(naming_options, str) or not is_key_value_comma_string(naming_options)):
            raise TypeError("naming_options must be a comma-delimited string in KEY=VALUE format.")
        if len(process_blocks) != 1:
            raise ValueError("ADM packages require exactly one process block.")

        naming_convention_id = None
        naming_convention_dict = None
        if naming_convention_preset:
            naming_convention_id, naming_convention_dict = self._resolve_preset(naming_convention_preset, PresetType.NAMING)

        blist = self._get_process_block_ids(process_blocks)
        venues = []

        for block_id in blist:
            block = self._process_blocks[block_id]
            venues.append(block["output_settings"]["venue"])
            fmt = Format.ATMOS
            fr = self._SAME_AS_INPUT if output_frame_rate == self._ALL_FROM_ESSENCE else output_frame_rate
            st = self._SAME_AS_INPUT if output_stem_type == self._ALL_FROM_ESSENCE else output_stem_type
            block["output_essences"][f"{st}_{fr}_{fmt}"] = {
                "audio_format": fmt,
                "frame_rate": fr,
                "type": st,
            }

        count = sum(1 for pkg in self._packages.values() if pkg.get("type") == "adm")
        pid = f"my-adm-package-{count + 1}"

        package_definition = {
            "name": name,
            "frame_rate": output_frame_rate,
            "format": Format.ATMOS,
            "element": output_stem_type,
            "venue": venues[0],
            "process_block_ids": blist,
        }

        if naming_convention_id:
            package_definition["naming_convention_id"] = naming_convention_id
        if naming_convention_dict:
            package_definition["naming_convention"] = naming_convention_dict
        if naming_options:
            package_definition["naming_convention_options"] = naming_options

        self._packages[pid] = {
            "type": PackageType.ADM,
            "definition": package_definition,
        }

        return self

    def with_destination(self, name: str, io_location_id: str | None = None, s3_url: str | None = None, s3_auth: dict | None = None, options: dict | None = None) -> 'WorkflowDefinitionBuilder':
        """Add a destination node to the workflow.

        Args:
            name (str): A unique name for the destination.
            io_location_id (str, optional): The ULID of the desired IO Location. Defaults to None.
            s3_url (str, optional): The URL of the S3 destination (e.g., "s3://..."). Defaults to None.
            s3_auth (dict, optional): Authentication details. Defaults to None.
            options (dict, optional): URL options. Defaults to None.

        Returns:
            WorkflowDefinitionBuilder: The builder instance for fluent chaining.

        Raises:
            ValueError: If neither io_location_id nor s3_url is supplied.
            ValueError: If both io_location_id and s3_url are supplied.

        """
        if s3_url is None and io_location_id is None:
            raise ValueError("Either an 'io_location_id' or 's3_url' must be supplied")

        if s3_url is not None and io_location_id is not None:
            raise ValueError("Either an 'io_location_id' or 's3_url' must be supplied, not both.")

        dest_def: dict[str, Any] = {"package_ids": []}
        if s3_url is not None and io_location_id is None:
            dest_type = "s3"
            dest_def["url"] = s3_url
            if s3_auth:
                dest_def["auth"] = s3_auth
            if options:
                dest_def["url_options"] = options

        if io_location_id is not None and s3_url is None:
            dest_type = "io_location"
            dest_def["io_location_id"] = io_location_id

        self._destinations[name] = {"type": dest_type, "definition": dest_def}
        return self
|
|
1156
|
+
|
|
1157
|
+
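    # Example (hypothetical usage; the builder variable, ULID, bucket URL and credential
    # keys are placeholders rather than values defined by this module):
    #
    #   builder.with_destination("archive", io_location_id="01HZEXAMPLEULID0000000000")
    #   builder.with_destination(
    #       "delivery",
    #       s3_url="s3://example-bucket/deliverables/",
    #       s3_auth={"access_key_id": "...", "secret_access_key": "..."},
    #   )
    #
    # Exactly one of io_location_id or s3_url may be given per destination.
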
    def with_packages_sent_to_destination(self, dest_name: str, package_names: List[str]) -> 'WorkflowDefinitionBuilder':
        """Connect packages to a destination.

        Args:
            dest_name (str): The name of the destination to send packages to.
            package_names (List[str]): A list of package names to connect.

        Raises:
            ValueError: If the destination or any package is not found.

        Returns:
            WorkflowDefinitionBuilder: The builder instance for fluent chaining.

        """
        if dest_name not in self._destinations:
            raise ValueError(f"Destination '{dest_name}' not found in workflow.")

        package_ids = self._get_package_ids(package_names)
        self._destinations[dest_name]["definition"]["package_ids"].extend(package_ids)
        return self

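    # Example (hypothetical usage; the destination and package names are placeholders that
    # must already exist in the builder):
    #
    #   builder.with_packages_sent_to_destination("delivery", ["Feature ADM", "Trailer DME"])
    #
    # Package names are resolved to internal package ids before being appended to the
    # destination's package_ids list.
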
    def get_package_list(self) -> List[dict]:
        """Return a flattened list of all packages in the workflow.

        Each item in the list is a dictionary containing the package's
        definition, its type, and its unique ID within the workflow.

        Returns:
            List[dict]: A list of package dictionaries.

        """
        package_list = []
        if not self._packages:
            return package_list

        for pid, pdata in self._packages.items():
            # Create a copy of the main definition
            package_info = copy.deepcopy(pdata.get("definition", {}))

            # Add the type and id for easy access
            package_info["type"] = pdata.get("type")
            package_info["id"] = pid

            package_list.append(package_info)

        return package_list

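    # Example (illustrative return shape only; the field values are placeholders):
    #
    #   [
    #       {"name": "Feature ADM", "format": Format.ATMOS, ...,
    #        "type": PackageType.ADM, "id": "my-adm-package-1"},
    #   ]
    #
    # i.e. each entry is the package definition plus its workflow-level "type" and "id".
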
    def build(self) -> 'WorkflowDefinition':
        """Construct and return the final WorkflowDefinition object.

        Returns:
            WorkflowDefinition: A new Workflow instance containing the built definition.

        """
        definition = {
            "name": self._name,
            "process_blocks": copy.deepcopy(self._process_blocks),
            "packages": copy.deepcopy(self._packages),
            "destinations": copy.deepcopy(self._destinations) or {},
            "workflow_parameters": self._get_default_workflow_params(),
        }
        for k, v in self._wf_params.items():
            definition["workflow_parameters"][k] = v

        return WorkflowDefinition(definition)

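    # Example (hypothetical end-to-end usage; the builder construction assumes the rest of
    # the fluent API defined earlier in this module, and all names are placeholders):
    #
    #   workflow = (
    #       builder
    #       .with_destination("delivery", s3_url="s3://example-bucket/out/")
    #       .with_packages_sent_to_destination("delivery", ["Feature ADM"])
    #       .build()
    #   )
    #   payload = workflow.dict()   # the complete workflow definition as a plain dict
    #
    # Workflow parameters set on the builder override the defaults returned by
    # _get_default_workflow_params().
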
    def _resolve_preset(
        self,
        preset_value: str | int | dict[str, Any],
        preset_type: PresetType,
        filter_lambda: Callable[[dict], bool] | None = None,
    ) -> tuple[int | None, dict | None]:
        """Resolve a preset from various input types.

        This helper method takes a preset identifier and fetches the corresponding
        preset data. The identifier can be a string (preset name), an integer
        (preset ID), or a dictionary (a direct definition). It can also apply
        an additional filter function when resolving by name.

        Args:
            preset_value (str | int | dict): The preset identifier.
            preset_type (PresetType): The type of preset to resolve.
            filter_lambda (Callable[[dict], bool], optional): A function to
                additionally filter presets when resolving by name. Defaults to None.

        Raises:
            ValueError: If a preset name is not found or is not unique.

        Returns:
            tuple[int | None, dict | None]: A tuple of (preset_id, preset_dict).

        """
        preset_id = None
        preset_dict = None

        key_map = {
            PresetType.NAMING: "id",
            PresetType.SUPER_SESSION: "super_session_preset_id",
            PresetType.DOLBY: "encoding_preset_id",
            PresetType.DTS: "encoding_preset_id",
            PresetType.LOUDNESS: "loudness_preset_id",
            PresetType.TIMECODE: "timecode_preset_id",
        }

        if isinstance(preset_value, str):
            presets = Preset.get_presets(preset_type)
            if not presets:
                raise ValueError(f"No presets of type '{preset_type.value}' found.")

            if filter_lambda:
                presets = [p for p in presets if filter_lambda(p)]
            pf = [p for p in presets if p.get("name") == preset_value]
            if not pf:
                raise ValueError(f"{preset_type.value.capitalize()} preset '{preset_value}' not found or did not match filter criteria.")
            if len(pf) > 1:
                raise ValueError(f"Multiple {preset_type.value} presets found with name '{preset_value}'.")

            preset_id = int(pf[0][key_map[preset_type]])

        elif isinstance(preset_value, int):
            presets = Preset.get_presets(preset_type)
            if not presets:
                raise ValueError(f"No presets of type '{preset_type.value}' found.")

            pf = [p for p in presets if p.get(key_map[preset_type]) == preset_value]
            if not pf:
                raise ValueError(f"{preset_type.value.capitalize()} preset '{preset_value}' not found or did not match filter criteria.")

            preset_id = preset_value

        elif isinstance(preset_value, dict):
            preset_dict = copy.deepcopy(preset_value)

        return preset_id, preset_dict

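    # Example (illustrative behaviour of the helper above; the preset name, id and dict
    # contents are placeholders):
    #
    #   self._resolve_preset("Studio Naming", PresetType.NAMING)     -> (42, None)     # by name
    #   self._resolve_preset(42, PresetType.NAMING)                  -> (42, None)     # by id
    #   self._resolve_preset({"pattern": "..."}, PresetType.NAMING)  -> (None, {...})  # inline dict
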
    def _get_process_block_ids(self, block_names: List[str]) -> List[str]:
        """Map process block names to their internal ids, raising if any name is unknown."""
        ids = []
        for name in block_names:
            found_id = next((pid for pid, pdata in self._process_blocks.items() if pdata["name"] == name), None)
            if not found_id:
                raise ValueError(f"Process block '{name}' not found in workflow.")
            ids.append(found_id)
        return ids

    def _get_package_ids(self, package_names: List[str]) -> List[str]:
        """Map package names to their internal ids, raising if any name is unknown."""
        ids = []
        for name in package_names:
            found_id = next((pid for pid, pdata in self._packages.items() if pdata.get("definition", {}).get("name") == name), None)
            if not found_id:
                raise ValueError(f"Package '{name}' not found in workflow.")
            ids.append(found_id)
        return ids

    def _get_default_workflow_params(self) -> dict:
        """Return the default workflow parameters applied to every built definition."""
        return {
            "dme_stem_mapping": _DME_STEM_MAPPING.copy(),
            "enable_atmos_renders": _DEFAULT_ATMOS_RENDERS.copy(),
        }


class WorkflowDefinition:
    """Represents a finalized Coda workflow definition."""

    def __init__(self, definition: dict) -> None:
        """Initialize the WorkflowDefinition object.

        Args:
            definition (dict): A complete workflow definition payload.

        Raises:
            ValueError: If the provided definition is empty or missing a 'name' key.

        """
        if not definition or "name" not in definition:
            raise ValueError("Cannot initialize Workflow with an invalid definition.")
        self.definition = definition
        self.name = definition["name"]

    def dict(self) -> dict:
        """Return the workflow definition as a dictionary.

        Returns:
            dict: The complete workflow definition payload.

        """
        return self.definition

    @staticmethod
    def from_preset(preset_name: str | None = None, preset_id: str | None = None) -> 'WorkflowDefinition':
        """Create a Workflow instance by importing a saved preset.

        Args:
            preset_name (str, optional): The name of the workflow preset to import. Defaults to None.
            preset_id (str, optional): The id of the workflow preset to import. Defaults to None.

        Raises:
            ValueError: If neither preset_name nor preset_id is supplied, or if the preset cannot be found.

        Returns:
            WorkflowDefinition: A new Workflow instance based on the preset.

        """
        if preset_name is None and preset_id is None:
            raise ValueError("`preset_name` or `preset_id` must be supplied")

        wf_presets = Preset.get_presets(PresetType.WORKFLOWS)
        for j in wf_presets:
            if preset_name is not None and j["name"] == preset_name:
                return WorkflowDefinition(j["definition"])
            if preset_id is not None and j["workflow_id"] == preset_id:
                return WorkflowDefinition(j["definition"])

        raise ValueError("Unable to find workflow preset")

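    # Example (hypothetical usage; the preset name and id are placeholders):
    #
    #   wf = WorkflowDefinition.from_preset(preset_name="Feature Film Delivery")
    #   wf = WorkflowDefinition.from_preset(preset_id="01HZEXAMPLEWORKFLOWID000")
    #
    # Supplying either identifier is sufficient; supplying neither raises ValueError.
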
    @staticmethod
    def from_job(job_id: int, use_mne_definition: bool = False) -> 'WorkflowDefinition':
        """Create a Workflow instance from a completed job.

        Args:
            job_id (int): The ID of the completed job to import from.
            use_mne_definition (bool, optional): Whether to use the M&E workflow definition. Defaults to False.

        Raises:
            ValueError: If the job to import from is not in a 'COMPLETED' state.

        Returns:
            WorkflowDefinition: A new Workflow instance based on the job.

        """
        group_id = validate_group_id()
        print(f"importing workflow from job {job_id}", file=sys.stderr)
        ret = make_request(
            requests.get, f"/interface/v2/groups/{group_id}/jobs/{job_id}"
        )
        j = ret.json()

        if j.get("status") != "COMPLETED":
            raise ValueError(
                f"Cannot import workflow from job {job_id} because its status is '{j.get('status')}'. "
                "The job must be 'COMPLETED'."
            )

        wf_def_key = "mne_workflow_definition" if use_mne_definition and "mne_workflow_definition" in j else "workflow_definition"
        if use_mne_definition and wf_def_key == "workflow_definition":
            print("** WARNING ** M&E workflow definition was not found; using the standard workflow definition.", file=sys.stderr)

        return WorkflowDefinition(j[wf_def_key])
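

# Example (hypothetical usage; the job id is a placeholder and must refer to a COMPLETED job):
#
#   wf = WorkflowDefinition.from_job(12345)
#   mne_wf = WorkflowDefinition.from_job(12345, use_mne_definition=True)
#
# When use_mne_definition is True but the job carries no M&E definition, the standard
# workflow definition is returned and a warning is printed to stderr.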