@aws-sdk/client-mediaconvert 3.529.1 → 3.535.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. package/dist-types/MediaConvert.d.ts +8 -1
  2. package/dist-types/MediaConvertClient.d.ts +1 -1
  3. package/dist-types/commands/AssociateCertificateCommand.d.ts +2 -1
  4. package/dist-types/commands/CancelJobCommand.d.ts +2 -1
  5. package/dist-types/commands/CreateJobCommand.d.ts +2 -1
  6. package/dist-types/commands/CreateJobTemplateCommand.d.ts +2 -1
  7. package/dist-types/commands/CreatePresetCommand.d.ts +2 -1
  8. package/dist-types/commands/CreateQueueCommand.d.ts +2 -1
  9. package/dist-types/commands/DeleteJobTemplateCommand.d.ts +2 -1
  10. package/dist-types/commands/DeletePolicyCommand.d.ts +2 -1
  11. package/dist-types/commands/DeletePresetCommand.d.ts +2 -1
  12. package/dist-types/commands/DeleteQueueCommand.d.ts +2 -1
  13. package/dist-types/commands/DescribeEndpointsCommand.d.ts +3 -3
  14. package/dist-types/commands/DisassociateCertificateCommand.d.ts +2 -1
  15. package/dist-types/commands/GetJobCommand.d.ts +2 -1
  16. package/dist-types/commands/GetJobTemplateCommand.d.ts +2 -1
  17. package/dist-types/commands/GetPolicyCommand.d.ts +2 -1
  18. package/dist-types/commands/GetPresetCommand.d.ts +2 -1
  19. package/dist-types/commands/GetQueueCommand.d.ts +2 -1
  20. package/dist-types/commands/ListJobTemplatesCommand.d.ts +2 -1
  21. package/dist-types/commands/ListJobsCommand.d.ts +2 -1
  22. package/dist-types/commands/ListPresetsCommand.d.ts +2 -1
  23. package/dist-types/commands/ListQueuesCommand.d.ts +2 -1
  24. package/dist-types/commands/ListTagsForResourceCommand.d.ts +2 -1
  25. package/dist-types/commands/PutPolicyCommand.d.ts +2 -1
  26. package/dist-types/commands/TagResourceCommand.d.ts +2 -1
  27. package/dist-types/commands/UntagResourceCommand.d.ts +2 -1
  28. package/dist-types/commands/UpdateJobTemplateCommand.d.ts +2 -1
  29. package/dist-types/commands/UpdatePresetCommand.d.ts +2 -1
  30. package/dist-types/commands/UpdateQueueCommand.d.ts +2 -1
  31. package/dist-types/models/models_0.d.ts +709 -709
  32. package/dist-types/models/models_1.d.ts +659 -659
  33. package/dist-types/models/models_2.d.ts +125 -126
  34. package/dist-types/runtimeConfig.browser.d.ts +2 -2
  35. package/dist-types/runtimeConfig.d.ts +2 -2
  36. package/dist-types/runtimeConfig.native.d.ts +2 -2
  37. package/dist-types/runtimeConfig.shared.d.ts +2 -2
  38. package/dist-types/ts3.4/MediaConvert.d.ts +7 -0
  39. package/dist-types/ts3.4/commands/AssociateCertificateCommand.d.ts +9 -0
  40. package/dist-types/ts3.4/commands/CancelJobCommand.d.ts +9 -0
  41. package/dist-types/ts3.4/commands/CreateJobCommand.d.ts +9 -0
  42. package/dist-types/ts3.4/commands/CreateJobTemplateCommand.d.ts +9 -0
  43. package/dist-types/ts3.4/commands/CreatePresetCommand.d.ts +9 -0
  44. package/dist-types/ts3.4/commands/CreateQueueCommand.d.ts +9 -0
  45. package/dist-types/ts3.4/commands/DeleteJobTemplateCommand.d.ts +9 -0
  46. package/dist-types/ts3.4/commands/DeletePolicyCommand.d.ts +9 -0
  47. package/dist-types/ts3.4/commands/DeletePresetCommand.d.ts +9 -0
  48. package/dist-types/ts3.4/commands/DeleteQueueCommand.d.ts +9 -0
  49. package/dist-types/ts3.4/commands/DescribeEndpointsCommand.d.ts +9 -0
  50. package/dist-types/ts3.4/commands/DisassociateCertificateCommand.d.ts +9 -0
  51. package/dist-types/ts3.4/commands/GetJobCommand.d.ts +7 -0
  52. package/dist-types/ts3.4/commands/GetJobTemplateCommand.d.ts +9 -0
  53. package/dist-types/ts3.4/commands/GetPolicyCommand.d.ts +9 -0
  54. package/dist-types/ts3.4/commands/GetPresetCommand.d.ts +9 -0
  55. package/dist-types/ts3.4/commands/GetQueueCommand.d.ts +9 -0
  56. package/dist-types/ts3.4/commands/ListJobTemplatesCommand.d.ts +9 -0
  57. package/dist-types/ts3.4/commands/ListJobsCommand.d.ts +9 -0
  58. package/dist-types/ts3.4/commands/ListPresetsCommand.d.ts +9 -0
  59. package/dist-types/ts3.4/commands/ListQueuesCommand.d.ts +9 -0
  60. package/dist-types/ts3.4/commands/ListTagsForResourceCommand.d.ts +9 -0
  61. package/dist-types/ts3.4/commands/PutPolicyCommand.d.ts +9 -0
  62. package/dist-types/ts3.4/commands/TagResourceCommand.d.ts +9 -0
  63. package/dist-types/ts3.4/commands/UntagResourceCommand.d.ts +9 -0
  64. package/dist-types/ts3.4/commands/UpdateJobTemplateCommand.d.ts +9 -0
  65. package/dist-types/ts3.4/commands/UpdatePresetCommand.d.ts +9 -0
  66. package/dist-types/ts3.4/commands/UpdateQueueCommand.d.ts +9 -0
  67. package/dist-types/ts3.4/runtimeConfig.browser.d.ts +2 -2
  68. package/dist-types/ts3.4/runtimeConfig.d.ts +2 -2
  69. package/dist-types/ts3.4/runtimeConfig.native.d.ts +2 -2
  70. package/dist-types/ts3.4/runtimeConfig.shared.d.ts +2 -2
  71. package/package.json +40 -40
@@ -11,23 +11,23 @@ export declare const RequiredFlag: {
11
11
  */
12
12
  export type RequiredFlag = (typeof RequiredFlag)[keyof typeof RequiredFlag];
13
13
  /**
14
- * @public
15
14
  * Use Allowed renditions to specify a list of possible resolutions in your ABR stack. * MediaConvert will create an ABR stack exclusively from the list of resolutions that you specify. * Some resolutions in the Allowed renditions list may not be included, however you can force a resolution to be included by setting Required to ENABLED. * You must specify at least one resolution that is greater than or equal to any resolutions that you specify in Min top rendition size or Min bottom rendition size. * If you specify Allowed renditions, you must not specify a separate rule for Force include renditions.
15
+ * @public
16
16
  */
17
17
  export interface AllowedRenditionSize {
18
18
  /**
19
- * @public
20
19
  * Use Height to define the video resolution height, in pixels, for this rule.
20
+ * @public
21
21
  */
22
22
  Height?: number;
23
23
  /**
24
- * @public
25
24
  * Set to ENABLED to force a rendition to be included.
25
+ * @public
26
26
  */
27
27
  Required?: RequiredFlag;
28
28
  /**
29
- * @public
30
29
  * Use Width to define the video resolution width, in pixels, for this rule.
30
+ * @public
31
31
  */
32
32
  Width?: number;
33
33
  }
@@ -70,18 +70,18 @@ export declare const AudioChannelTag: {
70
70
  */
71
71
  export type AudioChannelTag = (typeof AudioChannelTag)[keyof typeof AudioChannelTag];
72
72
  /**
73
- * @public
74
73
  * Specify the QuickTime audio channel layout tags for the audio channels in this audio track. When you don't specify a value, MediaConvert labels your track as Center (C) by default. To use Audio layout tagging, your output must be in a QuickTime (MOV) container and your audio codec must be AAC, WAV, or AIFF.
74
+ * @public
75
75
  */
76
76
  export interface AudioChannelTaggingSettings {
77
77
  /**
78
- * @public
79
78
  * Specify the QuickTime audio channel layout tags for the audio channels in this audio track. Enter channel layout tags in the same order as your output's audio channel order. For example, if your output audio track has a left and a right channel, enter Left (L) for the first channel and Right (R) for the second. If your output has multiple single-channel audio tracks, enter a single channel layout tag for each track.
79
+ * @public
80
80
  */
81
81
  ChannelTag?: AudioChannelTag;
82
82
  /**
83
- * @public
84
83
  * Specify the QuickTime audio channel layout tags for the audio channels in this audio track. Enter channel layout tags in the same order as your output's audio channel order. For example, if your output audio track has a left and a right channel, enter Left (L) for the first channel and Right (R) for the second. If your output has multiple single-channel audio tracks, enter a single channel layout tag for each track.
84
+ * @public
85
85
  */
86
86
  ChannelTags?: AudioChannelTag[];
87
87
  }
@@ -136,43 +136,43 @@ export declare const AudioNormalizationPeakCalculation: {
136
136
  */
137
137
  export type AudioNormalizationPeakCalculation = (typeof AudioNormalizationPeakCalculation)[keyof typeof AudioNormalizationPeakCalculation];
138
138
  /**
139
- * @public
140
139
  * Advanced audio normalization settings. Ignore these settings unless you need to comply with a loudness standard.
140
+ * @public
141
141
  */
142
142
  export interface AudioNormalizationSettings {
143
143
  /**
144
- * @public
145
144
  * Choose one of the following audio normalization algorithms: ITU-R BS.1770-1: Ungated loudness. A measurement of ungated average loudness for an entire piece of content, suitable for measurement of short-form content under ATSC recommendation A/85. Supports up to 5.1 audio channels. ITU-R BS.1770-2: Gated loudness. A measurement of gated average loudness compliant with the requirements of EBU-R128. Supports up to 5.1 audio channels. ITU-R BS.1770-3: Modified peak. The same loudness measurement algorithm as 1770-2, with an updated true peak measurement. ITU-R BS.1770-4: Higher channel count. Allows for more audio channels than the other algorithms, including configurations such as 7.1.
145
+ * @public
146
146
  */
147
147
  Algorithm?: AudioNormalizationAlgorithm;
148
148
  /**
149
- * @public
150
149
  * When enabled the output audio is corrected using the chosen algorithm. If disabled, the audio will be measured but not adjusted.
150
+ * @public
151
151
  */
152
152
  AlgorithmControl?: AudioNormalizationAlgorithmControl;
153
153
  /**
154
- * @public
155
154
  * Content measuring above this level will be corrected to the target level. Content measuring below this level will not be corrected.
155
+ * @public
156
156
  */
157
157
  CorrectionGateLevel?: number;
158
158
  /**
159
- * @public
160
159
  * If set to LOG, log each output's audio track loudness to a CSV file.
160
+ * @public
161
161
  */
162
162
  LoudnessLogging?: AudioNormalizationLoudnessLogging;
163
163
  /**
164
- * @public
165
164
  * If set to TRUE_PEAK, calculate and log the TruePeak for each output's audio track loudness.
165
+ * @public
166
166
  */
167
167
  PeakCalculation?: AudioNormalizationPeakCalculation;
168
168
  /**
169
- * @public
170
169
  * When you use Audio normalization, optionally use this setting to specify a target loudness. If you don't specify a value here, the encoder chooses a value for you, based on the algorithm that you choose for Algorithm. If you choose algorithm 1770-1, the encoder will choose -24 LKFS; otherwise, the encoder will choose -23 LKFS.
170
+ * @public
171
171
  */
172
172
  TargetLkfs?: number;
173
173
  /**
174
- * @public
175
174
  * Specify the True-peak limiter threshold in decibels relative to full scale (dBFS). The peak inter-audio sample loudness in your output will be limited to the value that you specify, without affecting the overall target LKFS. Enter a value from 0 to -8. Leave blank to use the default value 0.
175
+ * @public
176
176
  */
177
177
  TruePeakLimiterThreshold?: number;
178
178
  }
@@ -279,53 +279,53 @@ export declare const AacVbrQuality: {
279
279
  */
280
280
  export type AacVbrQuality = (typeof AacVbrQuality)[keyof typeof AacVbrQuality];
281
281
  /**
282
- * @public
283
282
  * Required when you set Codec to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode to "VBR" or "CBR". In VBR mode, you control the audio quality with the setting VBR quality. In CBR mode, you use the setting Bitrate. Defaults and valid values depend on the rate control mode.
283
+ * @public
284
284
  */
285
285
  export interface AacSettings {
286
286
  /**
287
- * @public
288
287
  * Choose BROADCASTER_MIXED_AD when the input contains pre-mixed main audio + audio description (AD) as a stereo pair. The value for AudioType will be set to 3, which signals to downstream systems that this stream contains "broadcaster mixed AD". Note that the input received by the encoder must contain pre-mixed audio; the encoder does not perform the mixing. When you choose BROADCASTER_MIXED_AD, the encoder ignores any values you provide in AudioType and FollowInputAudioType. Choose NORMAL when the input does not contain pre-mixed audio + audio description (AD). In this case, the encoder will use any values you provide for AudioType and FollowInputAudioType.
288
+ * @public
289
289
  */
290
290
  AudioDescriptionBroadcasterMix?: AacAudioDescriptionBroadcasterMix;
291
291
  /**
292
- * @public
293
292
  * Specify the average bitrate in bits per second. The set of valid values for this setting is: 6000, 8000, 10000, 12000, 14000, 16000, 20000, 24000, 28000, 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 288000, 320000, 384000, 448000, 512000, 576000, 640000, 768000, 896000, 1024000. The value you set is also constrained by the values that you choose for Profile, Bitrate control mode, and Sample rate. Default values depend on Bitrate control mode and Profile.
293
+ * @public
294
294
  */
295
295
  Bitrate?: number;
296
296
  /**
297
- * @public
298
297
  * AAC Profile.
298
+ * @public
299
299
  */
300
300
  CodecProfile?: AacCodecProfile;
301
301
  /**
302
- * @public
303
302
  * The Coding mode that you specify determines the number of audio channels and the audio channel layout metadata in your AAC output. Valid coding modes depend on the Rate control mode and Profile that you select. The following list shows the number of audio channels and channel layout for each coding mode. * 1.0 Audio Description (Receiver Mix): One channel, C. Includes audio description data from your stereo input. For more information see ETSI TS 101 154 Annex E. * 1.0 Mono: One channel, C. * 2.0 Stereo: Two channels, L, R. * 5.1 Surround: Six channels, C, L, R, Ls, Rs, LFE.
303
+ * @public
304
304
  */
305
305
  CodingMode?: AacCodingMode;
306
306
  /**
307
- * @public
308
307
  * Rate Control Mode.
308
+ * @public
309
309
  */
310
310
  RateControlMode?: AacRateControlMode;
311
311
  /**
312
- * @public
313
312
  * Enables LATM/LOAS AAC output. Note that if you use LATM/LOAS AAC in an output, you must choose "No container" for the output container.
313
+ * @public
314
314
  */
315
315
  RawFormat?: AacRawFormat;
316
316
  /**
317
- * @public
318
317
  * Specify the Sample rate in Hz. Valid sample rates depend on the Profile and Coding mode that you select. The following list shows valid sample rates for each Profile and Coding mode. * LC Profile, Coding mode 1.0, 2.0, and Receiver Mix: 8000, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 88200, 96000. * LC Profile, Coding mode 5.1: 32000, 44100, 48000, 96000. * HEV1 Profile, Coding mode 1.0 and Receiver Mix: 22050, 24000, 32000, 44100, 48000. * HEV1 Profile, Coding mode 2.0 and 5.1: 32000, 44100, 48000, 96000. * HEV2 Profile, Coding mode 2.0: 22050, 24000, 32000, 44100, 48000.
318
+ * @public
319
319
  */
320
320
  SampleRate?: number;
321
321
  /**
322
- * @public
323
322
  * Use MPEG-2 AAC instead of MPEG-4 AAC audio for raw or MPEG-2 Transport Stream containers.
323
+ * @public
324
324
  */
325
325
  Specification?: AacSpecification;
326
326
  /**
327
- * @public
328
327
  * VBR Quality Level - Only used if rate_control_mode is VBR.
328
+ * @public
329
329
  */
330
330
  VbrQuality?: AacVbrQuality;
331
331
  }
@@ -430,79 +430,79 @@ export declare const Ac3MetadataControl: {
430
430
  */
431
431
  export type Ac3MetadataControl = (typeof Ac3MetadataControl)[keyof typeof Ac3MetadataControl];
432
432
  /**
433
- * @public
434
433
  * Required when you set Codec to the value AC3.
434
+ * @public
435
435
  */
436
436
  export interface Ac3Settings {
437
437
  /**
438
- * @public
439
438
  * Specify the average bitrate in bits per second. The bitrate that you specify must be a multiple of 8000 within the allowed minimum and maximum values. Leave blank to use the default bitrate for the coding mode you select according to ETSI TS 102 366. Valid bitrates for coding mode 1/0: Default: 96000. Minimum: 64000. Maximum: 128000. Valid bitrates for coding mode 1/1: Default: 192000. Minimum: 128000. Maximum: 384000. Valid bitrates for coding mode 2/0: Default: 192000. Minimum: 128000. Maximum: 384000. Valid bitrates for coding mode 3/2 with LFE: Default: 384000. Minimum: 384000. Maximum: 640000.
439
+ * @public
440
440
  */
441
441
  Bitrate?: number;
442
442
  /**
443
- * @public
444
443
  * Specify the bitstream mode for the AC-3 stream that the encoder emits. For more information about the AC3 bitstream mode, see ATSC A/52-2012 (Annex E).
444
+ * @public
445
445
  */
446
446
  BitstreamMode?: Ac3BitstreamMode;
447
447
  /**
448
- * @public
449
448
  * Dolby Digital coding mode. Determines number of channels.
449
+ * @public
450
450
  */
451
451
  CodingMode?: Ac3CodingMode;
452
452
  /**
453
- * @public
454
453
  * Sets the dialnorm for the output. If blank and input audio is Dolby Digital, dialnorm will be passed through.
454
+ * @public
455
455
  */
456
456
  Dialnorm?: number;
457
457
  /**
458
- * @public
459
458
  * Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
459
+ * @public
460
460
  */
461
461
  DynamicRangeCompressionLine?: Ac3DynamicRangeCompressionLine;
462
462
  /**
463
- * @public
464
463
  * When you want to add Dolby dynamic range compression (DRC) signaling to your output stream, we recommend that you use the mode-specific settings instead of Dynamic range compression profile. The mode-specific settings are Dynamic range compression profile, line mode and Dynamic range compression profile, RF mode. Note that when you specify values for all three settings, MediaConvert ignores the value of this setting in favor of the mode-specific settings. If you do use this setting instead of the mode-specific settings, choose None to leave out DRC signaling. Keep the default Film standard to set the profile to Dolby's film standard profile for all operating modes.
464
+ * @public
465
465
  */
466
466
  DynamicRangeCompressionProfile?: Ac3DynamicRangeCompressionProfile;
467
467
  /**
468
- * @public
469
468
  * Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
469
+ * @public
470
470
  */
471
471
  DynamicRangeCompressionRf?: Ac3DynamicRangeCompressionRf;
472
472
  /**
473
- * @public
474
473
  * Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only valid with 3_2_LFE coding mode.
474
+ * @public
475
475
  */
476
476
  LfeFilter?: Ac3LfeFilter;
477
477
  /**
478
- * @public
479
478
  * When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+, or DolbyE decoder that supplied this audio data. If audio was not supplied from one of these streams, then the static metadata settings will be used.
479
+ * @public
480
480
  */
481
481
  MetadataControl?: Ac3MetadataControl;
482
482
  /**
483
- * @public
484
483
  * This value is always 48000. It represents the sample rate in Hz.
484
+ * @public
485
485
  */
486
486
  SampleRate?: number;
487
487
  }
488
488
  /**
489
- * @public
490
489
  * Required when you set Codec to the value AIFF.
490
+ * @public
491
491
  */
492
492
  export interface AiffSettings {
493
493
  /**
494
- * @public
495
494
  * Specify Bit depth, in bits per sample, to choose the encoding quality for this audio track.
495
+ * @public
496
496
  */
497
497
  BitDepth?: number;
498
498
  /**
499
- * @public
500
499
  * Specify the number of channels in this output audio track. Valid values are 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64.
500
+ * @public
501
501
  */
502
502
  Channels?: number;
503
503
  /**
504
- * @public
505
504
  * Sample rate in Hz.
505
+ * @public
506
506
  */
507
507
  SampleRate?: number;
508
508
  }
@@ -664,93 +664,93 @@ export declare const Eac3AtmosSurroundExMode: {
664
664
  */
665
665
  export type Eac3AtmosSurroundExMode = (typeof Eac3AtmosSurroundExMode)[keyof typeof Eac3AtmosSurroundExMode];
666
666
  /**
667
- * @public
668
667
  * Required when you set Codec to the value EAC3_ATMOS.
668
+ * @public
669
669
  */
670
670
  export interface Eac3AtmosSettings {
671
671
  /**
672
- * @public
673
672
  * Specify the average bitrate for this output in bits per second. Valid values: 384k, 448k, 576k, 640k, 768k, 1024k Default value: 448k Note that MediaConvert supports 384k only with channel-based immersive (CBI) 7.1.4 and 5.1.4 inputs. For CBI 9.1.6 and other input types, MediaConvert automatically increases your output bitrate to 448k.
673
+ * @public
674
674
  */
675
675
  Bitrate?: number;
676
676
  /**
677
- * @public
678
677
  * Specify the bitstream mode for the E-AC-3 stream that the encoder emits. For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex E).
678
+ * @public
679
679
  */
680
680
  BitstreamMode?: Eac3AtmosBitstreamMode;
681
681
  /**
682
- * @public
683
682
  * The coding mode for Dolby Digital Plus JOC (Atmos).
683
+ * @public
684
684
  */
685
685
  CodingMode?: Eac3AtmosCodingMode;
686
686
  /**
687
- * @public
688
687
  * Enable Dolby Dialogue Intelligence to adjust loudness based on dialogue analysis.
688
+ * @public
689
689
  */
690
690
  DialogueIntelligence?: Eac3AtmosDialogueIntelligence;
691
691
  /**
692
- * @public
693
692
  * Specify whether MediaConvert should use any downmix metadata from your input file. Keep the default value, Custom to provide downmix values in your job settings. Choose Follow source to use the metadata from your input. Related settings--Use these settings to specify your downmix values: Left only/Right only surround, Left total/Right total surround, Left total/Right total center, Left only/Right only center, and Stereo downmix. When you keep Custom for Downmix control and you don't specify values for the related settings, MediaConvert uses default values for those settings.
693
+ * @public
694
694
  */
695
695
  DownmixControl?: Eac3AtmosDownmixControl;
696
696
  /**
697
- * @public
698
697
  * Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the line operating mode. Default value: Film light Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom for the setting Dynamic range control. Otherwise, MediaConvert ignores Dynamic range compression line. For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
698
+ * @public
699
699
  */
700
700
  DynamicRangeCompressionLine?: Eac3AtmosDynamicRangeCompressionLine;
701
701
  /**
702
- * @public
703
702
  * Choose the Dolby dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby stream for the RF operating mode. Default value: Film light Related setting: To have MediaConvert use the value you specify here, keep the default value, Custom for the setting Dynamic range control. Otherwise, MediaConvert ignores Dynamic range compression RF. For information about the Dolby DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
703
+ * @public
704
704
  */
705
705
  DynamicRangeCompressionRf?: Eac3AtmosDynamicRangeCompressionRf;
706
706
  /**
707
- * @public
708
707
  * Specify whether MediaConvert should use any dynamic range control metadata from your input file. Keep the default value, Custom, to provide dynamic range control values in your job settings. Choose Follow source to use the metadata from your input. Related settings--Use these settings to specify your dynamic range control values: Dynamic range compression line and Dynamic range compression RF. When you keep the value Custom for Dynamic range control and you don't specify values for the related settings, MediaConvert uses default values for those settings.
708
+ * @public
709
709
  */
710
710
  DynamicRangeControl?: Eac3AtmosDynamicRangeControl;
711
711
  /**
712
- * @public
713
712
  * Specify a value for the following Dolby Atmos setting: Left only/Right only center mix (Lo/Ro center). MediaConvert uses this value for downmixing. Default value: -3 dB. Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, MediaConvert ignores Left only/Right only center.
713
+ * @public
714
714
  */
715
715
  LoRoCenterMixLevel?: number;
716
716
  /**
717
- * @public
718
717
  * Specify a value for the following Dolby Atmos setting: Left only/Right only. MediaConvert uses this value for downmixing. Default value: -3 dB. Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, MediaConvert ignores Left only/Right only surround.
718
+ * @public
719
719
  */
720
720
  LoRoSurroundMixLevel?: number;
721
721
  /**
722
- * @public
723
722
  * Specify a value for the following Dolby Atmos setting: Left total/Right total center mix (Lt/Rt center). MediaConvert uses this value for downmixing. Default value: -3 dB Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, and -6.0. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, MediaConvert ignores Left total/Right total center.
723
+ * @public
724
724
  */
725
725
  LtRtCenterMixLevel?: number;
726
726
  /**
727
- * @public
728
727
  * Specify a value for the following Dolby Atmos setting: Left total/Right total surround mix (Lt/Rt surround). MediaConvert uses this value for downmixing. Default value: -3 dB Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. Related setting: How the service uses this value depends on the value that you choose for Stereo downmix. Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, the service ignores Left total/Right total surround.
728
+ * @public
729
729
  */
730
730
  LtRtSurroundMixLevel?: number;
731
731
  /**
732
- * @public
733
732
  * Choose how the service meters the loudness of your audio.
733
+ * @public
734
734
  */
735
735
  MeteringMode?: Eac3AtmosMeteringMode;
736
736
  /**
737
- * @public
738
737
  * This value is always 48000. It represents the sample rate in Hz.
738
+ * @public
739
739
  */
740
740
  SampleRate?: number;
741
741
  /**
742
- * @public
743
742
  * Specify the percentage of audio content, from 0% to 100%, that must be speech in order for the encoder to use the measured speech loudness as the overall program loudness. Default value: 15%
743
+ * @public
744
744
  */
745
745
  SpeechThreshold?: number;
746
746
  /**
747
- * @public
748
747
  * Choose how the service does stereo downmixing. Default value: Not indicated Related setting: To have MediaConvert use this value, keep the default value, Custom for the setting Downmix control. Otherwise, MediaConvert ignores Stereo downmix.
748
+ * @public
749
749
  */
750
750
  StereoDownmix?: Eac3AtmosStereoDownmix;
751
751
  /**
752
- * @public
753
752
  * Specify whether your input audio has an additional center rear surround channel matrix encoded into your left and right surround channels.
753
+ * @public
754
754
  */
755
755
  SurroundExMode?: Eac3AtmosSurroundExMode;
756
756
  }
@@ -939,155 +939,155 @@ export declare const Eac3SurroundMode: {
939
939
  */
940
940
  export type Eac3SurroundMode = (typeof Eac3SurroundMode)[keyof typeof Eac3SurroundMode];
941
941
  /**
942
- * @public
943
942
  * Required when you set Codec to the value EAC3.
943
+ * @public
944
944
  */
945
945
  export interface Eac3Settings {
946
946
  /**
947
- * @public
948
947
  * If set to ATTENUATE_3_DB, applies a 3 dB attenuation to the surround channels. Only used for 3/2 coding mode.
948
+ * @public
949
949
  */
950
950
  AttenuationControl?: Eac3AttenuationControl;
951
951
  /**
952
- * @public
953
952
  * Specify the average bitrate in bits per second. The bitrate that you specify must be a multiple of 8000 within the allowed minimum and maximum values. Leave blank to use the default bitrate for the coding mode you select according ETSI TS 102 366. Valid bitrates for coding mode 1/0: Default: 96000. Minimum: 32000. Maximum: 3024000. Valid bitrates for coding mode 2/0: Default: 192000. Minimum: 96000. Maximum: 3024000. Valid bitrates for coding mode 3/2: Default: 384000. Minimum: 192000. Maximum: 3024000.
953
+ * @public
954
954
  */
955
955
  Bitrate?: number;
956
956
  /**
957
- * @public
958
957
  * Specify the bitstream mode for the E-AC-3 stream that the encoder emits. For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex E).
958
+ * @public
959
959
  */
960
960
  BitstreamMode?: Eac3BitstreamMode;
961
961
  /**
962
- * @public
963
962
  * Dolby Digital Plus coding mode. Determines number of channels.
963
+ * @public
964
964
  */
965
965
  CodingMode?: Eac3CodingMode;
966
966
  /**
967
- * @public
968
967
  * Activates a DC highpass filter for all input channels.
968
+ * @public
969
969
  */
970
970
  DcFilter?: Eac3DcFilter;
971
971
  /**
972
- * @public
973
972
  * Sets the dialnorm for the output. If blank and input audio is Dolby Digital Plus, dialnorm will be passed through.
973
+ * @public
974
974
  */
975
975
  Dialnorm?: number;
976
976
  /**
977
- * @public
978
977
  * Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the line operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
978
+ * @public
979
979
  */
980
980
  DynamicRangeCompressionLine?: Eac3DynamicRangeCompressionLine;
981
981
  /**
982
- * @public
983
982
  * Choose the Dolby Digital dynamic range control (DRC) profile that MediaConvert uses when encoding the metadata in the Dolby Digital stream for the RF operating mode. Related setting: When you use this setting, MediaConvert ignores any value you provide for Dynamic range compression profile. For information about the Dolby Digital DRC operating modes and profiles, see the Dynamic Range Control chapter of the Dolby Metadata Guide at https://developer.dolby.com/globalassets/professional/documents/dolby-metadata-guide.pdf.
983
+ * @public
984
984
  */
985
985
  DynamicRangeCompressionRf?: Eac3DynamicRangeCompressionRf;
986
986
  /**
987
- * @public
988
987
  * When encoding 3/2 audio, controls whether the LFE channel is enabled
988
+ * @public
989
989
  */
990
990
  LfeControl?: Eac3LfeControl;
991
991
  /**
992
- * @public
993
992
  * Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only valid with 3_2_LFE coding mode.
993
+ * @public
994
994
  */
995
995
  LfeFilter?: Eac3LfeFilter;
996
996
  /**
997
- * @public
998
997
  * Specify a value for the following Dolby Digital Plus setting: Left only/Right only center mix. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left only/Right only center.
998
+ * @public
999
999
  */
1000
1000
  LoRoCenterMixLevel?: number;
1001
1001
  /**
1002
- * @public
1003
1002
  * Specify a value for the following Dolby Digital Plus setting: Left only/Right only. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left only/Right only surround.
1003
+ * @public
1004
1004
  */
1005
1005
  LoRoSurroundMixLevel?: number;
1006
1006
  /**
1007
- * @public
1008
1007
  * Specify a value for the following Dolby Digital Plus setting: Left total/Right total center mix. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: 3.0, 1.5, 0.0, -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left total/Right total center.
1008
+ * @public
1009
1009
  */
1010
1010
  LtRtCenterMixLevel?: number;
1011
1011
  /**
1012
- * @public
1013
1012
  * Specify a value for the following Dolby Digital Plus setting: Left total/Right total surround mix. MediaConvert uses this value for downmixing. How the service uses this value depends on the value that you choose for Stereo downmix. Valid values: -1.5, -3.0, -4.5, -6.0, and -60. The value -60 mutes the channel. This setting applies only if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Left total/Right total surround.
1013
+ * @public
1014
1014
  */
1015
1015
  LtRtSurroundMixLevel?: number;
1016
1016
  /**
1017
- * @public
1018
1017
  * When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+, or DolbyE decoder that supplied this audio data. If audio was not supplied from one of these streams, then the static metadata settings will be used.
1018
+ * @public
1019
1019
  */
1020
1020
  MetadataControl?: Eac3MetadataControl;
1021
1021
  /**
1022
- * @public
1023
1022
  * When set to WHEN_POSSIBLE, input DD+ audio will be passed through if it is present on the input. this detection is dynamic over the life of the transcode. Inputs that alternate between DD+ and non-DD+ content will have a consistent DD+ output as the system alternates between passthrough and encoding.
1023
+ * @public
1024
1024
  */
1025
1025
  PassthroughControl?: Eac3PassthroughControl;
1026
1026
  /**
1027
- * @public
1028
1027
  * Controls the amount of phase-shift applied to the surround channels. Only used for 3/2 coding mode.
1028
+ * @public
1029
1029
  */
1030
1030
  PhaseControl?: Eac3PhaseControl;
1031
1031
  /**
1032
- * @public
1033
1032
  * This value is always 48000. It represents the sample rate in Hz.
1033
+ * @public
1034
1034
  */
1035
1035
  SampleRate?: number;
1036
1036
  /**
1037
- * @public
1038
1037
  * Choose how the service does stereo downmixing. This setting only applies if you keep the default value of 3/2 - L, R, C, Ls, Rs for the setting Coding mode. If you choose a different value for Coding mode, the service ignores Stereo downmix.
1038
+ * @public
1039
1039
  */
1040
1040
  StereoDownmix?: Eac3StereoDownmix;
1041
1041
  /**
1042
- * @public
1043
1042
  * When encoding 3/2 audio, sets whether an extra center back surround channel is matrix encoded into the left and right surround channels.
1043
+ * @public
1044
1044
  */
1045
1045
  SurroundExMode?: Eac3SurroundExMode;
1046
1046
  /**
1047
- * @public
1048
1047
  * When encoding 2/0 audio, sets whether Dolby Surround is matrix encoded into the two channels.
1048
+ * @public
1049
1049
  */
1050
1050
  SurroundMode?: Eac3SurroundMode;
1051
1051
  }
1052
1052
  /**
1053
- * @public
1054
1053
  * Required when you set Codec, under AudioDescriptions>CodecSettings, to the value FLAC.
1054
+ * @public
1055
1055
  */
1056
1056
  export interface FlacSettings {
1057
1057
  /**
1058
- * @public
1059
1058
  * Specify Bit depth (BitDepth), in bits per sample, to choose the encoding quality for this audio track.
1059
+ * @public
1060
1060
  */
1061
1061
  BitDepth?: number;
1062
1062
  /**
1063
- * @public
1064
1063
  * Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are between 1 and 8.
1064
+ * @public
1065
1065
  */
1066
1066
  Channels?: number;
1067
1067
  /**
1068
- * @public
1069
1068
  * Sample rate in Hz.
1069
+ * @public
1070
1070
  */
1071
1071
  SampleRate?: number;
1072
1072
  }
1073
1073
  /**
1074
- * @public
1075
1074
  * Required when you set Codec to the value MP2.
1075
+ * @public
1076
1076
  */
1077
1077
  export interface Mp2Settings {
1078
1078
  /**
1079
- * @public
1080
1079
  * Specify the average bitrate in bits per second.
1080
+ * @public
1081
1081
  */
1082
1082
  Bitrate?: number;
1083
1083
  /**
1084
- * @public
1085
1084
  * Set Channels to specify the number of channels in this output audio track. Choosing Mono in will give you 1 output channel; choosing Stereo will give you 2. In the API, valid values are 1 and 2.
1085
+ * @public
1086
1086
  */
1087
1087
  Channels?: number;
1088
1088
  /**
1089
- * @public
1090
1089
  * Sample rate in Hz.
1090
+ * @public
1091
1091
  */
1092
1092
  SampleRate?: number;
1093
1093
  }
@@ -1104,75 +1104,75 @@ export declare const Mp3RateControlMode: {
1104
1104
  */
1105
1105
  export type Mp3RateControlMode = (typeof Mp3RateControlMode)[keyof typeof Mp3RateControlMode];
1106
1106
  /**
1107
- * @public
1108
1107
  * Required when you set Codec, under AudioDescriptions>CodecSettings, to the value MP3.
1108
+ * @public
1109
1109
  */
1110
1110
  export interface Mp3Settings {
1111
1111
  /**
1112
- * @public
1113
1112
  * Specify the average bitrate in bits per second.
1113
+ * @public
1114
1114
  */
1115
1115
  Bitrate?: number;
1116
1116
  /**
1117
- * @public
1118
1117
  * Specify the number of channels in this output audio track. Choosing Mono gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2.
1118
+ * @public
1119
1119
  */
1120
1120
  Channels?: number;
1121
1121
  /**
1122
- * @public
1123
1122
  * Specify whether the service encodes this MP3 audio output with a constant bitrate (CBR) or a variable bitrate (VBR).
1123
+ * @public
1124
1124
  */
1125
1125
  RateControlMode?: Mp3RateControlMode;
1126
1126
  /**
1127
- * @public
1128
1127
  * Sample rate in Hz.
1128
+ * @public
1129
1129
  */
1130
1130
  SampleRate?: number;
1131
1131
  /**
1132
- * @public
1133
1132
  * Required when you set Bitrate control mode to VBR. Specify the audio quality of this MP3 output from 0 (highest quality) to 9 (lowest quality).
1133
+ * @public
1134
1134
  */
1135
1135
  VbrQuality?: number;
1136
1136
  }
1137
1137
  /**
1138
- * @public
1139
1138
  * Required when you set Codec, under AudioDescriptions>CodecSettings, to the value OPUS.
1139
+ * @public
1140
1140
  */
1141
1141
  export interface OpusSettings {
1142
1142
  /**
1143
- * @public
1144
1143
  * Optional. Specify the average bitrate in bits per second. Valid values are multiples of 8000, from 32000 through 192000. The default value is 96000, which we recommend for quality and bandwidth.
1144
+ * @public
1145
1145
  */
1146
1146
  Bitrate?: number;
1147
1147
  /**
1148
- * @public
1149
1148
  * Specify the number of channels in this output audio track. Choosing Mono on gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2.
1149
+ * @public
1150
1150
  */
1151
1151
  Channels?: number;
1152
1152
  /**
1153
- * @public
1154
1153
  * Optional. Sample rate in Hz. Valid values are 16000, 24000, and 48000. The default value is 48000.
1154
+ * @public
1155
1155
  */
1156
1156
  SampleRate?: number;
1157
1157
  }
1158
1158
  /**
1159
- * @public
1160
1159
  * Required when you set Codec, under AudioDescriptions>CodecSettings, to the value Vorbis.
1160
+ * @public
1161
1161
  */
1162
1162
  export interface VorbisSettings {
1163
1163
  /**
1164
- * @public
1165
1164
  * Optional. Specify the number of channels in this output audio track. Choosing Mono on the console gives you 1 output channel; choosing Stereo gives you 2. In the API, valid values are 1 and 2. The default value is 2.
1165
+ * @public
1166
1166
  */
1167
1167
  Channels?: number;
1168
1168
  /**
1169
- * @public
1170
1169
  * Optional. Specify the audio sample rate in Hz. Valid values are 22050, 32000, 44100, and 48000. The default value is 48000.
1170
+ * @public
1171
1171
  */
1172
1172
  SampleRate?: number;
1173
1173
  /**
1174
- * @public
1175
1174
  * Optional. Specify the variable audio quality of this Vorbis output from -1 (lowest quality, ~45 kbit/s) to 10 (highest quality, ~500 kbit/s). The default value is 4 (~128 kbit/s). Values 5 and 6 are approximately 160 and 192 kbit/s, respectively.
1175
+ * @public
1176
1176
  */
1177
1177
  VbrQuality?: number;
1178
1178
  }
@@ -1189,94 +1189,94 @@ export declare const WavFormat: {
1189
1189
  */
1190
1190
  export type WavFormat = (typeof WavFormat)[keyof typeof WavFormat];
1191
1191
  /**
1192
- * @public
1193
1192
  * Required when you set Codec to the value WAV.
1193
+ * @public
1194
1194
  */
1195
1195
  export interface WavSettings {
1196
1196
  /**
1197
- * @public
1198
1197
  * Specify Bit depth, in bits per sample, to choose the encoding quality for this audio track.
1198
+ * @public
1199
1199
  */
1200
1200
  BitDepth?: number;
1201
1201
  /**
1202
- * @public
1203
1202
  * Specify the number of channels in this output audio track. Valid values are 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64.
1203
+ * @public
1204
1204
  */
1205
1205
  Channels?: number;
1206
1206
  /**
1207
- * @public
1208
1207
  * The service defaults to using RIFF for WAV outputs. If your output audio is likely to exceed 4 GB in file size, or if you otherwise need the extended support of the RF64 format, set your output WAV file format to RF64.
1208
+ * @public
1209
1209
  */
1210
1210
  Format?: WavFormat;
1211
1211
  /**
1212
- * @public
1213
1212
  * Sample rate in Hz.
1213
+ * @public
1214
1214
  */
1215
1215
  SampleRate?: number;
1216
1216
  }
1217
1217
  /**
1218
- * @public
1219
1218
  * Settings related to audio encoding. The settings in this group vary depending on the value that you choose for your audio codec.
1219
+ * @public
1220
1220
  */
1221
1221
  export interface AudioCodecSettings {
1222
1222
  /**
1223
- * @public
1224
1223
  * Required when you set Codec to the value AAC. The service accepts one of two mutually exclusive groups of AAC settings--VBR and CBR. To select one of these modes, set the value of Bitrate control mode to "VBR" or "CBR". In VBR mode, you control the audio quality with the setting VBR quality. In CBR mode, you use the setting Bitrate. Defaults and valid values depend on the rate control mode.
1224
+ * @public
1225
1225
  */
1226
1226
  AacSettings?: AacSettings;
1227
1227
  /**
1228
- * @public
1229
1228
  * Required when you set Codec to the value AC3.
1229
+ * @public
1230
1230
  */
1231
1231
  Ac3Settings?: Ac3Settings;
1232
1232
  /**
1233
- * @public
1234
1233
  * Required when you set Codec to the value AIFF.
1234
+ * @public
1235
1235
  */
1236
1236
  AiffSettings?: AiffSettings;
1237
1237
  /**
1238
- * @public
1239
1238
  * Choose the audio codec for this output. Note that the option Dolby Digital passthrough applies only to Dolby Digital and Dolby Digital Plus audio inputs. Make sure that you choose a codec that's supported with your output container: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#reference-codecs-containers-output-audio For audio-only outputs, make sure that both your input audio codec and your output audio codec are supported for audio-only workflows. For more information, see: https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers-input.html#reference-codecs-containers-input-audio-only and https://docs.aws.amazon.com/mediaconvert/latest/ug/reference-codecs-containers.html#audio-only-output
1239
+ * @public
1240
1240
  */
1241
1241
  Codec?: AudioCodec;
1242
1242
  /**
1243
- * @public
1244
1243
  * Required when you set Codec to the value EAC3_ATMOS.
1244
+ * @public
1245
1245
  */
1246
1246
  Eac3AtmosSettings?: Eac3AtmosSettings;
1247
1247
  /**
1248
- * @public
1249
1248
  * Required when you set Codec to the value EAC3.
1249
+ * @public
1250
1250
  */
1251
1251
  Eac3Settings?: Eac3Settings;
1252
1252
  /**
1253
- * @public
1254
1253
  * Required when you set Codec, under AudioDescriptions>CodecSettings, to the value FLAC.
1254
+ * @public
1255
1255
  */
1256
1256
  FlacSettings?: FlacSettings;
1257
1257
  /**
1258
- * @public
1259
1258
  * Required when you set Codec to the value MP2.
1259
+ * @public
1260
1260
  */
1261
1261
  Mp2Settings?: Mp2Settings;
1262
1262
  /**
1263
- * @public
1264
1263
  * Required when you set Codec, under AudioDescriptions>CodecSettings, to the value MP3.
1264
+ * @public
1265
1265
  */
1266
1266
  Mp3Settings?: Mp3Settings;
1267
1267
  /**
1268
- * @public
1269
1268
  * Required when you set Codec, under AudioDescriptions>CodecSettings, to the value OPUS.
1269
+ * @public
1270
1270
  */
1271
1271
  OpusSettings?: OpusSettings;
1272
1272
  /**
1273
- * @public
1274
1273
  * Required when you set Codec, under AudioDescriptions>CodecSettings, to the value Vorbis.
1274
+ * @public
1275
1275
  */
1276
1276
  VorbisSettings?: VorbisSettings;
1277
1277
  /**
1278
- * @public
1279
1278
  * Required when you set Codec to the value WAV.
1279
+ * @public
1280
1280
  */
1281
1281
  WavSettings?: WavSettings;
1282
1282
  }
@@ -1495,169 +1495,169 @@ export declare const AudioLanguageCodeControl: {
1495
1495
  */
1496
1496
  export type AudioLanguageCodeControl = (typeof AudioLanguageCodeControl)[keyof typeof AudioLanguageCodeControl];
1497
1497
  /**
1498
- * @public
1499
1498
  * OutputChannel mapping settings.
1499
+ * @public
1500
1500
  */
1501
1501
  export interface OutputChannelMapping {
1502
1502
  /**
1503
- * @public
1504
1503
  * Use this setting to specify your remix values when they are integers, such as -10, 0, or 4.
1504
+ * @public
1505
1505
  */
1506
1506
  InputChannels?: number[];
1507
1507
  /**
1508
- * @public
1509
1508
  * Use this setting to specify your remix values when they have a decimal component, such as -10.312, 0.08, or 4.9. MediaConvert rounds your remixing values to the nearest thousandth.
1509
+ * @public
1510
1510
  */
1511
1511
  InputChannelsFineTune?: number[];
1512
1512
  }
1513
1513
  /**
1514
- * @public
1515
1514
  * Channel mapping contains the group of fields that hold the remixing value for each channel, in dB. Specify remix values to indicate how much of the content from your input audio channel you want in your output audio channels. Each instance of the InputChannels or InputChannelsFineTune array specifies these values for one output channel. Use one instance of this array for each output channel. In the console, each array corresponds to a column in the graphical depiction of the mapping matrix. The rows of the graphical matrix correspond to input channels. Valid values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification). Use InputChannels or InputChannelsFineTune to specify your remix values. Don't use both.
1515
+ * @public
1516
1516
  */
1517
1517
  export interface ChannelMapping {
1518
1518
  /**
1519
- * @public
1520
1519
  * In your JSON job specification, include one child of OutputChannels for each audio channel that you want in your output. Each child should contain one instance of InputChannels or InputChannelsFineTune.
1520
+ * @public
1521
1521
  */
1522
1522
  OutputChannels?: OutputChannelMapping[];
1523
1523
  }
1524
1524
  /**
1525
- * @public
1526
1525
  * Use Manual audio remixing to adjust audio levels for each audio channel in each output of your job. With audio remixing, you can output more or fewer audio channels than your input audio source provides.
1526
+ * @public
1527
1527
  */
1528
1528
  export interface RemixSettings {
1529
1529
  /**
1530
- * @public
1531
1530
  * Optionally specify the channel in your input that contains your audio description audio signal. MediaConvert mixes your audio signal across all output channels, while reducing their volume according to your data stream. When you specify an audio description audio channel, you must also specify an audio description data channel. For more information about audio description signals, see the BBC WHP 198 and 051 white papers.
1531
+ * @public
1532
1532
  */
1533
1533
  AudioDescriptionAudioChannel?: number;
1534
1534
  /**
1535
- * @public
1536
1535
  * Optionally specify the channel in your input that contains your audio description data stream. MediaConvert mixes your audio signal across all output channels, while reducing their volume according to your data stream. When you specify an audio description data channel, you must also specify an audio description audio channel. For more information about audio description signals, see the BBC WHP 198 and 051 white papers.
1536
+ * @public
1537
1537
  */
1538
1538
  AudioDescriptionDataChannel?: number;
1539
1539
  /**
1540
- * @public
1541
1540
  * Channel mapping contains the group of fields that hold the remixing value for each channel, in dB. Specify remix values to indicate how much of the content from your input audio channel you want in your output audio channels. Each instance of the InputChannels or InputChannelsFineTune array specifies these values for one output channel. Use one instance of this array for each output channel. In the console, each array corresponds to a column in the graphical depiction of the mapping matrix. The rows of the graphical matrix correspond to input channels. Valid values are within the range from -60 (mute) through 6. A setting of 0 passes the input channel unchanged to the output channel (no attenuation or amplification). Use InputChannels or InputChannelsFineTune to specify your remix values. Don't use both.
1541
+ * @public
1542
1542
  */
1543
1543
  ChannelMapping?: ChannelMapping;
1544
1544
  /**
1545
- * @public
1546
1545
  * Specify the number of audio channels from your input that you want to use in your output. With remixing, you might combine or split the data in these channels, so the number of channels in your final output might be different. If you are doing both input channel mapping and output channel mapping, the number of output channels in your input mapping must be the same as the number of input channels in your output mapping.
1546
+ * @public
1547
1547
  */
1548
1548
  ChannelsIn?: number;
1549
1549
  /**
1550
- * @public
1551
1550
  * Specify the number of channels in this output after remixing. Valid values: 1, 2, 4, 6, 8... 64. (1 and even numbers to 64.) If you are doing both input channel mapping and output channel mapping, the number of output channels in your input mapping must be the same as the number of input channels in your output mapping.
1551
+ * @public
1552
1552
  */
1553
1553
  ChannelsOut?: number;
1554
1554
  }
1555
1555
  /**
1556
- * @public
1557
1556
  * Settings related to one audio tab on the MediaConvert console. In your job JSON, an instance of AudioDescription is equivalent to one audio tab in the console. Usually, one audio tab corresponds to one output audio track. Depending on how you set up your input audio selectors and whether you use audio selector groups, one audio tab can correspond to a group of output audio tracks.
1557
+ * @public
1558
1558
  */
1559
1559
  export interface AudioDescription {
1560
1560
  /**
1561
- * @public
1562
1561
  * Specify the QuickTime audio channel layout tags for the audio channels in this audio track. When you don't specify a value, MediaConvert labels your track as Center (C) by default. To use Audio layout tagging, your output must be in a QuickTime (MOV) container and your audio codec must be AAC, WAV, or AIFF.
1562
+ * @public
1563
1563
  */
1564
1564
  AudioChannelTaggingSettings?: AudioChannelTaggingSettings;
1565
1565
  /**
1566
- * @public
1567
1566
  * Advanced audio normalization settings. Ignore these settings unless you need to comply with a loudness standard.
1567
+ * @public
1568
1568
  */
1569
1569
  AudioNormalizationSettings?: AudioNormalizationSettings;
1570
1570
  /**
1571
- * @public
1572
1571
  * Specifies which audio data to use from each input. In the simplest case, specify an "Audio Selector":#inputs-audio_selector by name based on its order within each input. For example if you specify "Audio Selector 3", then the third audio selector will be used from each input. If an input does not have an "Audio Selector 3", then the audio selector marked as "default" in that input will be used. If there is no audio selector marked as "default", silence will be inserted for the duration of that input. Alternatively, an "Audio Selector Group":#inputs-audio_selector_group name may be specified, with similar default/silence behavior. If no audio_source_name is specified, then "Audio Selector 1" will be chosen automatically.
1572
+ * @public
1573
1573
  */
1574
1574
  AudioSourceName?: string;
1575
1575
  /**
1576
- * @public
1577
1576
  * Applies only if Follow Input Audio Type is unchecked (false). A number between 0 and 255. The following are defined in ISO-IEC 13818-1: 0 = Undefined, 1 = Clean Effects, 2 = Hearing Impaired, 3 = Visually Impaired Commentary, 4-255 = Reserved.
1577
+ * @public
1578
1578
  */
1579
1579
  AudioType?: number;
1580
1580
  /**
1581
- * @public
1582
1581
  * When set to FOLLOW_INPUT, if the input contains an ISO 639 audio_type, then that value is passed through to the output. If the input contains no ISO 639 audio_type, the value in Audio Type is included in the output. Otherwise the value in Audio Type is included in the output. Note that this field and audioType are both ignored if audioDescriptionBroadcasterMix is set to BROADCASTER_MIXED_AD.
1582
+ * @public
1583
1583
  */
1584
1584
  AudioTypeControl?: AudioTypeControl;
1585
1585
  /**
1586
- * @public
1587
1586
  * Settings related to audio encoding. The settings in this group vary depending on the value that you choose for your audio codec.
1587
+ * @public
1588
1588
  */
1589
1589
  CodecSettings?: AudioCodecSettings;
1590
1590
  /**
1591
- * @public
1592
1591
  * Specify the language for this audio output track. The service puts this language code into your output audio track when you set Language code control to Use configured. The service also uses your specified custom language code when you set Language code control to Follow input, but your input file doesn't specify a language code. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming outputs, you can also use any other code in the full RFC-5646 specification. Streaming outputs are those that are in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming.
1592
+ * @public
1593
1593
  */
1594
1594
  CustomLanguageCode?: string;
1595
1595
  /**
1596
- * @public
1597
1596
  * Indicates the language of the audio output track. The ISO 639 language specified in the 'Language Code' drop down will be used when 'Follow Input Language Code' is not selected or when 'Follow Input Language Code' is selected but there is no ISO 639 language code specified by the input.
1597
+ * @public
1598
1598
  */
1599
1599
  LanguageCode?: LanguageCode;
1600
1600
  /**
1601
- * @public
1602
1601
  * Specify which source for language code takes precedence for this audio track. When you choose Follow input, the service uses the language code from the input track if it's present. If there's no languge code on the input track, the service uses the code that you specify in the setting Language code. When you choose Use configured, the service uses the language code that you specify.
1602
+ * @public
1603
1603
  */
1604
1604
  LanguageCodeControl?: AudioLanguageCodeControl;
1605
1605
  /**
1606
- * @public
1607
1606
  * Advanced audio remixing settings.
1607
+ * @public
1608
1608
  */
1609
1609
  RemixSettings?: RemixSettings;
1610
1610
  /**
1611
- * @public
1612
1611
  * Specify a label for this output audio stream. For example, "English", "Director commentary", or "track_2". For streaming outputs, MediaConvert passes this information into destination manifests for display on the end-viewer's player device. For outputs in other output groups, the service ignores this setting.
1612
+ * @public
1613
1613
  */
1614
1614
  StreamName?: string;
1615
1615
  }
1616
1616
  /**
1617
- * @public
1618
1617
  * Use Force include renditions to specify one or more resolutions to include your ABR stack. * (Recommended) To optimize automated ABR, specify as few resolutions as possible. * (Required) The number of resolutions that you specify must be equal to, or less than, the Max renditions setting. * If you specify a Min top rendition size rule, specify at least one resolution that is equal to, or greater than, Min top rendition size. * If you specify a Min bottom rendition size rule, only specify resolutions that are equal to, or greater than, Min bottom rendition size. * If you specify a Force include renditions rule, do not specify a separate rule for Allowed renditions. * Note: The ABR stack may include other resolutions that you do not specify here, depending on the Max renditions setting.
1618
+ * @public
1619
1619
  */
1620
1620
  export interface ForceIncludeRenditionSize {
1621
1621
  /**
1622
- * @public
1623
1622
  * Use Height to define the video resolution height, in pixels, for this rule.
1623
+ * @public
1624
1624
  */
1625
1625
  Height?: number;
1626
1626
  /**
1627
- * @public
1628
1627
  * Use Width to define the video resolution width, in pixels, for this rule.
1628
+ * @public
1629
1629
  */
1630
1630
  Width?: number;
1631
1631
  }
1632
1632
  /**
1633
- * @public
1634
1633
  * Use Min bottom rendition size to specify a minimum size for the lowest resolution in your ABR stack. * The lowest resolution in your ABR stack will be equal to or greater than the value that you enter. For example: If you specify 640x360 the lowest resolution in your ABR stack will be equal to or greater than to 640x360. * If you specify a Min top rendition size rule, the value that you specify for Min bottom rendition size must be less than, or equal to, Min top rendition size.
1634
+ * @public
1635
1635
  */
1636
1636
  export interface MinBottomRenditionSize {
1637
1637
  /**
1638
- * @public
1639
1638
  * Use Height to define the video resolution height, in pixels, for this rule.
1639
+ * @public
1640
1640
  */
1641
1641
  Height?: number;
1642
1642
  /**
1643
- * @public
1644
1643
  * Use Width to define the video resolution width, in pixels, for this rule.
1644
+ * @public
1645
1645
  */
1646
1646
  Width?: number;
1647
1647
  }
1648
1648
  /**
1649
- * @public
1650
1649
  * Use Min top rendition size to specify a minimum size for the highest resolution in your ABR stack. * The highest resolution in your ABR stack will be equal to or greater than the value that you enter. For example: If you specify 1280x720 the highest resolution in your ABR stack will be equal to or greater than 1280x720. * If you specify a value for Max resolution, the value that you specify for Min top rendition size must be less than, or equal to, Max resolution.
1650
+ * @public
1651
1651
  */
1652
1652
  export interface MinTopRenditionSize {
1653
1653
  /**
1654
- * @public
1655
1654
  * Use Height to define the video resolution height, in pixels, for this rule.
1655
+ * @public
1656
1656
  */
1657
1657
  Height?: number;
1658
1658
  /**
1659
- * @public
1660
1659
  * Use Width to define the video resolution width, in pixels, for this rule.
1660
+ * @public
1661
1661
  */
1662
1662
  Width?: number;
1663
1663
  }
@@ -1676,33 +1676,33 @@ export declare const RuleType: {
1676
1676
  */
1677
1677
  export type RuleType = (typeof RuleType)[keyof typeof RuleType];
1678
1678
  /**
1679
- * @public
1680
1679
  * Specify one or more Automated ABR rule types. Note: Force include and Allowed renditions are mutually exclusive.
1680
+ * @public
1681
1681
  */
1682
1682
  export interface AutomatedAbrRule {
1683
1683
  /**
1684
- * @public
1685
1684
  * When customer adds the allowed renditions rule for auto ABR ladder, they are required to add at leat one rendition to allowedRenditions list
1685
+ * @public
1686
1686
  */
1687
1687
  AllowedRenditions?: AllowedRenditionSize[];
1688
1688
  /**
1689
- * @public
1690
1689
  * When customer adds the force include renditions rule for auto ABR ladder, they are required to add at leat one rendition to forceIncludeRenditions list
1690
+ * @public
1691
1691
  */
1692
1692
  ForceIncludeRenditions?: ForceIncludeRenditionSize[];
1693
1693
  /**
1694
- * @public
1695
1694
  * Use Min bottom rendition size to specify a minimum size for the lowest resolution in your ABR stack. * The lowest resolution in your ABR stack will be equal to or greater than the value that you enter. For example: If you specify 640x360 the lowest resolution in your ABR stack will be equal to or greater than to 640x360. * If you specify a Min top rendition size rule, the value that you specify for Min bottom rendition size must be less than, or equal to, Min top rendition size.
1695
+ * @public
1696
1696
  */
1697
1697
  MinBottomRenditionSize?: MinBottomRenditionSize;
1698
1698
  /**
1699
- * @public
1700
1699
  * Use Min top rendition size to specify a minimum size for the highest resolution in your ABR stack. * The highest resolution in your ABR stack will be equal to or greater than the value that you enter. For example: If you specify 1280x720 the highest resolution in your ABR stack will be equal to or greater than 1280x720. * If you specify a value for Max resolution, the value that you specify for Min top rendition size must be less than, or equal to, Max resolution.
1700
+ * @public
1701
1701
  */
1702
1702
  MinTopRenditionSize?: MinTopRenditionSize;
1703
1703
  /**
1704
- * @public
1705
1704
  * Use Min top rendition size to specify a minimum size for the highest resolution in your ABR stack. * The highest resolution in your ABR stack will be equal to or greater than the value that you enter. For example: If you specify 1280x720 the highest resolution in your ABR stack will be equal to or greater than 1280x720. * If you specify a value for Max resolution, the value that you specify for Min top rendition size must be less than, or equal to, Max resolution. Use Min bottom rendition size to specify a minimum size for the lowest resolution in your ABR stack. * The lowest resolution in your ABR stack will be equal to or greater than the value that you enter. For example: If you specify 640x360 the lowest resolution in your ABR stack will be equal to or greater than to 640x360. * If you specify a Min top rendition size rule, the value that you specify for Min bottom rendition size must be less than, or equal to, Min top rendition size. Use Force include renditions to specify one or more resolutions to include your ABR stack. * (Recommended) To optimize automated ABR, specify as few resolutions as possible. * (Required) The number of resolutions that you specify must be equal to, or less than, the Max renditions setting. * If you specify a Min top rendition size rule, specify at least one resolution that is equal to, or greater than, Min top rendition size. * If you specify a Min bottom rendition size rule, only specify resolutions that are equal to, or greater than, Min bottom rendition size. * If you specify a Force include renditions rule, do not specify a separate rule for Allowed renditions. * Note: The ABR stack may include other resolutions that you do not specify here, depending on the Max renditions setting. Use Allowed renditions to specify a list of possible resolutions in your ABR stack. * (Required) The number of resolutions that you specify must be equal to, or greater than, the Max renditions setting. 
* MediaConvert will create an ABR stack exclusively from the list of resolutions that you specify. * Some resolutions in the Allowed renditions list may not be included, however you can force a resolution to be included by setting Required to ENABLED. * You must specify at least one resolution that is greater than or equal to any resolutions that you specify in Min top rendition size or Min bottom rendition size. * If you specify Allowed renditions, you must not specify a separate rule for Force include renditions.
1705
+ * @public
1706
1706
  */
1707
1707
  Type?: RuleType;
1708
1708
  }
@@ -1848,113 +1848,113 @@ export declare const BurninSubtitleTeletextSpacing: {
1848
1848
  */
1849
1849
  export type BurninSubtitleTeletextSpacing = (typeof BurninSubtitleTeletextSpacing)[keyof typeof BurninSubtitleTeletextSpacing];
1850
1850
  /**
1851
- * @public
1852
1851
  * Burn-in is a captions delivery method, rather than a captions format. Burn-in writes the captions directly on your video frames, replacing pixels of video content with the captions. Set up burn-in captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html.
1852
+ * @public
1853
1853
  */
1854
1854
  export interface BurninDestinationSettings {
1855
1855
  /**
1856
- * @public
1857
1856
  * Specify the alignment of your captions. If no explicit x_position is provided, setting alignment to centered will placethe captions at the bottom center of the output. Similarly, setting a left alignment willalign captions to the bottom left of the output. If x and y positions are given in conjunction with the alignment parameter, the font will be justified (either left or centered) relative to those coordinates.
1857
+ * @public
1858
1858
  */
1859
1859
  Alignment?: BurninSubtitleAlignment;
1860
1860
  /**
1861
- * @public
1862
1861
  * Ignore this setting unless Style passthrough is set to Enabled and Font color set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for additional font color controls. When you choose White text only, or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text.
1862
+ * @public
1863
1863
  */
1864
1864
  ApplyFontColor?: BurninSubtitleApplyFontColor;
1865
1865
  /**
1866
- * @public
1867
1866
  * Specify the color of the rectangle behind the captions. Leave background color blank and set Style passthrough to enabled to use the background color data from your input captions, if present.
1867
+ * @public
1868
1868
  */
1869
1869
  BackgroundColor?: BurninSubtitleBackgroundColor;
1870
1870
  /**
1871
- * @public
1872
1871
  * Specify the opacity of the background rectangle. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to enabled, leave blank to pass through the background style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all backgrounds from your output captions.
1872
+ * @public
1873
1873
  */
1874
1874
  BackgroundOpacity?: number;
1875
1875
  /**
1876
- * @public
1877
1876
  * Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font to best match, or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.
1877
+ * @public
1878
1878
  */
1879
1879
  FallbackFont?: BurninSubtitleFallbackFont;
1880
1880
  /**
1881
- * @public
1882
1881
  * Specify the color of the burned-in captions text. Leave Font color blank and set Style passthrough to enabled to use the font color data from your input captions, if present.
1882
+ * @public
1883
1883
  */
1884
1884
  FontColor?: BurninSubtitleFontColor;
1885
1885
  /**
1886
- * @public
1887
1886
  * Specify the opacity of the burned-in captions. 255 is opaque; 0 is transparent.
1887
+ * @public
1888
1888
  */
1889
1889
  FontOpacity?: number;
1890
1890
  /**
1891
- * @public
1892
1891
  * Specify the Font resolution in DPI (dots per inch).
1892
+ * @public
1893
1893
  */
1894
1894
  FontResolution?: number;
1895
1895
  /**
1896
- * @public
1897
1896
  * Set Font script to Automatically determined, or leave blank, to automatically determine the font script in your input captions. Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses Simplified or Traditional Chinese.
1897
+ * @public
1898
1898
  */
1899
1899
  FontScript?: FontScript;
1900
1900
  /**
1901
- * @public
1902
1901
  * Specify the Font size in pixels. Must be a positive integer. Set to 0, or leave blank, for automatic font size.
1902
+ * @public
1903
1903
  */
1904
1904
  FontSize?: number;
1905
1905
  /**
1906
- * @public
1907
1906
  * Ignore this setting unless your Font color is set to Hex. Enter either six or eight hexidecimal digits, representing red, green, and blue, with two optional extra digits for alpha. For example a value of 1122AABB is a red value of 0x11, a green value of 0x22, a blue value of 0xAA, and an alpha value of 0xBB.
1907
+ * @public
1908
1908
  */
1909
1909
  HexFontColor?: string;
1910
1910
  /**
1911
- * @public
1912
1911
  * Specify font outline color. Leave Outline color blank and set Style passthrough to enabled to use the font outline color data from your input captions, if present.
1912
+ * @public
1913
1913
  */
1914
1914
  OutlineColor?: BurninSubtitleOutlineColor;
1915
1915
  /**
1916
- * @public
1917
1916
  * Specify the Outline size of the caption text, in pixels. Leave Outline size blank and set Style passthrough to enabled to use the outline size data from your input captions, if present.
1917
+ * @public
1918
1918
  */
1919
1919
  OutlineSize?: number;
1920
1920
  /**
1921
- * @public
1922
1921
  * Specify the color of the shadow cast by the captions. Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present.
1922
+ * @public
1923
1923
  */
1924
1924
  ShadowColor?: BurninSubtitleShadowColor;
1925
1925
  /**
1926
- * @public
1927
1926
  * Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to Enabled, leave Shadow opacity blank to pass through the shadow style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions.
1927
+ * @public
1928
1928
  */
1929
1929
  ShadowOpacity?: number;
1930
1930
  /**
1931
- * @public
1932
1931
  * Specify the horizontal offset of the shadow, relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels to the left.
1932
+ * @public
1933
1933
  */
1934
1934
  ShadowXOffset?: number;
1935
1935
  /**
1936
- * @public
1937
1936
  * Specify the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. Leave Shadow y-offset blank and set Style passthrough to enabled to use the shadow y-offset data from your input captions, if present.
1937
+ * @public
1938
1938
  */
1939
1939
  ShadowYOffset?: number;
1940
1940
  /**
1941
- * @public
1942
1941
  * Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings.
1942
+ * @public
1943
1943
  */
1944
1944
  StylePassthrough?: BurnInSubtitleStylePassthrough;
1945
1945
  /**
1946
- * @public
1947
1946
  * Specify whether the text spacing in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read for closed captions.
1947
+ * @public
1948
1948
  */
1949
1949
  TeletextSpacing?: BurninSubtitleTeletextSpacing;
1950
1950
  /**
1951
- * @public
1952
1951
  * Specify the horizontal position of the captions, relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit x_position is provided, the horizontal caption position will be determined by the alignment parameter.
1952
+ * @public
1953
1953
  */
1954
1954
  XPosition?: number;
1955
1955
  /**
1956
- * @public
1957
1956
  * Specify the vertical position of the captions, relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output.
1957
+ * @public
1958
1958
  */
1959
1959
  YPosition?: number;
1960
1960
  }
@@ -2134,161 +2134,161 @@ export declare const DvbSubtitleTeletextSpacing: {
2134
2134
  */
2135
2135
  export type DvbSubtitleTeletextSpacing = (typeof DvbSubtitleTeletextSpacing)[keyof typeof DvbSubtitleTeletextSpacing];
2136
2136
  /**
2137
- * @public
2138
2137
  * Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html.
2138
+ * @public
2139
2139
  */
2140
2140
  export interface DvbSubDestinationSettings {
2141
2141
  /**
2142
- * @public
2143
2142
  * Specify the alignment of your captions. If no explicit x_position is provided, setting alignment to centered will placethe captions at the bottom center of the output. Similarly, setting a left alignment willalign captions to the bottom left of the output. If x and y positions are given in conjunction with the alignment parameter, the font will be justified (either left or centered) relative to those coordinates. Within your job settings, all of your DVB-Sub settings must be identical.
2143
+ * @public
2144
2144
  */
2145
2145
  Alignment?: DvbSubtitleAlignment;
2146
2146
  /**
2147
- * @public
2148
2147
  * Ignore this setting unless Style Passthrough is set to Enabled and Font color set to Black, Yellow, Red, Green, Blue, or Hex. Use Apply font color for additional font color controls. When you choose White text only, or leave blank, your font color setting only applies to white text in your input captions. For example, if your font color setting is Yellow, and your input captions have red and white text, your output captions will have red and yellow text. When you choose ALL_TEXT, your font color setting applies to all of your output captions text.
2148
+ * @public
2149
2149
  */
2150
2150
  ApplyFontColor?: DvbSubtitleApplyFontColor;
2151
2151
  /**
2152
- * @public
2153
2152
  * Specify the color of the rectangle behind the captions. Leave background color blank and set Style passthrough to enabled to use the background color data from your input captions, if present.
2153
+ * @public
2154
2154
  */
2155
2155
  BackgroundColor?: DvbSubtitleBackgroundColor;
2156
2156
  /**
2157
- * @public
2158
2157
  * Specify the opacity of the background rectangle. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to enabled, leave blank to pass through the background style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all backgrounds from your output captions. Within your job settings, all of your DVB-Sub settings must be identical.
2158
+ * @public
2159
2159
  */
2160
2160
  BackgroundOpacity?: number;
2161
2161
  /**
2162
- * @public
2163
2162
  * Specify how MediaConvert handles the display definition segment (DDS). To exclude the DDS from this set of captions: Keep the default, None. To include the DDS: Choose Specified. When you do, also specify the offset coordinates of the display window with DDS x-coordinate and DDS y-coordinate. To include the DDS, but not include display window data: Choose No display window. When you do, you can write position metadata to the page composition segment (PCS) with DDS x-coordinate and DDS y-coordinate. For video resolutions with a height of 576 pixels or less, MediaConvert doesn't include the DDS, regardless of the value you choose for DDS handling. All burn-in and DVB-Sub font settings must match.
2163
+ * @public
2164
2164
  */
2165
2165
  DdsHandling?: DvbddsHandling;
2166
2166
  /**
2167
- * @public
2168
2167
  * Use this setting, along with DDS y-coordinate, to specify the upper left corner of the display definition segment (DDS) display window. With this setting, specify the distance, in pixels, between the left side of the frame and the left side of the DDS display window. Keep the default value, 0, to have MediaConvert automatically choose this offset. Related setting: When you use this setting, you must set DDS handling to a value other than None. MediaConvert uses these values to determine whether to write page position data to the DDS or to the page composition segment. All burn-in and DVB-Sub font settings must match.
2168
+ * @public
2169
2169
  */
2170
2170
  DdsXCoordinate?: number;
2171
2171
  /**
2172
- * @public
2173
2172
  * Use this setting, along with DDS x-coordinate, to specify the upper left corner of the display definition segment (DDS) display window. With this setting, specify the distance, in pixels, between the top of the frame and the top of the DDS display window. Keep the default value, 0, to have MediaConvert automatically choose this offset. Related setting: When you use this setting, you must set DDS handling to a value other than None. MediaConvert uses these values to determine whether to write page position data to the DDS or to the page composition segment (PCS). All burn-in and DVB-Sub font settings must match.
2173
+ * @public
2174
2174
  */
2175
2175
  DdsYCoordinate?: number;
2176
2176
  /**
2177
- * @public
2178
2177
  * Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you set Fallback font to best match, or leave blank, MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.
2178
+ * @public
2179
2179
  */
2180
2180
  FallbackFont?: DvbSubSubtitleFallbackFont;
2181
2181
  /**
2182
- * @public
2183
2182
  * Specify the color of the captions text. Leave Font color blank and set Style passthrough to enabled to use the font color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.
2183
+ * @public
2184
2184
  */
2185
2185
  FontColor?: DvbSubtitleFontColor;
2186
2186
  /**
2187
- * @public
2188
2187
  * Specify the opacity of the burned-in captions. 255 is opaque; 0 is transparent.
2189
2188
  * Within your job settings, all of your DVB-Sub settings must be identical.
2189
+ * @public
2190
2190
  */
2191
2191
  FontOpacity?: number;
2192
2192
  /**
2193
- * @public
2194
2193
  * Specify the Font resolution in DPI (dots per inch).
2195
2194
  * Within your job settings, all of your DVB-Sub settings must be identical.
2195
+ * @public
2196
2196
  */
2197
2197
  FontResolution?: number;
2198
2198
  /**
2199
- * @public
2200
2199
  * Set Font script to Automatically determined, or leave blank, to automatically determine the font script in your input captions. Otherwise, set to Simplified Chinese (HANS) or Traditional Chinese (HANT) if your input font script uses Simplified or Traditional Chinese. Within your job settings, all of your DVB-Sub settings must be identical.
2200
+ * @public
2201
2201
  */
2202
2202
  FontScript?: FontScript;
2203
2203
  /**
2204
- * @public
2205
2204
  * Specify the Font size in pixels. Must be a positive integer. Set to 0, or leave blank, for automatic font size. Within your job settings, all of your DVB-Sub settings must be identical.
2205
+ * @public
2206
2206
  */
2207
2207
  FontSize?: number;
2208
2208
  /**
2209
- * @public
2210
2209
  * Specify the height, in pixels, of this set of DVB-Sub captions. The default value is 576 pixels. Related setting: When you use this setting, you must set DDS handling to a value other than None. All burn-in and DVB-Sub font settings must match.
2210
+ * @public
2211
2211
  */
2212
2212
  Height?: number;
2213
2213
  /**
2214
- * @public
2215
2214
  * Ignore this setting unless your Font color is set to Hex. Enter either six or eight hexidecimal digits, representing red, green, and blue, with two optional extra digits for alpha. For example a value of 1122AABB is a red value of 0x11, a green value of 0x22, a blue value of 0xAA, and an alpha value of 0xBB.
2215
+ * @public
2216
2216
  */
2217
2217
  HexFontColor?: string;
2218
2218
  /**
2219
- * @public
2220
2219
  * Specify font outline color. Leave Outline color blank and set Style passthrough to enabled to use the font outline color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.
2220
+ * @public
2221
2221
  */
2222
2222
  OutlineColor?: DvbSubtitleOutlineColor;
2223
2223
  /**
2224
- * @public
2225
2224
  * Specify the Outline size of the caption text, in pixels. Leave Outline size blank and set Style passthrough to enabled to use the outline size data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.
2225
+ * @public
2226
2226
  */
2227
2227
  OutlineSize?: number;
2228
2228
  /**
2229
- * @public
2230
2229
  * Specify the color of the shadow cast by the captions. Leave Shadow color blank and set Style passthrough to enabled to use the shadow color data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.
2230
+ * @public
2231
2231
  */
2232
2232
  ShadowColor?: DvbSubtitleShadowColor;
2233
2233
  /**
2234
- * @public
2235
2234
  * Specify the opacity of the shadow. Enter a value from 0 to 255, where 0 is transparent and 255 is opaque. If Style passthrough is set to Enabled, leave Shadow opacity blank to pass through the shadow style information in your input captions to your output captions. If Style passthrough is set to disabled, leave blank to use a value of 0 and remove all shadows from your output captions. Within your job settings, all of your DVB-Sub settings must be identical.
2235
+ * @public
2236
2236
  */
2237
2237
  ShadowOpacity?: number;
2238
2238
  /**
2239
- * @public
2240
2239
  * Specify the horizontal offset of the shadow, relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels to the left. Within your job settings, all of your DVB-Sub settings must be identical.
2240
+ * @public
2241
2241
  */
2242
2242
  ShadowXOffset?: number;
2243
2243
  /**
2244
- * @public
2245
2244
  * Specify the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. Leave Shadow y-offset blank and set Style passthrough to enabled to use the shadow y-offset data from your input captions, if present. Within your job settings, all of your DVB-Sub settings must be identical.
2245
+ * @public
2246
2246
  */
2247
2247
  ShadowYOffset?: number;
2248
2248
  /**
2249
- * @public
2250
2249
  * Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use default settings: white text with black outlining, bottom-center positioning, and automatic sizing. Whether you set Style passthrough to enabled or not, you can also choose to manually override any of the individual style and position settings.
2250
+ * @public
2251
2251
  */
2252
2252
  StylePassthrough?: DvbSubtitleStylePassthrough;
2253
2253
  /**
2254
- * @public
2255
2254
  * Specify whether your DVB subtitles are standard or for hearing impaired. Choose hearing impaired if your subtitles include audio descriptions and dialogue. Choose standard if your subtitles include only dialogue.
2255
+ * @public
2256
2256
  */
2257
2257
  SubtitlingType?: DvbSubtitlingType;
2258
2258
  /**
2259
- * @public
2260
2259
  * Specify whether the Text spacing in your captions is set by the captions grid, or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read for closed captions. Within your job settings, all of your DVB-Sub settings must be identical.
2260
+ * @public
2261
2261
  */
2262
2262
  TeletextSpacing?: DvbSubtitleTeletextSpacing;
2263
2263
  /**
2264
- * @public
2265
2264
  * Specify the width, in pixels, of this set of DVB-Sub captions. The default value is 720 pixels. Related setting: When you use this setting, you must set DDS handling to a value other than None. All burn-in and DVB-Sub font settings must match.
2265
+ * @public
2266
2266
  */
2267
2267
  Width?: number;
2268
2268
  /**
2269
- * @public
2270
2269
  * Specify the horizontal position of the captions, relative to the left side of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the left of the output. If no explicit x_position is provided, the horizontal caption position will be determined by the alignment parameter. Within your job settings, all of your DVB-Sub settings must be identical.
2270
+ * @public
2271
2271
  */
2272
2272
  XPosition?: number;
2273
2273
  /**
2274
- * @public
2275
2274
  * Specify the vertical position of the captions, relative to the top of the output in pixels. A value of 10 would result in the captions starting 10 pixels from the top of the output. If no explicit y_position is provided, the caption will be positioned towards the bottom of the output. Within your job settings, all of your DVB-Sub settings must be identical.
2275
+ * @public
2276
2276
  */
2277
2277
  YPosition?: number;
2278
2278
  }
2279
2279
  /**
2280
- * @public
2281
2280
  * Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or ancillary) captions. Set up embedded captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html.
2281
+ * @public
2282
2282
  */
2283
2283
  export interface EmbeddedDestinationSettings {
2284
2284
  /**
2285
- * @public
2286
2285
  * Ignore this setting unless your input captions are SCC format and your output captions are embedded in the video stream. Specify a CC number for each captions channel in this output. If you have two channels, choose CC numbers that aren't in the same field. For example, choose 1 and 3. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded.
2286
+ * @public
2287
2287
  */
2288
2288
  Destination608ChannelNumber?: number;
2289
2289
  /**
2290
- * @public
2291
2290
  * Ignore this setting unless your input captions are SCC format and you want both 608 and 708 captions embedded in your output stream. Optionally, specify the 708 service number for each output captions channel. Choose a different number for each channel. To use this setting, also set Force 608 to 708 upconvert to Upconvert in your input captions selector settings. If you choose to upconvert but don't specify a 708 service number, MediaConvert uses the number that you specify for CC channel number for the 708 service number. For more information, see https://docs.aws.amazon.com/console/mediaconvert/dual-scc-to-embedded.
2291
+ * @public
2292
2292
  */
2293
2293
  Destination708ServiceNumber?: number;
2294
2294
  }
@@ -2317,18 +2317,18 @@ export declare const ImscStylePassthrough: {
2317
2317
  */
2318
2318
  export type ImscStylePassthrough = (typeof ImscStylePassthrough)[keyof typeof ImscStylePassthrough];
2319
2319
  /**
2320
- * @public
2321
2320
  * Settings related to IMSC captions. IMSC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.
2321
+ * @public
2322
2322
  */
2323
2323
  export interface ImscDestinationSettings {
2324
2324
  /**
2325
- * @public
2326
2325
  * If the IMSC captions track is intended to provide accessibility for people who are deaf or hard of hearing: Set Accessibility subtitles to Enabled. When you do, MediaConvert adds accessibility attributes to your output HLS or DASH manifest. For HLS manifests, MediaConvert adds the following accessibility attributes under EXT-X-MEDIA for this track: CHARACTERISTICS="public.accessibility.describes-spoken-dialog,public.accessibility.describes-music-and-sound" and AUTOSELECT="YES". For DASH manifests, MediaConvert adds the following in the adaptation set for this track: <Accessibility schemeIdUri="urn:mpeg:dash:role:2011" value="caption"/>. If the captions track is not intended to provide such accessibility: Keep the default value, Disabled. When you do, for DASH manifests, MediaConvert instead adds the following in the adaptation set for this track: <Role schemeIDUri="urn:mpeg:dash:role:2011" value="subtitle"/>.
2326
+ * @public
2327
2327
  */
2328
2328
  Accessibility?: ImscAccessibilitySubs;
2329
2329
  /**
2330
- * @public
2331
2330
  * Keep this setting enabled to have MediaConvert use the font style and position information from the captions source in the output. This option is available only when your input captions are IMSC, SMPTE-TT, or TTML. Disable this setting for simplified output captions.
2331
+ * @public
2332
2332
  */
2333
2333
  StylePassthrough?: ImscStylePassthrough;
2334
2334
  }
@@ -2348,13 +2348,13 @@ export declare const SccDestinationFramerate: {
2348
2348
  */
2349
2349
  export type SccDestinationFramerate = (typeof SccDestinationFramerate)[keyof typeof SccDestinationFramerate];
2350
2350
  /**
2351
- * @public
2352
2351
  * Settings related to SCC captions. SCC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html.
2352
+ * @public
2353
2353
  */
2354
2354
  export interface SccDestinationSettings {
2355
2355
  /**
2356
- * @public
2357
2356
  * Set Framerate to make sure that the captions and the video are synchronized in the output. Specify a frame rate that matches the frame rate of the associated video. If the video frame rate is 29.97, choose 29.97 dropframe only if the video has video_insertion=true and drop_frame_timecode=true; otherwise, choose 29.97 non-dropframe.
2357
+ * @public
2358
2358
  */
2359
2359
  Framerate?: SccDestinationFramerate;
2360
2360
  }
@@ -2371,13 +2371,13 @@ export declare const SrtStylePassthrough: {
2371
2371
  */
2372
2372
  export type SrtStylePassthrough = (typeof SrtStylePassthrough)[keyof typeof SrtStylePassthrough];
2373
2373
  /**
2374
- * @public
2375
2374
  * Settings related to SRT captions. SRT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video.
2375
+ * @public
2376
2376
  */
2377
2377
  export interface SrtDestinationSettings {
2378
2378
  /**
2379
- * @public
2380
2379
  * Set Style passthrough to ENABLED to use the available style, color, and position information from your input captions. MediaConvert uses default settings for any missing style and position information in your input captions. Set Style passthrough to DISABLED, or leave blank, to ignore the style and position information from your input captions and use simplified output captions.
2380
+ * @public
2381
2381
  */
2382
2382
  StylePassthrough?: SrtStylePassthrough;
2383
2383
  }
@@ -2397,18 +2397,18 @@ export declare const TeletextPageType: {
2397
2397
  */
2398
2398
  export type TeletextPageType = (typeof TeletextPageType)[keyof typeof TeletextPageType];
2399
2399
  /**
2400
- * @public
2401
2400
  * Settings related to teletext captions. Set up teletext captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html.
2401
+ * @public
2402
2402
  */
2403
2403
  export interface TeletextDestinationSettings {
2404
2404
  /**
2405
- * @public
2406
2405
  * Set pageNumber to the Teletext page number for the destination captions for this output. This value must be a three-digit hexadecimal string; strings ending in -FF are invalid. If you are passing through the entire set of Teletext data, do not use this field.
2406
+ * @public
2407
2407
  */
2408
2408
  PageNumber?: string;
2409
2409
  /**
2410
- * @public
2411
2410
  * Specify the page types for this Teletext page. If you don't specify a value here, the service sets the page type to the default value Subtitle. If you pass through the entire set of Teletext data, don't use this field. When you pass through a set of Teletext pages, your output has the same page types as your input.
2411
+ * @public
2412
2412
  */
2413
2413
  PageTypes?: TeletextPageType[];
2414
2414
  }
@@ -2425,13 +2425,13 @@ export declare const TtmlStylePassthrough: {
2425
2425
  */
2426
2426
  export type TtmlStylePassthrough = (typeof TtmlStylePassthrough)[keyof typeof TtmlStylePassthrough];
2427
2427
  /**
2428
- * @public
2429
2428
  * Settings related to TTML captions. TTML is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.
2429
+ * @public
2430
2430
  */
2431
2431
  export interface TtmlDestinationSettings {
2432
2432
  /**
2433
- * @public
2434
2433
  * Pass through style and position information from a TTML-like input source (TTML, IMSC, SMPTE-TT) to the TTML output.
2434
+ * @public
2435
2435
  */
2436
2436
  StylePassthrough?: TtmlStylePassthrough;
2437
2437
  }
@@ -2461,147 +2461,147 @@ export declare const WebvttStylePassthrough: {
2461
2461
  */
2462
2462
  export type WebvttStylePassthrough = (typeof WebvttStylePassthrough)[keyof typeof WebvttStylePassthrough];
2463
2463
  /**
2464
- * @public
2465
2464
  * Settings related to WebVTT captions. WebVTT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.
2465
+ * @public
2466
2466
  */
2467
2467
  export interface WebvttDestinationSettings {
2468
2468
  /**
2469
- * @public
2470
2469
  * If the WebVTT captions track is intended to provide accessibility for people who are deaf or hard of hearing: Set Accessibility subtitles to Enabled. When you do, MediaConvert adds accessibility attributes to your output HLS or DASH manifest. For HLS manifests, MediaConvert adds the following accessibility attributes under EXT-X-MEDIA for this track: CHARACTERISTICS="public.accessibility.describes-spoken-dialog,public.accessibility.describes-music-and-sound" and AUTOSELECT="YES". For DASH manifests, MediaConvert adds the following in the adaptation set for this track: <Accessibility schemeIdUri="urn:mpeg:dash:role:2011" value="caption"/>. If the captions track is not intended to provide such accessibility: Keep the default value, Disabled. When you do, for DASH manifests, MediaConvert instead adds the following in the adaptation set for this track: <Role schemeIDUri="urn:mpeg:dash:role:2011" value="subtitle"/>.
2470
+ * @public
2471
2471
  */
2472
2472
  Accessibility?: WebvttAccessibilitySubs;
2473
2473
  /**
2474
- * @public
2475
2474
  * To use the available style, color, and position information from your input captions: Set Style passthrough to Enabled. MediaConvert uses default settings when style and position information is missing from your input captions. To recreate the input captions exactly: Set Style passthrough to Strict. MediaConvert automatically applies timing adjustments, including adjustments for frame rate conversion, ad avails, and input clipping. Your input captions format must be WebVTT. To ignore the style and position information from your input captions and use simplified output captions: Set Style passthrough to Disabled, or leave blank.
2475
+ * @public
2476
2476
  */
2477
2477
  StylePassthrough?: WebvttStylePassthrough;
2478
2478
  }
2479
2479
  /**
2480
- * @public
2481
2480
  * Settings related to one captions tab on the MediaConvert console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html.
2481
+ * @public
2482
2482
  */
2483
2483
  export interface CaptionDestinationSettings {
2484
2484
  /**
2485
- * @public
2486
2485
  * Burn-in is a captions delivery method, rather than a captions format. Burn-in writes the captions directly on your video frames, replacing pixels of video content with the captions. Set up burn-in captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/burn-in-output-captions.html.
2486
+ * @public
2487
2487
  */
2488
2488
  BurninDestinationSettings?: BurninDestinationSettings;
2489
2489
  /**
2490
- * @public
2491
2490
  * Specify the format for this set of captions on this output. The default format is embedded without SCTE-20. Note that your choice of video output container constrains your choice of output captions format. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/captions-support-tables.html. If you are using SCTE-20 and you want to create an output that complies with the SCTE-43 spec, choose SCTE-20 plus embedded. To create a non-compliant output where the embedded captions come first, choose Embedded plus SCTE-20.
2491
+ * @public
2492
2492
  */
2493
2493
  DestinationType?: CaptionDestinationType;
2494
2494
  /**
2495
- * @public
2496
2495
  * Settings related to DVB-Sub captions. Set up DVB-Sub captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/dvb-sub-output-captions.html.
2496
+ * @public
2497
2497
  */
2498
2498
  DvbSubDestinationSettings?: DvbSubDestinationSettings;
2499
2499
  /**
2500
- * @public
2501
2500
  * Settings related to CEA/EIA-608 and CEA/EIA-708 (also called embedded or ancillary) captions. Set up embedded captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/embedded-output-captions.html.
2501
+ * @public
2502
2502
  */
2503
2503
  EmbeddedDestinationSettings?: EmbeddedDestinationSettings;
2504
2504
  /**
2505
- * @public
2506
2505
  * Settings related to IMSC captions. IMSC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.
2506
+ * @public
2507
2507
  */
2508
2508
  ImscDestinationSettings?: ImscDestinationSettings;
2509
2509
  /**
2510
- * @public
2511
2510
  * Settings related to SCC captions. SCC is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/scc-srt-output-captions.html.
2511
+ * @public
2512
2512
  */
2513
2513
  SccDestinationSettings?: SccDestinationSettings;
2514
2514
  /**
2515
- * @public
2516
2515
  * Settings related to SRT captions. SRT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video.
2516
+ * @public
2517
2517
  */
2518
2518
  SrtDestinationSettings?: SrtDestinationSettings;
2519
2519
  /**
2520
- * @public
2521
2520
  * Settings related to teletext captions. Set up teletext captions in the same output as your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/teletext-output-captions.html.
2521
+ * @public
2522
2522
  */
2523
2523
  TeletextDestinationSettings?: TeletextDestinationSettings;
2524
2524
  /**
2525
- * @public
2526
2525
  * Settings related to TTML captions. TTML is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.
2526
+ * @public
2527
2527
  */
2528
2528
  TtmlDestinationSettings?: TtmlDestinationSettings;
2529
2529
  /**
2530
- * @public
2531
2530
  * Settings related to WebVTT captions. WebVTT is a sidecar format that holds captions in a file that is separate from the video container. Set up sidecar captions in the same output group, but different output from your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ttml-and-webvtt-output-captions.html.
2531
+ * @public
2532
2532
  */
2533
2533
  WebvttDestinationSettings?: WebvttDestinationSettings;
2534
2534
  }
2535
2535
  /**
2536
- * @public
2537
2536
  * This object holds groups of settings related to captions for one output. For each output that has captions, include one instance of CaptionDescriptions.
2537
+ * @public
2538
2538
  */
2539
2539
  export interface CaptionDescription {
2540
2540
  /**
2541
- * @public
2542
2541
  * Specifies which "Caption Selector":#inputs-caption_selector to use from each input when generating captions. The name should be of the format "Caption Selector <N>", which denotes that the Nth Caption Selector will be used from each input.
2542
+ * @public
2543
2543
  */
2544
2544
  CaptionSelectorName?: string;
2545
2545
  /**
2546
- * @public
2547
2546
  * Specify the language for this captions output track. For most captions output formats, the encoder puts this language information in the output captions metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses this language information when automatically selecting the font script for rendering the captions text. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming outputs, you can also use any other code in the full RFC-5646 specification. Streaming outputs are those that are in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming.
2547
+ * @public
2548
2548
  */
2549
2549
  CustomLanguageCode?: string;
2550
2550
  /**
2551
- * @public
2552
2551
  * Settings related to one captions tab on the MediaConvert console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html.
2552
+ * @public
2553
2553
  */
2554
2554
  DestinationSettings?: CaptionDestinationSettings;
2555
2555
  /**
2556
- * @public
2557
2556
  * Specify the language of this captions output track. For most captions output formats, the encoder puts this language information in the output captions metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses this language information to choose the font language for rendering the captions text.
2557
+ * @public
2558
2558
  */
2559
2559
  LanguageCode?: LanguageCode;
2560
2560
  /**
2561
- * @public
2562
2561
  * Specify a label for this set of output captions. For example, "English", "Director commentary", or "track_2". For streaming outputs, MediaConvert passes this information into destination manifests for display on the end-viewer's player device. For outputs in other output groups, the service ignores this setting.
2562
+ * @public
2563
2563
  */
2564
2564
  LanguageDescription?: string;
2565
2565
  }
2566
2566
  /**
2567
- * @public
2568
2567
  * Caption Description for preset
2568
+ * @public
2569
2569
  */
2570
2570
  export interface CaptionDescriptionPreset {
2571
2571
  /**
2572
- * @public
2573
2572
  * Specify the language for this captions output track. For most captions output formats, the encoder puts this language information in the output captions metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses this language information when automatically selecting the font script for rendering the captions text. For all outputs, you can use an ISO 639-2 or ISO 639-3 code. For streaming outputs, you can also use any other code in the full RFC-5646 specification. Streaming outputs are those that are in one of the following output groups: CMAF, DASH ISO, Apple HLS, or Microsoft Smooth Streaming.
2573
+ * @public
2574
2574
  */
2575
2575
  CustomLanguageCode?: string;
2576
2576
  /**
2577
- * @public
2578
2577
  * Settings related to one captions tab on the MediaConvert console. Usually, one captions tab corresponds to one output captions track. Depending on your output captions format, one tab might correspond to a set of output captions tracks. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/including-captions.html.
2578
+ * @public
2579
2579
  */
2580
2580
  DestinationSettings?: CaptionDestinationSettings;
2581
2581
  /**
2582
- * @public
2583
2582
  * Specify the language of this captions output track. For most captions output formats, the encoder puts this language information in the output captions metadata. If your output captions format is DVB-Sub or Burn in, the encoder uses this language information to choose the font language for rendering the captions text.
2583
+ * @public
2584
2584
  */
2585
2585
  LanguageCode?: LanguageCode;
2586
2586
  /**
2587
- * @public
2588
2587
  * Specify a label for this set of output captions. For example, "English", "Director commentary", or "track_2". For streaming outputs, MediaConvert passes this information into destination manifests for display on the end-viewer's player device. For outputs in other output groups, the service ignores this setting.
2588
+ * @public
2589
2589
  */
2590
2590
  LanguageDescription?: string;
2591
2591
  }
2592
2592
  /**
2593
- * @public
2594
2593
  * Specify the details for each pair of HLS and DASH additional manifests that you want the service to generate for this CMAF output group. Each pair of manifests can reference a different subset of outputs in the group.
2594
+ * @public
2595
2595
  */
2596
2596
  export interface CmafAdditionalManifest {
2597
2597
  /**
2598
- * @public
2599
2598
  * Specify a name modifier that the service adds to the name of this manifest to make it different from the file names of the other main manifests in the output group. For example, say that the default main manifest for your HLS group is film-name.m3u8. If you enter "-no-premium" for this setting, then the file name the service generates for this top-level manifest is film-name-no-premium.m3u8. For HLS output groups, specify a manifestNameModifier that is different from the nameModifier of the output. The service uses the output name modifier to create unique names for the individual variant manifests.
2599
+ * @public
2600
2600
  */
2601
2601
  ManifestNameModifier?: string;
2602
2602
  /**
2603
- * @public
2604
2603
  * Specify the outputs that you want this additional top-level manifest to reference.
2604
+ * @public
2605
2605
  */
2606
2606
  SelectedOutputs?: string[];
2607
2607
  }
@@ -2624,76 +2624,76 @@ export declare const ColorSpace: {
2624
2624
  */
2625
2625
  export type ColorSpace = (typeof ColorSpace)[keyof typeof ColorSpace];
2626
2626
  /**
2627
- * @public
2628
2627
  * Custom 3D lut settings
2628
+ * @public
2629
2629
  */
2630
2630
  export interface ColorConversion3DLUTSetting {
2631
2631
  /**
2632
- * @public
2633
2632
  * Specify the input file S3, HTTP, or HTTPS URL for your 3D LUT .cube file. Note that MediaConvert accepts 3D LUT files up to 8MB in size.
2633
+ * @public
2634
2634
  */
2635
2635
  FileInput?: string;
2636
2636
  /**
2637
- * @public
2638
2637
  * Specify which inputs use this 3D LUT, according to their color space.
2638
+ * @public
2639
2639
  */
2640
2640
  InputColorSpace?: ColorSpace;
2641
2641
  /**
2642
- * @public
2643
2642
  * Specify which inputs use this 3D LUT, according to their luminance. To apply this 3D LUT to HDR10 or P3D65 (HDR) inputs with a specific mastering luminance: Enter an integer from 0 to 2147483647, corresponding to the input's Maximum luminance value. To apply this 3D LUT to any input regardless of its luminance: Leave blank, or enter 0.
2643
+ * @public
2644
2644
  */
2645
2645
  InputMasteringLuminance?: number;
2646
2646
  /**
2647
- * @public
2648
2647
  * Specify which outputs use this 3D LUT, according to their color space.
2648
+ * @public
2649
2649
  */
2650
2650
  OutputColorSpace?: ColorSpace;
2651
2651
  /**
2652
- * @public
2653
2652
  * Specify which outputs use this 3D LUT, according to their luminance. To apply this 3D LUT to HDR10 or P3D65 (HDR) outputs with a specific luminance: Enter an integer from 0 to 2147483647, corresponding to the output's luminance. To apply this 3D LUT to any output regardless of its luminance: Leave blank, or enter 0.
2653
+ * @public
2654
2654
  */
2655
2655
  OutputMasteringLuminance?: number;
2656
2656
  }
2657
2657
  /**
2658
- * @public
2659
2658
  * Specify the details for each additional DASH manifest that you want the service to generate for this output group. Each manifest can reference a different subset of outputs in the group.
2659
+ * @public
2660
2660
  */
2661
2661
  export interface DashAdditionalManifest {
2662
2662
  /**
2663
- * @public
2664
2663
  * Specify a name modifier that the service adds to the name of this manifest to make it different from the file names of the other main manifests in the output group. For example, say that the default main manifest for your DASH group is film-name.mpd. If you enter "-no-premium" for this setting, then the file name the service generates for this top-level manifest is film-name-no-premium.mpd.
2664
+ * @public
2665
2665
  */
2666
2666
  ManifestNameModifier?: string;
2667
2667
  /**
2668
- * @public
2669
2668
  * Specify the outputs that you want this additional top-level manifest to reference.
2669
+ * @public
2670
2670
  */
2671
2671
  SelectedOutputs?: string[];
2672
2672
  }
2673
2673
  /**
2674
- * @public
2675
2674
  * Describes an account-specific API endpoint.
2675
+ * @public
2676
2676
  */
2677
2677
  export interface Endpoint {
2678
2678
  /**
2679
- * @public
2680
2679
  * URL of endpoint
2680
+ * @public
2681
2681
  */
2682
2682
  Url?: string;
2683
2683
  }
2684
2684
  /**
2685
- * @public
2686
2685
  * Specify the details for each additional HLS manifest that you want the service to generate for this output group. Each manifest can reference a different subset of outputs in the group.
2686
+ * @public
2687
2687
  */
2688
2688
  export interface HlsAdditionalManifest {
2689
2689
  /**
2690
- * @public
2691
2690
  * Specify a name modifier that the service adds to the name of this manifest to make it different from the file names of the other main manifests in the output group. For example, say that the default main manifest for your HLS group is film-name.m3u8. If you enter "-no-premium" for this setting, then the file name the service generates for this top-level manifest is film-name-no-premium.m3u8. For HLS output groups, specify a manifestNameModifier that is different from the nameModifier of the output. The service uses the output name modifier to create unique names for the individual variant manifests.
2691
+ * @public
2692
2692
  */
2693
2693
  ManifestNameModifier?: string;
2694
2694
  /**
2695
- * @public
2696
2695
  * Specify the outputs that you want this additional top-level manifest to reference.
2696
+ * @public
2697
2697
  */
2698
2698
  SelectedOutputs?: string[];
2699
2699
  }
@@ -2710,65 +2710,65 @@ export declare const HlsAdMarkers: {
2710
2710
  */
2711
2711
  export type HlsAdMarkers = (typeof HlsAdMarkers)[keyof typeof HlsAdMarkers];
2712
2712
  /**
2713
- * @public
2714
2713
  * Caption Language Mapping
2714
+ * @public
2715
2715
  */
2716
2716
  export interface HlsCaptionLanguageMapping {
2717
2717
  /**
2718
- * @public
2719
2718
  * Caption channel.
2719
+ * @public
2720
2720
  */
2721
2721
  CaptionChannel?: number;
2722
2722
  /**
2723
- * @public
2724
2723
  * Specify the language for this captions channel, using the ISO 639-2 or ISO 639-3 three-letter language code
2724
+ * @public
2725
2725
  */
2726
2726
  CustomLanguageCode?: string;
2727
2727
  /**
2728
- * @public
2729
2728
  * Specify the language, using the ISO 639-2 three-letter code listed at https://www.loc.gov/standards/iso639-2/php/code_list.php.
2729
+ * @public
2730
2730
  */
2731
2731
  LanguageCode?: LanguageCode;
2732
2732
  /**
2733
- * @public
2734
2733
  * Caption language description.
2734
+ * @public
2735
2735
  */
2736
2736
  LanguageDescription?: string;
2737
2737
  }
2738
2738
  /**
2739
- * @public
2740
2739
  * Optional. Configuration for a destination queue to which the job can hop once a customer-defined minimum wait time has passed.
2740
+ * @public
2741
2741
  */
2742
2742
  export interface HopDestination {
2743
2743
  /**
2744
- * @public
2745
2744
  * Optional. When you set up a job to use queue hopping, you can specify a different relative priority for the job in the destination queue. If you don't specify, the relative priority will remain the same as in the previous queue.
2745
+ * @public
2746
2746
  */
2747
2747
  Priority?: number;
2748
2748
  /**
2749
- * @public
2750
2749
  * Optional unless the job is submitted on the default queue. When you set up a job to use queue hopping, you can specify a destination queue. This queue cannot be the original queue to which the job is submitted. If the original queue isn't the default queue and you don't specify the destination queue, the job will move to the default queue.
2750
+ * @public
2751
2751
  */
2752
2752
  Queue?: string;
2753
2753
  /**
2754
- * @public
2755
2754
  * Required for setting up a job to use queue hopping. Minimum wait time in minutes until the job can hop to the destination queue. Valid range is 1 to 4320 minutes, inclusive.
2755
+ * @public
2756
2756
  */
2757
2757
  WaitMinutes?: number;
2758
2758
  }
2759
2759
  /**
2760
- * @public
2761
2760
  * To insert ID3 tags in your output, specify two values. Use ID3 tag to specify the base 64 encoded string and use Timecode to specify the time when the tag should be inserted. To insert multiple ID3 tags in your output, create multiple instances of ID3 insertion.
2761
+ * @public
2762
2762
  */
2763
2763
  export interface Id3Insertion {
2764
2764
  /**
2765
- * @public
2766
2765
  * Use ID3 tag to provide a fully formed ID3 tag in base64-encode format.
2766
+ * @public
2767
2767
  */
2768
2768
  Id3?: string;
2769
2769
  /**
2770
- * @public
2771
2770
  * Provide a Timecode in HH:MM:SS:FF or HH:MM:SS;FF format.
2771
+ * @public
2772
2772
  */
2773
2773
  Timecode?: string;
2774
2774
  }
@@ -2810,29 +2810,29 @@ export declare const AdvancedInputFilterSharpen: {
2810
2810
  */
2811
2811
  export type AdvancedInputFilterSharpen = (typeof AdvancedInputFilterSharpen)[keyof typeof AdvancedInputFilterSharpen];
2812
2812
  /**
2813
- * @public
2814
2813
  * Optional settings for Advanced input filter when you set Advanced input filter to Enabled.
2814
+ * @public
2815
2815
  */
2816
2816
  export interface AdvancedInputFilterSettings {
2817
2817
  /**
2818
- * @public
2819
2818
  * Add texture and detail to areas of your input video content that were lost after applying the Advanced input filter. To adaptively add texture and reduce softness: Choose Enabled. To not add any texture: Keep the default value, Disabled. We recommend that you choose Disabled for input video content that doesn't have texture, including screen recordings, computer graphics, or cartoons.
2819
+ * @public
2820
2820
  */
2821
2821
  AddTexture?: AdvancedInputFilterAddTexture;
2822
2822
  /**
2823
- * @public
2824
2823
  * Optionally specify the amount of sharpening to apply when you use the Advanced input filter. Sharpening adds contrast to the edges of your video content and can reduce softness. To apply no sharpening: Keep the default value, Off. To apply a minimal amount of sharpening choose Low, or for the maximum choose High.
2824
+ * @public
2825
2825
  */
2826
2826
  Sharpening?: AdvancedInputFilterSharpen;
2827
2827
  }
2828
2828
  /**
2829
- * @public
2830
2829
  * Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab. Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group.
2830
+ * @public
2831
2831
  */
2832
2832
  export interface AudioSelectorGroup {
2833
2833
  /**
2834
- * @public
2835
2834
  * Name of an Audio Selector within the same input to include in the group. Audio selector names are standardized, based on their order within the input (e.g., "Audio Selector 1"). The audio selector name parameter can be repeated to add any number of audio selectors to the group.
2835
+ * @public
2836
2836
  */
2837
2837
  AudioSelectorNames?: string[];
2838
2838
  }
@@ -2863,23 +2863,23 @@ export declare const AudioDefaultSelection: {
2863
2863
  */
2864
2864
  export type AudioDefaultSelection = (typeof AudioDefaultSelection)[keyof typeof AudioDefaultSelection];
2865
2865
  /**
2866
- * @public
2867
2866
  * Settings specific to audio sources in an HLS alternate rendition group. Specify the properties (renditionGroupId, renditionName or renditionLanguageCode) to identify the unique audio track among the alternative rendition groups present in the HLS manifest. If no unique track is found, or multiple tracks match the properties provided, the job fails. If no properties in hlsRenditionGroupSettings are specified, the default audio track within the video segment is chosen. If there is no audio within video segment, the alternative audio with DEFAULT=YES is chosen instead.
2867
+ * @public
2868
2868
  */
2869
2869
  export interface HlsRenditionGroupSettings {
2870
2870
  /**
2871
- * @public
2872
2871
  * Optional. Specify alternative group ID
2872
+ * @public
2873
2873
  */
2874
2874
  RenditionGroupId?: string;
2875
2875
  /**
2876
- * @public
2877
2876
  * Optional. Specify ISO 639-2 or ISO 639-3 code in the language property
2877
+ * @public
2878
2878
  */
2879
2879
  RenditionLanguageCode?: LanguageCode;
2880
2880
  /**
2881
- * @public
2882
2881
  * Optional. Specify media name
2882
+ * @public
2883
2883
  */
2884
2884
  RenditionName?: string;
2885
2885
  }
@@ -2898,68 +2898,68 @@ export declare const AudioSelectorType: {
2898
2898
  */
2899
2899
  export type AudioSelectorType = (typeof AudioSelectorType)[keyof typeof AudioSelectorType];
2900
2900
  /**
2901
- * @public
2902
2901
  * Use Audio selectors to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input.
2902
+ * @public
2903
2903
  */
2904
2904
  export interface AudioSelector {
2905
2905
  /**
2906
- * @public
2907
2906
  * Apply audio timing corrections to help synchronize audio and video in your output. To apply timing corrections, your input must meet the following requirements: * Container: MP4, or MOV, with an accurate time-to-sample (STTS) table. * Audio track: AAC. Choose from the following audio timing correction settings: * Disabled (Default): Apply no correction. * Auto: Recommended for most inputs. MediaConvert analyzes the audio timing in your input and determines which correction setting to use, if needed. * Track: Adjust the duration of each audio frame by a constant amount to align the audio track length with STTS duration. Track-level correction does not affect pitch, and is recommended for tonal audio content such as music. * Frame: Adjust the duration of each audio frame by a variable amount to align audio frames with STTS timestamps. No corrections are made to already-aligned frames. Frame-level correction may affect the pitch of corrected frames, and is recommended for atonal audio content such as speech or percussion.
2907
+ * @public
2908
2908
  */
2909
2909
  AudioDurationCorrection?: AudioDurationCorrection;
2910
2910
  /**
2911
- * @public
2912
2911
  * Selects a specific language code from within an audio source, using the ISO 639-2 or ISO 639-3 three-letter language code
2912
+ * @public
2913
2913
  */
2914
2914
  CustomLanguageCode?: string;
2915
2915
  /**
2916
- * @public
2917
2916
  * Enable this setting on one audio selector to set it as the default for the job. The service uses this default for outputs where it can't find the specified input audio. If you don't set a default, those outputs have no audio.
2917
+ * @public
2918
2918
  */
2919
2919
  DefaultSelection?: AudioDefaultSelection;
2920
2920
  /**
2921
- * @public
2922
2921
  * Specifies audio data from an external file source.
2922
+ * @public
2923
2923
  */
2924
2924
  ExternalAudioFileInput?: string;
2925
2925
  /**
2926
- * @public
2927
2926
  * Settings specific to audio sources in an HLS alternate rendition group. Specify the properties (renditionGroupId, renditionName or renditionLanguageCode) to identify the unique audio track among the alternative rendition groups present in the HLS manifest. If no unique track is found, or multiple tracks match the properties provided, the job fails. If no properties in hlsRenditionGroupSettings are specified, the default audio track within the video segment is chosen. If there is no audio within video segment, the alternative audio with DEFAULT=YES is chosen instead.
2927
+ * @public
2928
2928
  */
2929
2929
  HlsRenditionGroupSettings?: HlsRenditionGroupSettings;
2930
2930
  /**
2931
- * @public
2932
2931
  * Selects a specific language code from within an audio source.
2932
+ * @public
2933
2933
  */
2934
2934
  LanguageCode?: LanguageCode;
2935
2935
  /**
2936
- * @public
2937
2936
  * Specifies a time delta in milliseconds to offset the audio from the input video.
2937
+ * @public
2938
2938
  */
2939
2939
  Offset?: number;
2940
2940
  /**
2941
- * @public
2942
2941
  * Selects a specific PID from within an audio source (e.g. 257 selects PID 0x101).
2942
+ * @public
2943
2943
  */
2944
2944
  Pids?: number[];
2945
2945
  /**
2946
- * @public
2947
2946
  * Use this setting for input streams that contain Dolby E, to have the service extract specific program data from the track. To select multiple programs, create multiple selectors with the same Track and different Program numbers. In the console, this setting is visible when you set Selector type to Track. Choose the program number from the dropdown list. If your input file has incorrect metadata, you can choose All channels instead of a program number to have the service ignore the program IDs and include all the programs in the track.
2947
+ * @public
2948
2948
  */
2949
2949
  ProgramSelection?: number;
2950
2950
  /**
2951
- * @public
2952
2951
  * Use these settings to reorder the audio channels of one input to match those of another input. This allows you to combine the two files into a single output, one after the other.
2952
+ * @public
2953
2953
  */
2954
2954
  RemixSettings?: RemixSettings;
2955
2955
  /**
2956
- * @public
2957
2956
  * Specifies the type of the audio selector.
2957
+ * @public
2958
2958
  */
2959
2959
  SelectorType?: AudioSelectorType;
2960
2960
  /**
2961
- * @public
2962
2961
  * Identify a track from the input audio to include in this selector by entering the track index number. To include several tracks in a single audio selector, specify multiple tracks as follows. Using the console, enter a comma-separated list. For example, type "1,2,3" to include tracks 1 through 3.
2962
+ * @public
2963
2963
  */
2964
2964
  Tracks?: number[];
2965
2965
  }
@@ -2988,34 +2988,34 @@ export declare const AncillaryTerminateCaptions: {
2988
2988
  */
2989
2989
  export type AncillaryTerminateCaptions = (typeof AncillaryTerminateCaptions)[keyof typeof AncillaryTerminateCaptions];
2990
2990
  /**
2991
- * @public
2992
2991
  * Settings for ancillary captions source.
2992
+ * @public
2993
2993
  */
2994
2994
  export interface AncillarySourceSettings {
2995
2995
  /**
2996
- * @public
2997
2996
  * Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.
2997
+ * @public
2998
2998
  */
2999
2999
  Convert608To708?: AncillaryConvert608To708;
3000
3000
  /**
3001
- * @public
3002
3001
  * Specifies the 608 channel number in the ancillary data track from which to extract captions. Unused for passthrough.
3002
+ * @public
3003
3003
  */
3004
3004
  SourceAncillaryChannelNumber?: number;
3005
3005
  /**
3006
- * @public
3007
3006
  * By default, the service terminates any unterminated captions at the end of each input. If you want the caption to continue onto your next input, disable this setting.
3007
+ * @public
3008
3008
  */
3009
3009
  TerminateCaptions?: AncillaryTerminateCaptions;
3010
3010
  }
3011
3011
  /**
3012
- * @public
3013
3012
  * DVB Sub Source Settings
3013
+ * @public
3014
3014
  */
3015
3015
  export interface DvbSubSourceSettings {
3016
3016
  /**
3017
- * @public
3018
3017
  * When using DVB-Sub with Burn-in, use this PID for the source content. Unused for DVB-Sub passthrough. All DVB-Sub content is passed through, regardless of selectors.
3018
+ * @public
3019
3019
  */
3020
3020
  Pid?: number;
3021
3021
  }
@@ -3044,28 +3044,28 @@ export declare const EmbeddedTerminateCaptions: {
3044
3044
  */
3045
3045
  export type EmbeddedTerminateCaptions = (typeof EmbeddedTerminateCaptions)[keyof typeof EmbeddedTerminateCaptions];
3046
3046
  /**
3047
- * @public
3048
3047
  * Settings for embedded captions Source
3048
+ * @public
3049
3049
  */
3050
3050
  export interface EmbeddedSourceSettings {
3051
3051
  /**
3052
- * @public
3053
3052
  * Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.
3053
+ * @public
3054
3054
  */
3055
3055
  Convert608To708?: EmbeddedConvert608To708;
3056
3056
  /**
3057
- * @public
3058
3057
  * Specifies the 608/708 channel number within the video track from which to extract captions. Unused for passthrough.
3058
+ * @public
3059
3059
  */
3060
3060
  Source608ChannelNumber?: number;
3061
3061
  /**
3062
- * @public
3063
3062
  * Specifies the video track index used for extracting captions. The system only supports one input video track, so this should always be set to '1'.
3063
+ * @public
3064
3064
  */
3065
3065
  Source608TrackNumber?: number;
3066
3066
  /**
3067
- * @public
3068
3067
  * By default, the service terminates any unterminated captions at the end of each input. If you want the caption to continue onto your next input, disable this setting.
3068
+ * @public
3069
3069
  */
3070
3070
  TerminateCaptions?: EmbeddedTerminateCaptions;
3071
3071
  }
@@ -3094,18 +3094,18 @@ export declare const CaptionSourceConvertPaintOnToPopOn: {
3094
3094
  */
3095
3095
  export type CaptionSourceConvertPaintOnToPopOn = (typeof CaptionSourceConvertPaintOnToPopOn)[keyof typeof CaptionSourceConvertPaintOnToPopOn];
3096
3096
  /**
3097
- * @public
3098
3097
  * Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps.
3098
+ * @public
3099
3099
  */
3100
3100
  export interface CaptionSourceFramerate {
3101
3101
  /**
3102
- * @public
3103
3102
  * Specify the denominator of the fraction that represents the frame rate for the setting Caption source frame rate. Use this setting along with the setting Framerate numerator.
3103
+ * @public
3104
3104
  */
3105
3105
  FramerateDenominator?: number;
3106
3106
  /**
3107
- * @public
3108
3107
  * Specify the numerator of the fraction that represents the frame rate for the setting Caption source frame rate. Use this setting along with the setting Framerate denominator.
3108
+ * @public
3109
3109
  */
3110
3110
  FramerateNumerator?: number;
3111
3111
  }
@@ -3122,38 +3122,38 @@ export declare const FileSourceTimeDeltaUnits: {
3122
3122
  */
3123
3123
  export type FileSourceTimeDeltaUnits = (typeof FileSourceTimeDeltaUnits)[keyof typeof FileSourceTimeDeltaUnits];
3124
3124
  /**
3125
- * @public
3126
3125
  * If your input captions are SCC, SMI, SRT, STL, TTML, WebVTT, or IMSC 1.1 in an xml file, specify the URI of the input caption source file. If your caption source is IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings.
3126
+ * @public
3127
3127
  */
3128
3128
  export interface FileSourceSettings {
3129
3129
  /**
3130
- * @public
3131
3130
  * Specify whether this set of input captions appears in your outputs in both 608 and 708 format. If you choose Upconvert, MediaConvert includes the captions data in two ways: it passes the 608 data through using the 608 compatibility bytes fields of the 708 wrapper, and it also translates the 608 data into 708.
3131
+ * @public
3132
3132
  */
3133
3133
  Convert608To708?: FileSourceConvert608To708;
3134
3134
  /**
3135
- * @public
3136
3135
  * Choose the presentation style of your input SCC captions. To use the same presentation style as your input: Keep the default value, Disabled. To convert paint-on captions to pop-on: Choose Enabled. We also recommend that you choose Enabled if you notice additional repeated lines in your output captions.
3136
+ * @public
3137
3137
  */
3138
3138
  ConvertPaintToPop?: CaptionSourceConvertPaintOnToPopOn;
3139
3139
  /**
3140
- * @public
3141
3140
  * Ignore this setting unless your input captions format is SCC. To have the service compensate for differing frame rates between your input captions and input video, specify the frame rate of the captions file. Specify this value as a fraction. For example, you might specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, or 30000 / 1001 for 29.97 fps.
3141
+ * @public
3142
3142
  */
3143
3143
  Framerate?: CaptionSourceFramerate;
3144
3144
  /**
3145
- * @public
3146
3145
  * External caption file used for loading captions. Accepted file extensions are 'scc', 'ttml', 'dfxp', 'stl', 'srt', 'xml', 'smi', 'webvtt', and 'vtt'.
3146
+ * @public
3147
3147
  */
3148
3148
  SourceFile?: string;
3149
3149
  /**
3150
- * @public
3151
3150
  * Optional. Use this setting when you need to adjust the sync between your sidecar captions and your video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/time-delta-use-cases.html. Enter a positive or negative number to modify the times in the captions file. For example, type 15 to add 15 seconds to all the times in the captions file. Type -5 to subtract 5 seconds from the times in the captions file. You can optionally specify your time delta in milliseconds instead of seconds. When you do so, set the related setting, Time delta units to Milliseconds. Note that, when you specify a time delta for timecode-based caption sources, such as SCC and STL, and your time delta isn't a multiple of the input frame rate, MediaConvert snaps the captions to the nearest frame. For example, when your input video frame rate is 25 fps and you specify 1010ms for time delta, MediaConvert delays your captions by 1000 ms.
3151
+ * @public
3152
3152
  */
3153
3153
  TimeDelta?: number;
3154
3154
  /**
3155
- * @public
3156
3155
  * When you use the setting Time delta to adjust the sync between your sidecar captions and your video, use this setting to specify the units for the delta that you specify. When you don't specify a value for Time delta units, MediaConvert uses seconds by default.
3156
+ * @public
3157
3157
  */
3158
3158
  TimeDeltaUnits?: FileSourceTimeDeltaUnits;
3159
3159
  }
@@ -3182,138 +3182,138 @@ export declare const CaptionSourceType: {
3182
3182
  */
3183
3183
  export type CaptionSourceType = (typeof CaptionSourceType)[keyof typeof CaptionSourceType];
3184
3184
  /**
3185
- * @public
3186
3185
  * Settings specific to Teletext caption sources, including Page number.
3186
+ * @public
3187
3187
  */
3188
3188
  export interface TeletextSourceSettings {
3189
3189
  /**
3190
- * @public
3191
3190
  * Use Page Number to specify the three-digit hexadecimal page number that will be used for Teletext captions. Do not use this setting if you are passing through teletext from the input source to output.
3191
+ * @public
3192
3192
  */
3193
3193
  PageNumber?: string;
3194
3194
  }
3195
3195
  /**
3196
- * @public
3197
3196
  * Settings specific to caption sources that are specified by track number. Currently, this is only IMSC captions in an IMF package. If your caption source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead of TrackSourceSettings.
3197
+ * @public
3198
3198
  */
3199
3199
  export interface TrackSourceSettings {
3200
3200
  /**
3201
- * @public
3202
3201
  * Use this setting to select a single captions track from a source. Track numbers correspond to the order in the captions source file. For IMF sources, track numbering is based on the order that the captions appear in the CPL. For example, use 1 to select the captions asset that is listed first in the CPL. To include more than one captions track in your job outputs, create multiple input captions selectors. Specify one track per selector.
3202
+ * @public
3203
3203
  */
3204
3204
  TrackNumber?: number;
3205
3205
  }
3206
3206
  /**
3207
- * @public
3208
3207
  * Settings specific to WebVTT sources in HLS alternative rendition group. Specify the properties (renditionGroupId, renditionName or renditionLanguageCode) to identify the unique subtitle track among the alternative rendition groups present in the HLS manifest. If no unique track is found, or multiple tracks match the specified properties, the job fails. If there is only one subtitle track in the rendition group, the settings can be left empty and the default subtitle track will be chosen. If your caption source is a sidecar file, use FileSourceSettings instead of WebvttHlsSourceSettings.
3208
+ * @public
3209
3209
  */
3210
3210
  export interface WebvttHlsSourceSettings {
3211
3211
  /**
3212
- * @public
3213
3212
  * Optional. Specify alternative group ID
3213
+ * @public
3214
3214
  */
3215
3215
  RenditionGroupId?: string;
3216
3216
  /**
3217
- * @public
3218
3217
  * Optional. Specify ISO 639-2 or ISO 639-3 code in the language property
3218
+ * @public
3219
3219
  */
3220
3220
  RenditionLanguageCode?: LanguageCode;
3221
3221
  /**
3222
- * @public
3223
3222
  * Optional. Specify media name
3223
+ * @public
3224
3224
  */
3225
3225
  RenditionName?: string;
3226
3226
  }
3227
3227
  /**
3228
- * @public
3229
3228
  * If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file, specify the URI of the input captions source file. If your input captions are IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings.
3229
+ * @public
3230
3230
  */
3231
3231
  export interface CaptionSourceSettings {
3232
3232
  /**
3233
- * @public
3234
3233
  * Settings for ancillary captions source.
3234
+ * @public
3235
3235
  */
3236
3236
  AncillarySourceSettings?: AncillarySourceSettings;
3237
3237
  /**
3238
- * @public
3239
3238
  * DVB Sub Source Settings
3239
+ * @public
3240
3240
  */
3241
3241
  DvbSubSourceSettings?: DvbSubSourceSettings;
3242
3242
  /**
3243
- * @public
3244
3243
  * Settings for embedded captions Source
3244
+ * @public
3245
3245
  */
3246
3246
  EmbeddedSourceSettings?: EmbeddedSourceSettings;
3247
3247
  /**
3248
- * @public
3249
3248
  * If your input captions are SCC, SMI, SRT, STL, TTML, WebVTT, or IMSC 1.1 in an xml file, specify the URI of the input caption source file. If your caption source is IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings.
3249
+ * @public
3250
3250
  */
3251
3251
  FileSourceSettings?: FileSourceSettings;
3252
3252
  /**
3253
- * @public
3254
3253
  * Use Source to identify the format of your input captions. The service cannot auto-detect caption format.
3254
+ * @public
3255
3255
  */
3256
3256
  SourceType?: CaptionSourceType;
3257
3257
  /**
3258
- * @public
3259
3258
  * Settings specific to Teletext caption sources, including Page number.
3259
+ * @public
3260
3260
  */
3261
3261
  TeletextSourceSettings?: TeletextSourceSettings;
3262
3262
  /**
3263
- * @public
3264
3263
  * Settings specific to caption sources that are specified by track number. Currently, this is only IMSC captions in an IMF package. If your caption source is IMSC 1.1 in a separate xml file, use FileSourceSettings instead of TrackSourceSettings.
3264
+ * @public
3265
3265
  */
3266
3266
  TrackSourceSettings?: TrackSourceSettings;
3267
3267
  /**
3268
- * @public
3269
3268
  * Settings specific to WebVTT sources in HLS alternative rendition group. Specify the properties (renditionGroupId, renditionName or renditionLanguageCode) to identify the unique subtitle track among the alternative rendition groups present in the HLS manifest. If no unique track is found, or multiple tracks match the specified properties, the job fails. If there is only one subtitle track in the rendition group, the settings can be left empty and the default subtitle track will be chosen. If your caption source is a sidecar file, use FileSourceSettings instead of WebvttHlsSourceSettings.
3269
+ * @public
3270
3270
  */
3271
3271
  WebvttHlsSourceSettings?: WebvttHlsSourceSettings;
3272
3272
  }
3273
3273
  /**
3274
- * @public
3275
3274
  * Use captions selectors to specify the captions data from your input that you use in your outputs. You can use up to 100 captions selectors per input.
3275
+ * @public
3276
3276
  */
3277
3277
  export interface CaptionSelector {
3278
3278
  /**
3279
- * @public
3280
3279
  * The specific language to extract from source, using the ISO 639-2 or ISO 639-3 three-letter language code. If input is SCTE-27, complete this field and/or PID to select the caption language to extract. If input is DVB-Sub and output is Burn-in, complete this field and/or PID to select the caption language to extract. If input is DVB-Sub that is being passed through, omit this field (and PID field); there is no way to extract a specific language with pass-through captions.
3280
+ * @public
3281
3281
  */
3282
3282
  CustomLanguageCode?: string;
3283
3283
  /**
3284
- * @public
3285
3284
  * The specific language to extract from source. If input is SCTE-27, complete this field and/or PID to select the caption language to extract. If input is DVB-Sub and output is Burn-in, complete this field and/or PID to select the caption language to extract. If input is DVB-Sub that is being passed through, omit this field (and PID field); there is no way to extract a specific language with pass-through captions.
3285
+ * @public
3286
3286
  */
3287
3287
  LanguageCode?: LanguageCode;
3288
3288
  /**
3289
- * @public
3290
3289
  * If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file, specify the URI of the input captions source file. If your input captions are IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings.
3290
+ * @public
3291
3291
  */
3292
3292
  SourceSettings?: CaptionSourceSettings;
3293
3293
  }
3294
3294
  /**
3295
- * @public
3296
3295
  * Use Rectangle to identify a specific area of the video frame.
3296
+ * @public
3297
3297
  */
3298
3298
  export interface Rectangle {
3299
3299
  /**
3300
- * @public
3301
3300
  * Height of rectangle in pixels. Specify only even numbers.
3301
+ * @public
3302
3302
  */
3303
3303
  Height?: number;
3304
3304
  /**
3305
- * @public
3306
3305
  * Width of rectangle in pixels. Specify only even numbers.
3306
+ * @public
3307
3307
  */
3308
3308
  Width?: number;
3309
3309
  /**
3310
- * @public
3311
3310
  * The distance, in pixels, between the rectangle and the left edge of the video frame. Specify only even numbers.
3311
+ * @public
3312
3312
  */
3313
3313
  X?: number;
3314
3314
  /**
3315
- * @public
3316
3315
  * The distance, in pixels, between the rectangle and the top edge of the video frame. Specify only even numbers.
3316
+ * @public
3317
3317
  */
3318
3318
  Y?: number;
3319
3319
  }
@@ -3343,28 +3343,28 @@ export declare const DecryptionMode: {
3343
3343
  */
3344
3344
  export type DecryptionMode = (typeof DecryptionMode)[keyof typeof DecryptionMode];
3345
3345
  /**
3346
- * @public
3347
3346
  * Settings for decrypting any input files that you encrypt before you upload them to Amazon S3. MediaConvert can decrypt files only when you use AWS Key Management Service (KMS) to encrypt the data key that you use to encrypt your content.
3347
+ * @public
3348
3348
  */
3349
3349
  export interface InputDecryptionSettings {
3350
3350
  /**
3351
- * @public
3352
3351
  * Specify the encryption mode that you used to encrypt your input files.
3352
+ * @public
3353
3353
  */
3354
3354
  DecryptionMode?: DecryptionMode;
3355
3355
  /**
3356
- * @public
3357
3356
  * Warning! Don't provide your encryption key in plaintext. Your job settings could be intercepted, making your encrypted content vulnerable. Specify the encrypted version of the data key that you used to encrypt your content. The data key must be encrypted by AWS Key Management Service (KMS). The key can be 128, 192, or 256 bits.
3357
+ * @public
3358
3358
  */
3359
3359
  EncryptedDecryptionKey?: string;
3360
3360
  /**
3361
- * @public
3362
3361
  * Specify the initialization vector that you used when you encrypted your content before uploading it to Amazon S3. You can use a 16-byte initialization vector with any encryption mode. Or, you can use a 12-byte initialization vector with GCM or CTR. MediaConvert accepts only initialization vectors that are base64-encoded.
3362
+ * @public
3363
3363
  */
3364
3364
  InitializationVector?: string;
3365
3365
  /**
3366
- * @public
3367
3366
  * Specify the AWS Region for AWS Key Management Service (KMS) that you used to encrypt your data key, if that Region is different from the one you are using for AWS Elemental MediaConvert.
3367
+ * @public
3368
3368
  */
3369
3369
  KmsKeyRegion?: string;
3370
3370
  }
@@ -3394,95 +3394,95 @@ export declare const InputFilterEnable: {
3394
3394
  */
3395
3395
  export type InputFilterEnable = (typeof InputFilterEnable)[keyof typeof InputFilterEnable];
3396
3396
  /**
3397
- * @public
3398
3397
  * These settings apply to a specific graphic overlay. You can include multiple overlays in your job.
3398
+ * @public
3399
3399
  */
3400
3400
  export interface InsertableImage {
3401
3401
  /**
3402
- * @public
3403
3402
  * Specify the time, in milliseconds, for the image to remain on the output video. This duration includes fade-in time but not fade-out time.
3403
+ * @public
3404
3404
  */
3405
3405
  Duration?: number;
3406
3406
  /**
3407
- * @public
3408
3407
  * Specify the length of time, in milliseconds, between the Start time that you specify for the image insertion and the time that the image appears at full opacity. Full opacity is the level that you specify for the opacity setting. If you don't specify a value for Fade-in, the image will appear abruptly at the overlay start time.
3408
+ * @public
3409
3409
  */
3410
3410
  FadeIn?: number;
3411
3411
  /**
3412
- * @public
3413
3412
  * Specify the length of time, in milliseconds, between the end of the time that you have specified for the image overlay Duration and when the overlaid image has faded to total transparency. If you don't specify a value for Fade-out, the image will disappear abruptly at the end of the inserted image duration.
3413
+ * @public
3414
3414
  */
3415
3415
  FadeOut?: number;
3416
3416
  /**
3417
- * @public
3418
3417
  * Specify the height of the inserted image in pixels. If you specify a value that's larger than the video resolution height, the service will crop your overlaid image to fit. To use the native height of the image, keep this setting blank.
3418
+ * @public
3419
3419
  */
3420
3420
  Height?: number;
3421
3421
  /**
3422
- * @public
3423
3422
  * Specify the HTTP, HTTPS, or Amazon S3 location of the image that you want to overlay on the video. Use a PNG or TGA file.
3423
+ * @public
3424
3424
  */
3425
3425
  ImageInserterInput?: string;
3426
3426
  /**
3427
- * @public
3428
3427
  * Specify the distance, in pixels, between the inserted image and the left edge of the video frame. Required for any image overlay that you specify.
3428
+ * @public
3429
3429
  */
3430
3430
  ImageX?: number;
3431
3431
  /**
3432
- * @public
3433
3432
  * Specify the distance, in pixels, between the overlaid image and the top edge of the video frame. Required for any image overlay that you specify.
3433
+ * @public
3434
3434
  */
3435
3435
  ImageY?: number;
3436
3436
  /**
3437
- * @public
3438
3437
  * Specify how overlapping inserted images appear. Images with higher values for Layer appear on top of images with lower values for Layer.
3438
+ * @public
3439
3439
  */
3440
3440
  Layer?: number;
3441
3441
  /**
3442
- * @public
3443
3442
  * Use Opacity to specify how much of the underlying video shows through the inserted image. 0 is transparent and 100 is fully opaque. Default is 50.
3443
+ * @public
3444
3444
  */
3445
3445
  Opacity?: number;
3446
3446
  /**
3447
- * @public
3448
3447
  * Specify the timecode of the frame that you want the overlay to first appear on. This must be in timecode (HH:MM:SS:FF or HH:MM:SS;FF) format. Remember to take into account your timecode source settings.
3448
+ * @public
3449
3449
  */
3450
3450
  StartTime?: string;
3451
3451
  /**
3452
- * @public
3453
3452
  * Specify the width of the inserted image in pixels. If you specify a value that's larger than the video resolution width, the service will crop your overlaid image to fit. To use the native width of the image, keep this setting blank.
3453
+ * @public
3454
3454
  */
3455
3455
  Width?: number;
3456
3456
  }
3457
3457
  /**
3458
- * @public
3459
3458
  * Use the image inserter feature to include a graphic overlay on your video. Enable or disable this feature for each input or output individually. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/graphic-overlay.html. This setting is disabled by default.
3459
+ * @public
3460
3460
  */
3461
3461
  export interface ImageInserter {
3462
3462
  /**
3463
- * @public
3464
3463
  * Specify the images that you want to overlay on your video. The images must be PNG or TGA files.
3464
+ * @public
3465
3465
  */
3466
3466
  InsertableImages?: InsertableImage[];
3467
3467
  /**
3468
- * @public
3469
3468
  * Specify the reference white level, in nits, for all of your image inserter images. Use to correct brightness levels within HDR10 outputs. For 1,000 nit peak brightness displays, we recommend that you set SDR reference white level to 203 (according to ITU-R BT.2408). Leave blank to use the default value of 100, or specify an integer from 100 to 1000.
3469
+ * @public
3470
3470
  */
3471
3471
  SdrReferenceWhiteLevel?: number;
3472
3472
  }
3473
3473
  /**
3474
- * @public
3475
3474
  * To transcode only portions of your input, include one input clip for each part of your input that you want in your output. All input clips that you specify will be included in every output of the job. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/assembling-multiple-inputs-and-input-clips.html.
3475
+ * @public
3476
3476
  */
3477
3477
  export interface InputClipping {
3478
3478
  /**
3479
- * @public
3480
3479
  * Set End timecode to the end of the portion of the input you are clipping. The frame corresponding to the End timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for timecode source under input settings. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to end six minutes into the video, use 01:06:00:00.
3480
+ * @public
3481
3481
  */
3482
3482
  EndTimecode?: string;
3483
3483
  /**
3484
- * @public
3485
3484
  * Set Start timecode to the beginning of the portion of the input you are clipping. The frame corresponding to the Start timecode value is included in the clip. Start timecode or End timecode may be left blank, but not both. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When choosing this value, take into account your setting for Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your clip to begin five minutes into the video, use 01:05:00:00.
3485
+ * @public
3486
3486
  */
3487
3487
  StartTimecode?: string;
3488
3488
  }
@@ -3524,76 +3524,76 @@ export declare const InputTimecodeSource: {
3524
3524
  */
3525
3525
  export type InputTimecodeSource = (typeof InputTimecodeSource)[keyof typeof InputTimecodeSource];
3526
3526
  /**
3527
- * @public
3528
3527
  * When you include Video generator, MediaConvert creates a video input with black frames. Use this setting if you do not have a video input or if you want to add black video frames before, or after, other inputs. You can specify Video generator, or you can specify an Input file, but you cannot specify both. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/video-generator.html
3528
+ * @public
3529
3529
  */
3530
3530
  export interface InputVideoGenerator {
3531
3531
  /**
3532
- * @public
3533
3532
  * Specify an integer value for Black video duration from 50 to 86400000 to generate a black video input for that many milliseconds. Required when you include Video generator.
3533
+ * @public
3534
3534
  */
3535
3535
  Duration?: number;
3536
3536
  }
3537
3537
  /**
3538
- * @public
3539
3538
  * To transcode only portions of your video overlay, include one input clip for each part of your video overlay that you want in your output.
3539
+ * @public
3540
3540
  */
3541
3541
  export interface VideoOverlayInputClipping {
3542
3542
  /**
3543
- * @public
3544
3543
  * Specify the timecode of the last frame to include in your video overlay's clip. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for Timecode source.
3544
+ * @public
3545
3545
  */
3546
3546
  EndTimecode?: string;
3547
3547
  /**
3548
- * @public
3549
3548
  * Specify the timecode of the first frame to include in your video overlay's clip. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for Timecode source.
3549
+ * @public
3550
3550
  */
3551
3551
  StartTimecode?: string;
3552
3552
  }
3553
3553
  /**
3554
- * @public
3555
3554
  * Input settings for Video overlay. You can include one or more video overlays in sequence at different times that you specify.
3555
+ * @public
3556
3556
  */
3557
3557
  export interface VideoOverlayInput {
3558
3558
  /**
3559
- * @public
3560
3559
  * Specify the input file S3, HTTP, or HTTPS URI for your video overlay. For consistency in color and formatting in your output video image, we recommend that you specify a video with similar characteristics as the underlying input video.
3560
+ * @public
3561
3561
  */
3562
3562
  FileInput?: string;
3563
3563
  /**
3564
- * @public
3565
3564
  * Specify one or more clips to use from your video overlay. When you include an input clip, you must also specify its start timecode, end timecode, or both start and end timecode.
3565
+ * @public
3566
3566
  */
3567
3567
  InputClippings?: VideoOverlayInputClipping[];
3568
3568
  /**
3569
- * @public
3570
3569
  * Specify the timecode source for your video overlay input clips. To use the timecode present in your video overlay: Choose Embedded. To use a zerobased timecode: Choose Start at 0. To choose a timecode: Choose Specified start. When you do, enter the starting timecode in Start timecode. If you don't specify a value for Timecode source, MediaConvert uses Embedded by default.
3570
+ * @public
3571
3571
  */
3572
3572
  TimecodeSource?: InputTimecodeSource;
3573
3573
  /**
3574
- * @public
3575
3574
  * Specify the starting timecode for this video overlay. To use this setting, you must set Timecode source to Specified start.
3575
+ * @public
3576
3576
  */
3577
3577
  TimecodeStart?: string;
3578
3578
  }
3579
3579
  /**
3580
- * @public
3581
3580
  * Overlay one or more videos on top of your input video. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/video-overlays.html
3581
+ * @public
3582
3582
  */
3583
3583
  export interface VideoOverlay {
3584
3584
  /**
3585
- * @public
3586
3585
  * Enter the end timecode in the underlying input video for this overlay. Your overlay will be active through this frame. To display your video overlay for the duration of the underlying video: Leave blank. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for the underlying Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your overlay to end ten minutes into the video, enter 01:10:00:00.
3586
+ * @public
3587
3587
  */
3588
3588
  EndTimecode?: string;
3589
3589
  /**
3590
- * @public
3591
3590
  * Input settings for Video overlay. You can include one or more video overlays in sequence at different times that you specify.
3591
+ * @public
3592
3592
  */
3593
3593
  Input?: VideoOverlayInput;
3594
3594
  /**
3595
- * @public
3596
3595
  * Enter the start timecode in the underlying input video for this overlay. Your overlay will be active starting with this frame. To display your video overlay starting at the beginning of the underlying video: Leave blank. Use the format HH:MM:SS:FF or HH:MM:SS;FF, where HH is the hour, MM is the minute, SS is the second, and FF is the frame number. When entering this value, take into account your choice for the underlying Input timecode source. For example, if you have embedded timecodes that start at 01:00:00:00 and you want your overlay to begin five minutes into the video, enter 01:05:00:00.
3596
+ * @public
3597
3597
  */
3598
3598
  StartTimecode?: string;
3599
3599
  }
@@ -3634,68 +3634,68 @@ export declare const EmbeddedTimecodeOverride: {
3634
3634
  */
3635
3635
  export type EmbeddedTimecodeOverride = (typeof EmbeddedTimecodeOverride)[keyof typeof EmbeddedTimecodeOverride];
3636
3636
  /**
3637
- * @public
3638
3637
  * Use these settings to specify static color calibration metadata, as defined by SMPTE ST 2086. These values don't affect the pixel values that are encoded in the video stream. They are intended to help the downstream video player display content in a way that reflects the intentions of the the content creator.
3638
+ * @public
3639
3639
  */
3640
3640
  export interface Hdr10Metadata {
3641
3641
  /**
3642
- * @public
3643
3642
  * HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction.
3643
+ * @public
3644
3644
  */
3645
3645
  BluePrimaryX?: number;
3646
3646
  /**
3647
- * @public
3648
3647
  * HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction.
3648
+ * @public
3649
3649
  */
3650
3650
  BluePrimaryY?: number;
3651
3651
  /**
3652
- * @public
3653
3652
  * HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction.
3653
+ * @public
3654
3654
  */
3655
3655
  GreenPrimaryX?: number;
3656
3656
  /**
3657
- * @public
3658
3657
  * HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction.
3658
+ * @public
3659
3659
  */
3660
3660
  GreenPrimaryY?: number;
3661
3661
  /**
3662
- * @public
3663
3662
  * Maximum light level among all samples in the coded video sequence, in units of candelas per square meter. This setting doesn't have a default value; you must specify a value that is suitable for the content.
3663
+ * @public
3664
3664
  */
3665
3665
  MaxContentLightLevel?: number;
3666
3666
  /**
3667
- * @public
3668
3667
  * Maximum average light level of any frame in the coded video sequence, in units of candelas per square meter. This setting doesn't have a default value; you must specify a value that is suitable for the content.
3668
+ * @public
3669
3669
  */
3670
3670
  MaxFrameAverageLightLevel?: number;
3671
3671
  /**
3672
- * @public
3673
3672
  * Nominal maximum mastering display luminance in units of of 0.0001 candelas per square meter.
3673
+ * @public
3674
3674
  */
3675
3675
  MaxLuminance?: number;
3676
3676
  /**
3677
- * @public
3678
3677
  * Nominal minimum mastering display luminance in units of of 0.0001 candelas per square meter
3678
+ * @public
3679
3679
  */
3680
3680
  MinLuminance?: number;
3681
3681
  /**
3682
- * @public
3683
3682
  * HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction.
3683
+ * @public
3684
3684
  */
3685
3685
  RedPrimaryX?: number;
3686
3686
  /**
3687
- * @public
3688
3687
  * HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction.
3688
+ * @public
3689
3689
  */
3690
3690
  RedPrimaryY?: number;
3691
3691
  /**
3692
- * @public
3693
3692
  * HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction.
3693
+ * @public
3694
3694
  */
3695
3695
  WhitePointX?: number;
3696
3696
  /**
3697
- * @public
3698
3697
  * HDR Master Display Information must be provided by a color grader, using color grading tools. Range is 0 to 50,000, each increment represents 0.00002 in CIE1931 color coordinate. Note that this setting is not for color correction.
3698
+ * @public
3699
3699
  */
3700
3700
  WhitePointY?: number;
3701
3701
  }
@@ -3740,314 +3740,314 @@ export declare const InputSampleRange: {
3740
3740
  */
3741
3741
  export type InputSampleRange = (typeof InputSampleRange)[keyof typeof InputSampleRange];
3742
3742
  /**
3743
- * @public
3744
3743
  * Input video selectors contain the video settings for the input. Each of your inputs can have up to one video selector.
3744
+ * @public
3745
3745
  */
3746
3746
  export interface VideoSelector {
3747
3747
  /**
3748
- * @public
3749
3748
  * Ignore this setting unless this input is a QuickTime animation with an alpha channel. Use this setting to create separate Key and Fill outputs. In each output, specify which part of the input MediaConvert uses. Leave this setting at the default value DISCARD to delete the alpha channel and preserve the video. Set it to REMAP_TO_LUMA to delete the video and map the alpha channel to the luma channel of your outputs.
3749
+ * @public
3750
3750
  */
3751
3751
  AlphaBehavior?: AlphaBehavior;
3752
3752
  /**
3753
- * @public
3754
3753
  * If your input video has accurate color space metadata, or if you don't know about color space: Keep the default value, Follow. MediaConvert will automatically detect your input color space. If your input video has metadata indicating the wrong color space, or has missing metadata: Specify the accurate color space here. If your input video is HDR 10 and the SMPTE ST 2086 Mastering Display Color Volume static metadata isn't present in your video stream, or if that metadata is present but not accurate: Choose Force HDR 10. Specify correct values in the input HDR 10 metadata settings. For more information about HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr. When you specify an input color space, MediaConvert uses the following color space metadata, which includes color primaries, transfer characteristics, and matrix coefficients:
3755
3754
  * * HDR 10: BT.2020, PQ, BT.2020 non-constant
3756
3755
  * * HLG 2020: BT.2020, HLG, BT.2020 non-constant
3757
3756
  * * P3DCI (Theater): DCIP3, SMPTE 428M, BT.709
3758
3757
  * * P3D65 (SDR): Display P3, sRGB, BT.709
3759
3758
  * * P3D65 (HDR): Display P3, PQ, BT.709
3759
+ * @public
3760
3760
  */
3761
3761
  ColorSpace?: ColorSpace;
3762
3762
  /**
3763
- * @public
3764
3763
  * There are two sources for color metadata, the input file and the job input settings Color space and HDR master display information settings. The Color space usage setting determines which takes precedence. Choose Force to use color metadata from the input job settings. If you don't specify values for those settings, the service defaults to using metadata from your input. FALLBACK - Choose Fallback to use color metadata from the source when it is present. If there's no color metadata in your input file, the service defaults to using values you specify in the input settings.
3764
+ * @public
3765
3765
  */
3766
3766
  ColorSpaceUsage?: ColorSpaceUsage;
3767
3767
  /**
3768
- * @public
3769
3768
  * Set Embedded timecode override to Use MDPM when your AVCHD input contains timecode tag data in the Modified Digital Video Pack Metadata. When you do, we recommend you also set Timecode source to Embedded. Leave Embedded timecode override blank, or set to None, when your input does not contain MDPM timecode.
3769
+ * @public
3770
3770
  */
3771
3771
  EmbeddedTimecodeOverride?: EmbeddedTimecodeOverride;
3772
3772
  /**
3773
- * @public
3774
3773
  * Use these settings to provide HDR 10 metadata that is missing or inaccurate in your input video. Appropriate values vary depending on the input video and must be provided by a color grader. The color grader generates these values during the HDR 10 mastering process. The valid range for each of these settings is 0 to 50,000. Each increment represents 0.00002 in CIE1931 color coordinate. Related settings - When you specify these values, you must also set Color space to HDR 10. To specify whether the the values you specify here take precedence over the values in the metadata of your input file, set Color space usage. To specify whether color metadata is included in an output, set Color metadata. For more information about MediaConvert HDR jobs, see https://docs.aws.amazon.com/console/mediaconvert/hdr.
3774
+ * @public
3775
3775
  */
3776
3776
  Hdr10Metadata?: Hdr10Metadata;
3777
3777
  /**
3778
- * @public
3779
3778
  * Specify the maximum mastering display luminance. Enter an integer from 0 to 2147483647, in units of 0.0001 nits. For example, enter 10000000 for 1000 nits.
3779
+ * @public
3780
3780
  */
3781
3781
  MaxLuminance?: number;
3782
3782
  /**
3783
- * @public
3784
3783
  * Use this setting if your input has video and audio durations that don't align, and your output or player has strict alignment requirements. Examples: Input audio track has a delayed start. Input video track ends before audio ends. When you set Pad video to Black, MediaConvert generates black video frames so that output video and audio durations match. Black video frames are added at the beginning or end, depending on your input. To keep the default behavior and not generate black video, set Pad video to Disabled or leave blank.
3784
+ * @public
3785
3785
  */
3786
3786
  PadVideo?: PadVideo;
3787
3787
  /**
3788
- * @public
3789
3788
  * Use PID to select specific video data from an input file. Specify this value as an integer; the system automatically converts it to the hexidecimal value. For example, 257 selects PID 0x101. A PID, or packet identifier, is an identifier for a set of data in an MPEG-2 transport stream container.
3789
+ * @public
3790
3790
  */
3791
3791
  Pid?: number;
3792
3792
  /**
3793
- * @public
3794
3793
  * Selects a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported.
3794
+ * @public
3795
3795
  */
3796
3796
  ProgramNumber?: number;
3797
3797
  /**
3798
- * @public
3799
3798
  * Use Rotate to specify how the service rotates your video. You can choose automatic rotation or specify a rotation. You can specify a clockwise rotation of 0, 90, 180, or 270 degrees. If your input video container is .mov or .mp4 and your input has rotation metadata, you can choose Automatic to have the service rotate your video according to the rotation specified in the metadata. The rotation must be within one degree of 90, 180, or 270 degrees. If the rotation metadata specifies any other rotation, the service will default to no rotation. By default, the service does no rotation, even if your input video has rotation metadata. The service doesn't pass through rotation metadata.
3799
+ * @public
3800
3800
  */
3801
3801
  Rotate?: InputRotate;
3802
3802
  /**
3803
- * @public
3804
3803
  * If the sample range metadata in your input video is accurate, or if you don't know about sample range, keep the default value, Follow, for this setting. When you do, the service automatically detects your input sample range. If your input video has metadata indicating the wrong sample range, specify the accurate sample range here. When you do, MediaConvert ignores any sample range information in the input metadata. Regardless of whether MediaConvert uses the input sample range or the sample range that you specify, MediaConvert uses the sample range for transcoding and also writes it to the output metadata.
3804
+ * @public
3805
3805
  */
3806
3806
  SampleRange?: InputSampleRange;
3807
3807
  }
3808
3808
  /**
3809
- * @public
3810
3809
  * Use inputs to define the source files used in your transcoding job. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/specify-input-settings.html. You can use multiple video inputs to do input stitching. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/assembling-multiple-inputs-and-input-clips.html
3810
+ * @public
3811
3811
  */
3812
3812
  export interface Input {
3813
3813
  /**
3814
- * @public
3815
3814
  * Use to remove noise, blocking, blurriness, or ringing from your input as a pre-filter step before encoding. The Advanced input filter removes more types of compression artifacts and is an improvement when compared to basic Deblock and Denoise filters. To remove video compression artifacts from your input and improve the video quality: Choose Enabled. Additionally, this filter can help increase the video quality of your output relative to its bitrate, since noisy inputs are more complex and require more bits to encode. To help restore loss of detail after applying the filter, you can optionally add texture or sharpening as an additional step. Jobs that use this feature incur pro-tier pricing. To not apply advanced input filtering: Choose Disabled. Note that you can still apply basic filtering with Deblock and Denoise.
3815
+ * @public
3816
3816
  */
3817
3817
  AdvancedInputFilter?: AdvancedInputFilter;
3818
3818
  /**
3819
- * @public
3820
3819
  * Optional settings for Advanced input filter when you set Advanced input filter to Enabled.
3820
+ * @public
3821
3821
  */
3822
3822
  AdvancedInputFilterSettings?: AdvancedInputFilterSettings;
3823
3823
  /**
3824
- * @public
3825
3824
  * Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab. Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group.
3825
+ * @public
3826
3826
  */
3827
3827
  AudioSelectorGroups?: Record<string, AudioSelectorGroup>;
3828
3828
  /**
3829
- * @public
3830
3829
  * Use Audio selectors to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input.
3830
+ * @public
3831
3831
  */
3832
3832
  AudioSelectors?: Record<string, AudioSelector>;
3833
3833
  /**
3834
- * @public
3835
3834
  * Use captions selectors to specify the captions data from your input that you use in your outputs. You can use up to 100 captions selectors per input.
3835
+ * @public
3836
3836
  */
3837
3837
  CaptionSelectors?: Record<string, CaptionSelector>;
3838
3838
  /**
3839
- * @public
3840
3839
  * Use Cropping selection to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection.
3840
+ * @public
3841
3841
  */
3842
3842
  Crop?: Rectangle;
3843
3843
  /**
3844
- * @public
3845
3844
  * Enable Deblock to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs.
3845
+ * @public
3846
3846
  */
3847
3847
  DeblockFilter?: InputDeblockFilter;
3848
3848
  /**
3849
- * @public
3850
3849
  * Settings for decrypting any input files that you encrypt before you upload them to Amazon S3. MediaConvert can decrypt files only when you use AWS Key Management Service (KMS) to encrypt the data key that you use to encrypt your content.
3850
+ * @public
3851
3851
  */
3852
3852
  DecryptionSettings?: InputDecryptionSettings;
3853
3853
  /**
3854
- * @public
3855
3854
  * Enable Denoise to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs.
3855
+ * @public
3856
3856
  */
3857
3857
  DenoiseFilter?: InputDenoiseFilter;
3858
3858
  /**
3859
- * @public
3860
3859
  * Use this setting only when your video source has Dolby Vision studio mastering metadata that is carried in a separate XML file. Specify the Amazon S3 location for the metadata XML file. MediaConvert uses this file to provide global and frame-level metadata for Dolby Vision preprocessing. When you specify a file here and your input also has interleaved global and frame level metadata, MediaConvert ignores the interleaved metadata and uses only the the metadata from this external XML file. Note that your IAM service role must grant MediaConvert read permissions to this file. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html.
3860
+ * @public
3861
3861
  */
3862
3862
  DolbyVisionMetadataXml?: string;
3863
3863
  /**
3864
- * @public
3865
3864
  * Specify the source file for your transcoding job. You can use multiple inputs in a single job. The service concatenates these inputs, in the order that you specify them in the job, to create the outputs. If your input format is IMF, specify your input by providing the path to your CPL. For example, "s3://bucket/vf/cpl.xml". If the CPL is in an incomplete IMP, make sure to use *Supplemental IMPs* to specify any supplemental IMPs that contain assets referenced by the CPL.
3865
+ * @public
3866
3866
  */
3867
3867
  FileInput?: string;
3868
3868
  /**
3869
- * @public
3870
3869
  * Specify whether to apply input filtering to improve the video quality of your input. To apply filtering depending on your input type and quality: Choose Auto. To apply no filtering: Choose Disable. To apply filtering regardless of your input type and quality: Choose Force. When you do, you must also specify a value for Filter strength.
3870
+ * @public
3871
3871
  */
3872
3872
  FilterEnable?: InputFilterEnable;
3873
3873
  /**
3874
- * @public
3875
3874
  * Specify the strength of the input filter. To apply an automatic amount of filtering based the compression artifacts measured in your input: We recommend that you leave Filter strength blank and set Filter enable to Auto. To manually apply filtering: Enter a value from 1 to 5, where 1 is the least amount of filtering and 5 is the most. The value that you enter applies to the strength of the Deblock or Denoise filters, or to the strength of the Advanced input filter.
3875
+ * @public
3876
3876
  */
3877
3877
  FilterStrength?: number;
3878
3878
  /**
3879
- * @public
3880
3879
  * Enable the image inserter feature to include a graphic overlay on your video. Enable or disable this feature for each input individually. This setting is disabled by default.
3880
+ * @public
3881
3881
  */
3882
3882
  ImageInserter?: ImageInserter;
3883
3883
  /**
3884
- * @public
3885
3884
  * Contains sets of start and end times that together specify a portion of the input to be used in the outputs. If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them.
3885
+ * @public
3886
3886
  */
3887
3887
  InputClippings?: InputClipping[];
3888
3888
  /**
3889
- * @public
3890
3889
  * When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto. Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts.
3890
+ * @public
3891
3891
  */
3892
3892
  InputScanType?: InputScanType;
3893
3893
  /**
3894
- * @public
3895
3894
  * Use Selection placement to define the video area in your output frame. The area outside of the rectangle that you specify here is black. If you specify a value here, it will override any value that you specify in the output setting Selection placement. If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD to Respond. If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior.
3895
+ * @public
3896
3896
  */
3897
3897
  Position?: Rectangle;
3898
3898
  /**
3899
- * @public
3900
3899
  * Use Program to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default.
3900
+ * @public
3901
3901
  */
3902
3902
  ProgramNumber?: number;
3903
3903
  /**
3904
- * @public
3905
3904
  * Set PSI control for transport stream inputs to specify which data the demux process to scans.
3906
3905
  * * Ignore PSI - Scan all PIDs for audio and video.
3907
3906
  * * Use PSI - Scan only PSI data.
3907
+ * @public
3908
3908
  */
3909
3909
  PsiControl?: InputPsiControl;
3910
3910
  /**
3911
- * @public
3912
3911
  * Provide a list of any necessary supplemental IMPs. You need supplemental IMPs if the CPL that you're using for your input is in an incomplete IMP. Specify either the supplemental IMP directories with a trailing slash or the ASSETMAP.xml files. For example ["s3://bucket/ov/", "s3://bucket/vf2/ASSETMAP.xml"]. You don't need to specify the IMP that contains your input CPL, because the service automatically detects it.
3912
+ * @public
3913
3913
  */
3914
3914
  SupplementalImps?: string[];
3915
3915
  /**
3916
- * @public
3917
3916
  * Use this Timecode source setting, located under the input settings, to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded to use the timecodes in your input video. Choose Start at zero to start the first frame at zero. Choose Specified start to start the first frame at the timecode that you specify in the setting Start timecode. If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.
3917
+ * @public
3918
3918
  */
3919
3919
  TimecodeSource?: InputTimecodeSource;
3920
3920
  /**
3921
- * @public
3922
3921
  * Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings, to Specified start. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.
3922
+ * @public
3923
3923
  */
3924
3924
  TimecodeStart?: string;
3925
3925
  /**
3926
- * @public
3927
3926
  * When you include Video generator, MediaConvert creates a video input with black frames. Use this setting if you do not have a video input or if you want to add black video frames before, or after, other inputs. You can specify Video generator, or you can specify an Input file, but you cannot specify both. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/video-generator.html
3927
+ * @public
3928
3928
  */
3929
3929
  VideoGenerator?: InputVideoGenerator;
3930
3930
  /**
3931
- * @public
3932
3931
  * Contains an array of video overlays.
3932
+ * @public
3933
3933
  */
3934
3934
  VideoOverlays?: VideoOverlay[];
3935
3935
  /**
3936
- * @public
3937
3936
  * Input video selectors contain the video settings for the input. Each of your inputs can have up to one video selector.
3937
+ * @public
3938
3938
  */
3939
3939
  VideoSelector?: VideoSelector;
3940
3940
  }
3941
3941
  /**
3942
- * @public
3943
3942
  * Specified video input in a template.
3943
+ * @public
3944
3944
  */
3945
3945
  export interface InputTemplate {
3946
3946
  /**
3947
- * @public
3948
3947
  * Use to remove noise, blocking, blurriness, or ringing from your input as a pre-filter step before encoding. The Advanced input filter removes more types of compression artifacts and is an improvement when compared to basic Deblock and Denoise filters. To remove video compression artifacts from your input and improve the video quality: Choose Enabled. Additionally, this filter can help increase the video quality of your output relative to its bitrate, since noisy inputs are more complex and require more bits to encode. To help restore loss of detail after applying the filter, you can optionally add texture or sharpening as an additional step. Jobs that use this feature incur pro-tier pricing. To not apply advanced input filtering: Choose Disabled. Note that you can still apply basic filtering with Deblock and Denoise.
3948
+ * @public
3949
3949
  */
3950
3950
  AdvancedInputFilter?: AdvancedInputFilter;
3951
3951
  /**
3952
- * @public
3953
3952
  * Optional settings for Advanced input filter when you set Advanced input filter to Enabled.
3953
+ * @public
3954
3954
  */
3955
3955
  AdvancedInputFilterSettings?: AdvancedInputFilterSettings;
3956
3956
  /**
3957
- * @public
3958
3957
  * Use audio selector groups to combine multiple sidecar audio inputs so that you can assign them to a single output audio tab. Note that, if you're working with embedded audio, it's simpler to assign multiple input tracks into a single audio selector rather than use an audio selector group.
3958
+ * @public
3959
3959
  */
3960
3960
  AudioSelectorGroups?: Record<string, AudioSelectorGroup>;
3961
3961
  /**
3962
- * @public
3963
3962
  * Use Audio selectors to specify a track or set of tracks from the input that you will use in your outputs. You can use multiple Audio selectors per input.
3963
+ * @public
3964
3964
  */
3965
3965
  AudioSelectors?: Record<string, AudioSelector>;
3966
3966
  /**
3967
- * @public
3968
3967
  * Use captions selectors to specify the captions data from your input that you use in your outputs. You can use up to 100 captions selectors per input.
3968
+ * @public
3969
3969
  */
3970
3970
  CaptionSelectors?: Record<string, CaptionSelector>;
3971
3971
  /**
3972
- * @public
3973
3972
  * Use Cropping selection to specify the video area that the service will include in the output video frame. If you specify a value here, it will override any value that you specify in the output setting Cropping selection.
3973
+ * @public
3974
3974
  */
3975
3975
  Crop?: Rectangle;
3976
3976
  /**
3977
- * @public
3978
3977
  * Enable Deblock to produce smoother motion in the output. Default is disabled. Only manually controllable for MPEG2 and uncompressed video inputs.
3978
+ * @public
3979
3979
  */
3980
3980
  DeblockFilter?: InputDeblockFilter;
3981
3981
  /**
3982
- * @public
3983
3982
  * Enable Denoise to filter noise from the input. Default is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video inputs.
3983
+ * @public
3984
3984
  */
3985
3985
  DenoiseFilter?: InputDenoiseFilter;
3986
3986
  /**
3987
- * @public
3988
3987
  * Use this setting only when your video source has Dolby Vision studio mastering metadata that is carried in a separate XML file. Specify the Amazon S3 location for the metadata XML file. MediaConvert uses this file to provide global and frame-level metadata for Dolby Vision preprocessing. When you specify a file here and your input also has interleaved global and frame level metadata, MediaConvert ignores the interleaved metadata and uses only the the metadata from this external XML file. Note that your IAM service role must grant MediaConvert read permissions to this file. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html.
3988
+ * @public
3989
3989
  */
3990
3990
  DolbyVisionMetadataXml?: string;
3991
3991
  /**
3992
- * @public
3993
3992
  * Specify whether to apply input filtering to improve the video quality of your input. To apply filtering depending on your input type and quality: Choose Auto. To apply no filtering: Choose Disable. To apply filtering regardless of your input type and quality: Choose Force. When you do, you must also specify a value for Filter strength.
3993
+ * @public
3994
3994
  */
3995
3995
  FilterEnable?: InputFilterEnable;
3996
3996
  /**
3997
- * @public
3998
3997
  * Specify the strength of the input filter. To apply an automatic amount of filtering based the compression artifacts measured in your input: We recommend that you leave Filter strength blank and set Filter enable to Auto. To manually apply filtering: Enter a value from 1 to 5, where 1 is the least amount of filtering and 5 is the most. The value that you enter applies to the strength of the Deblock or Denoise filters, or to the strength of the Advanced input filter.
3998
+ * @public
3999
3999
  */
4000
4000
  FilterStrength?: number;
4001
4001
  /**
4002
- * @public
4003
4002
  * Enable the image inserter feature to include a graphic overlay on your video. Enable or disable this feature for each input individually. This setting is disabled by default.
4003
+ * @public
4004
4004
  */
4005
4005
  ImageInserter?: ImageInserter;
4006
4006
  /**
4007
- * @public
4008
4007
  * Contains sets of start and end times that together specify a portion of the input to be used in the outputs. If you provide only a start time, the clip will be the entire input from that point to the end. If you provide only an end time, it will be the entire input up to that point. When you specify more than one input clip, the transcoding service creates the job outputs by stringing the clips together in the order you specify them.
4008
+ * @public
4009
4009
  */
4010
4010
  InputClippings?: InputClipping[];
4011
4011
  /**
4012
- * @public
4013
4012
  * When you have a progressive segmented frame (PsF) input, use this setting to flag the input as PsF. MediaConvert doesn't automatically detect PsF. Therefore, flagging your input as PsF results in better preservation of video quality when you do deinterlacing and frame rate conversion. If you don't specify, the default value is Auto. Auto is the correct setting for all inputs that are not PsF. Don't set this value to PsF when your input is interlaced. Doing so creates horizontal interlacing artifacts.
4013
+ * @public
4014
4014
  */
4015
4015
  InputScanType?: InputScanType;
4016
4016
  /**
4017
- * @public
4018
4017
  * Use Selection placement to define the video area in your output frame. The area outside of the rectangle that you specify here is black. If you specify a value here, it will override any value that you specify in the output setting Selection placement. If you specify a value here, this will override any AFD values in your input, even if you set Respond to AFD to Respond. If you specify a value here, this will ignore anything that you specify for the setting Scaling Behavior.
4018
+ * @public
4019
4019
  */
4020
4020
  Position?: Rectangle;
4021
4021
  /**
4022
- * @public
4023
4022
  * Use Program to select a specific program from within a multi-program transport stream. Note that Quad 4K is not currently supported. Default is the first program within the transport stream. If the program you specify doesn't exist, the transcoding service will use this default.
4023
+ * @public
4024
4024
  */
4025
4025
  ProgramNumber?: number;
4026
4026
  /**
4027
- * @public
4028
4027
  * Set PSI control for transport stream inputs to specify which data the demux process to scans.
4029
4028
  * * Ignore PSI - Scan all PIDs for audio and video.
4030
4029
  * * Use PSI - Scan only PSI data.
4030
+ * @public
4031
4031
  */
4032
4032
  PsiControl?: InputPsiControl;
4033
4033
  /**
4034
- * @public
4035
4034
  * Use this Timecode source setting, located under the input settings, to specify how the service counts input video frames. This input frame count affects only the behavior of features that apply to a single input at a time, such as input clipping and synchronizing some captions formats. Choose Embedded to use the timecodes in your input video. Choose Start at zero to start the first frame at zero. Choose Specified start to start the first frame at the timecode that you specify in the setting Start timecode. If you don't specify a value for Timecode source, the service will use Embedded by default. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.
4035
+ * @public
4036
4036
  */
4037
4037
  TimecodeSource?: InputTimecodeSource;
4038
4038
  /**
4039
- * @public
4040
4039
  * Specify the timecode that you want the service to use for this input's initial frame. To use this setting, you must set the Timecode source setting, located under the input settings, to Specified start. For more information about timecodes, see https://docs.aws.amazon.com/console/mediaconvert/timecode.
4040
+ * @public
4041
4041
  */
4042
4042
  TimecodeStart?: string;
4043
4043
  /**
4044
- * @public
4045
4044
  * Contains an array of video overlays.
4045
+ * @public
4046
4046
  */
4047
4047
  VideoOverlays?: VideoOverlay[];
4048
4048
  /**
4049
- * @public
4050
4049
  * Input video selectors contain the video settings for the input. Each of your inputs can have up to one video selector.
4050
+ * @public
4051
4051
  */
4052
4052
  VideoSelector?: VideoSelector;
4053
4053
  }
@@ -4065,13 +4065,13 @@ export declare const AccelerationMode: {
4065
4065
  */
4066
4066
  export type AccelerationMode = (typeof AccelerationMode)[keyof typeof AccelerationMode];
4067
4067
  /**
4068
- * @public
4069
4068
  * Accelerated transcoding can significantly speed up jobs with long, visually complex content.
4069
+ * @public
4070
4070
  */
4071
4071
  export interface AccelerationSettings {
4072
4072
  /**
4073
- * @public
4074
4073
  * Specify the conditions when the service will run your job with accelerated transcoding.
4074
+ * @public
4075
4075
  */
4076
4076
  Mode: AccelerationMode | undefined;
4077
4077
  }
@@ -4117,136 +4117,136 @@ export declare const JobPhase: {
4117
4117
  */
4118
4118
  export type JobPhase = (typeof JobPhase)[keyof typeof JobPhase];
4119
4119
  /**
4120
- * @public
4121
4120
  * Provides messages from the service about jobs that you have already successfully submitted.
4121
+ * @public
4122
4122
  */
4123
4123
  export interface JobMessages {
4124
4124
  /**
4125
- * @public
4126
4125
  * List of messages that are informational only and don't indicate a problem with your job.
4126
+ * @public
4127
4127
  */
4128
4128
  Info?: string[];
4129
4129
  /**
4130
- * @public
4131
4130
  * List of messages that warn about conditions that might cause your job not to run or to fail.
4131
+ * @public
4132
4132
  */
4133
4133
  Warning?: string[];
4134
4134
  }
4135
4135
  /**
4136
- * @public
4137
4136
  * Contains details about the output's video stream
4137
+ * @public
4138
4138
  */
4139
4139
  export interface VideoDetail {
4140
4140
  /**
4141
- * @public
4142
4141
  * Height in pixels for the output
4142
+ * @public
4143
4143
  */
4144
4144
  HeightInPx?: number;
4145
4145
  /**
4146
- * @public
4147
4146
  * Width in pixels for the output
4147
+ * @public
4148
4148
  */
4149
4149
  WidthInPx?: number;
4150
4150
  }
4151
4151
  /**
4152
- * @public
4153
4152
  * Details regarding output
4153
+ * @public
4154
4154
  */
4155
4155
  export interface OutputDetail {
4156
4156
  /**
4157
- * @public
4158
4157
  * Duration in milliseconds
4158
+ * @public
4159
4159
  */
4160
4160
  DurationInMs?: number;
4161
4161
  /**
4162
- * @public
4163
4162
  * Contains details about the output's video stream
4163
+ * @public
4164
4164
  */
4165
4165
  VideoDetails?: VideoDetail;
4166
4166
  }
4167
4167
  /**
4168
- * @public
4169
4168
  * Contains details about the output groups specified in the job settings.
4169
+ * @public
4170
4170
  */
4171
4171
  export interface OutputGroupDetail {
4172
4172
  /**
4173
- * @public
4174
4173
  * Details about the output
4174
+ * @public
4175
4175
  */
4176
4176
  OutputDetails?: OutputDetail[];
4177
4177
  }
4178
4178
  /**
4179
- * @public
4180
4179
  * Description of the source and destination queues between which the job has moved, along with the timestamp of the move
4180
+ * @public
4181
4181
  */
4182
4182
  export interface QueueTransition {
4183
4183
  /**
4184
- * @public
4185
4184
  * The queue that the job was on after the transition.
4185
+ * @public
4186
4186
  */
4187
4187
  DestinationQueue?: string;
4188
4188
  /**
4189
- * @public
4190
4189
  * The queue that the job was on before the transition.
4190
+ * @public
4191
4191
  */
4192
4192
  SourceQueue?: string;
4193
4193
  /**
4194
- * @public
4195
4194
  * The time, in Unix epoch format, that the job moved from the source queue to the destination queue.
4195
+ * @public
4196
4196
  */
4197
4197
  Timestamp?: Date;
4198
4198
  }
4199
4199
  /**
4200
- * @public
4201
4200
  * Use ad avail blanking settings to specify your output content during SCTE-35 triggered ad avails. You can blank your video or overlay it with an image. MediaConvert also removes any audio and embedded captions during the ad avail. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/ad-avail-blanking.html.
4201
+ * @public
4202
4202
  */
4203
4203
  export interface AvailBlanking {
4204
4204
  /**
4205
- * @public
4206
4205
  * Blanking image to be used. Leave empty for solid black. Only bmp and png images are supported.
4206
+ * @public
4207
4207
  */
4208
4208
  AvailBlankingImage?: string;
4209
4209
  }
4210
4210
  /**
4211
- * @public
4212
4211
  * ESAM ManifestConfirmConditionNotification defined by OC-SP-ESAM-API-I03-131025.
4212
+ * @public
4213
4213
  */
4214
4214
  export interface EsamManifestConfirmConditionNotification {
4215
4215
  /**
4216
- * @public
4217
4216
  * Provide your ESAM ManifestConfirmConditionNotification XML document inside your JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The transcoder will use the Manifest Conditioning instructions in the message that you supply.
4217
+ * @public
4218
4218
  */
4219
4219
  MccXml?: string;
4220
4220
  }
4221
4221
  /**
4222
- * @public
4223
4222
  * ESAM SignalProcessingNotification data defined by OC-SP-ESAM-API-I03-131025.
4223
+ * @public
4224
4224
  */
4225
4225
  export interface EsamSignalProcessingNotification {
4226
4226
  /**
4227
- * @public
4228
4227
  * Provide your ESAM SignalProcessingNotification XML document inside your JSON job settings. Form the XML document as per OC-SP-ESAM-API-I03-131025. The transcoder will use the signal processing instructions in the message that you supply. For your MPEG2-TS file outputs, if you want the service to place SCTE-35 markers at the insertion points you specify in the XML document, you must also enable SCTE-35 ESAM. Note that you can either specify an ESAM XML document or enable SCTE-35 passthrough. You can't do both.
4228
+ * @public
4229
4229
  */
4230
4230
  SccXml?: string;
4231
4231
  }
4232
4232
  /**
4233
- * @public
4234
4233
  * Settings for Event Signaling And Messaging (ESAM). If you don't do ad insertion, you can ignore these settings.
4234
+ * @public
4235
4235
  */
4236
4236
  export interface EsamSettings {
4237
4237
  /**
4238
- * @public
4239
4238
  * Specifies an ESAM ManifestConfirmConditionNotification XML as per OC-SP-ESAM-API-I03-131025. The transcoder uses the manifest conditioning instructions that you provide in the setting MCC XML.
4239
+ * @public
4240
4240
  */
4241
4241
  ManifestConfirmConditionNotification?: EsamManifestConfirmConditionNotification;
4242
4242
  /**
4243
- * @public
4244
4243
  * Specifies the stream distance, in milliseconds, between the SCTE 35 messages that the transcoder places and the splice points that they refer to. If the time between the start of the asset and the SCTE-35 message is less than this value, then the transcoder places the SCTE-35 marker at the beginning of the stream.
4244
+ * @public
4245
4245
  */
4246
4246
  ResponseSignalPreroll?: number;
4247
4247
  /**
4248
- * @public
4249
4248
  * Specifies an ESAM SignalProcessingNotification XML as per OC-SP-ESAM-API-I03-131025. The transcoder uses the signal processing instructions that you provide in the setting SCC XML.
4249
+ * @public
4250
4250
  */
4251
4251
  SignalProcessingNotification?: EsamSignalProcessingNotification;
4252
4252
  }
@@ -4275,105 +4275,105 @@ export declare const VchipAction: {
4275
4275
  */
4276
4276
  export type VchipAction = (typeof VchipAction)[keyof typeof VchipAction];
4277
4277
  /**
4278
- * @public
4279
4278
  * If your source content has EIA-608 Line 21 Data Services, enable this feature to specify what MediaConvert does with the Extended Data Services (XDS) packets. You can choose to pass through XDS packets, or remove them from the output. For more information about XDS, see EIA-608 Line Data Services, section 9.5.1.5 05h Content Advisory.
4279
+ * @public
4280
4280
  */
4281
4281
  export interface ExtendedDataServices {
4282
4282
  /**
4283
- * @public
4284
4283
  * The action to take on copy and redistribution control XDS packets. If you select PASSTHROUGH, packets will not be changed. If you select STRIP, any packets will be removed in output captions.
4284
+ * @public
4285
4285
  */
4286
4286
  CopyProtectionAction?: CopyProtectionAction;
4287
4287
  /**
4288
- * @public
4289
4288
  * The action to take on content advisory XDS packets. If you select PASSTHROUGH, packets will not be changed. If you select STRIP, any packets will be removed in output captions.
4289
+ * @public
4290
4290
  */
4291
4291
  VchipAction?: VchipAction;
4292
4292
  }
4293
4293
  /**
4294
- * @public
4295
4294
  * Use these settings only when you use Kantar watermarking. Specify the values that MediaConvert uses to generate and place Kantar watermarks in your output audio. These settings apply to every output in your job. In addition to specifying these values, you also need to store your Kantar credentials in AWS Secrets Manager. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/kantar-watermarking.html.
4295
+ * @public
4296
4296
  */
4297
4297
  export interface KantarWatermarkSettings {
4298
4298
  /**
4299
- * @public
4300
4299
  * Provide an audio channel name from your Kantar audio license.
4300
+ * @public
4301
4301
  */
4302
4302
  ChannelName?: string;
4303
4303
  /**
4304
- * @public
4305
4304
  * Specify a unique identifier for Kantar to use for this piece of content.
4305
+ * @public
4306
4306
  */
4307
4307
  ContentReference?: string;
4308
4308
  /**
4309
- * @public
4310
4309
  * Provide the name of the AWS Secrets Manager secret where your Kantar credentials are stored. Note that your MediaConvert service role must provide access to this secret. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/granting-permissions-for-mediaconvert-to-access-secrets-manager-secret.html. For instructions on creating a secret, see https://docs.aws.amazon.com/secretsmanager/latest/userguide/tutorials_basic.html, in the AWS Secrets Manager User Guide.
4310
+ * @public
4311
4311
  */
4312
4312
  CredentialsSecretName?: string;
4313
4313
  /**
4314
- * @public
4315
4314
  * Optional. Specify an offset, in whole seconds, from the start of your output and the beginning of the watermarking. When you don't specify an offset, Kantar defaults to zero.
4315
+ * @public
4316
4316
  */
4317
4317
  FileOffset?: number;
4318
4318
  /**
4319
- * @public
4320
4319
  * Provide your Kantar license ID number. You should get this number from Kantar.
4320
+ * @public
4321
4321
  */
4322
4322
  KantarLicenseId?: number;
4323
4323
  /**
4324
- * @public
4325
4324
  * Provide the HTTPS endpoint to the Kantar server. You should get this endpoint from Kantar.
4325
+ * @public
4326
4326
  */
4327
4327
  KantarServerUrl?: string;
4328
4328
  /**
4329
- * @public
4330
4329
  * Optional. Specify the Amazon S3 bucket where you want MediaConvert to store your Kantar watermark XML logs. When you don't specify a bucket, MediaConvert doesn't save these logs. Note that your MediaConvert service role must provide access to this location. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html
4330
+ * @public
4331
4331
  */
4332
4332
  LogDestination?: string;
4333
4333
  /**
4334
- * @public
4335
4334
  * You can optionally use this field to specify the first timestamp that Kantar embeds during watermarking. Kantar suggests that you be very cautious when using this Kantar feature, and that you use it only on channels that are managed specifically for use with this feature by your Audience Measurement Operator. For more information about this feature, contact Kantar technical support.
4335
+ * @public
4336
4336
  */
4337
4337
  Metadata3?: string;
4338
4338
  /**
4339
- * @public
4340
4339
  * Additional metadata that MediaConvert sends to Kantar. Maximum length is 50 characters.
4340
+ * @public
4341
4341
  */
4342
4342
  Metadata4?: string;
4343
4343
  /**
4344
- * @public
4345
4344
  * Additional metadata that MediaConvert sends to Kantar. Maximum length is 50 characters.
4345
+ * @public
4346
4346
  */
4347
4347
  Metadata5?: string;
4348
4348
  /**
4349
- * @public
4350
4349
  * Additional metadata that MediaConvert sends to Kantar. Maximum length is 50 characters.
4350
+ * @public
4351
4351
  */
4352
4352
  Metadata6?: string;
4353
4353
  /**
4354
- * @public
4355
4354
  * Additional metadata that MediaConvert sends to Kantar. Maximum length is 50 characters.
4355
+ * @public
4356
4356
  */
4357
4357
  Metadata7?: string;
4358
4358
  /**
4359
- * @public
4360
4359
  * Additional metadata that MediaConvert sends to Kantar. Maximum length is 50 characters.
4360
+ * @public
4361
4361
  */
4362
4362
  Metadata8?: string;
4363
4363
  }
4364
4364
  /**
4365
- * @public
4366
4365
  * For motion overlays that don't have a built-in frame rate, specify the frame rate of the overlay in frames per second, as a fraction. For example, specify 24 fps as 24/1. The overlay frame rate doesn't need to match the frame rate of the underlying video.
4366
+ * @public
4367
4367
  */
4368
4368
  export interface MotionImageInsertionFramerate {
4369
4369
  /**
4370
- * @public
4371
4370
  * The bottom of the fraction that expresses your overlay frame rate. For example, if your frame rate is 24 fps, set this value to 1.
4371
+ * @public
4372
4372
  */
4373
4373
  FramerateDenominator?: number;
4374
4374
  /**
4375
- * @public
4376
4375
  * The top of the fraction that expresses your overlay frame rate. For example, if your frame rate is 24 fps, set this value to 24.
4376
+ * @public
4377
4377
  */
4378
4378
  FramerateNumerator?: number;
4379
4379
  }
@@ -4390,18 +4390,18 @@ export declare const MotionImageInsertionMode: {
4390
4390
  */
4391
4391
  export type MotionImageInsertionMode = (typeof MotionImageInsertionMode)[keyof typeof MotionImageInsertionMode];
4392
4392
  /**
4393
- * @public
4394
4393
  * Specify the offset between the upper-left corner of the video frame and the top left corner of the overlay.
4394
+ * @public
4395
4395
  */
4396
4396
  export interface MotionImageInsertionOffset {
4397
4397
  /**
4398
- * @public
4399
4398
  * Set the distance, in pixels, between the overlay and the left edge of the video frame.
4399
+ * @public
4400
4400
  */
4401
4401
  ImageX?: number;
4402
4402
  /**
4403
- * @public
4404
4403
  * Set the distance, in pixels, between the overlay and the top edge of the video frame.
4404
+ * @public
4405
4405
  */
4406
4406
  ImageY?: number;
4407
4407
  }
@@ -4418,54 +4418,54 @@ export declare const MotionImagePlayback: {
4418
4418
  */
4419
4419
  export type MotionImagePlayback = (typeof MotionImagePlayback)[keyof typeof MotionImagePlayback];
4420
4420
  /**
4421
- * @public
4422
4421
  * Overlay motion graphics on top of your video. The motion graphics that you specify here appear on all outputs in all output groups. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/motion-graphic-overlay.html.
4422
+ * @public
4423
4423
  */
4424
4424
  export interface MotionImageInserter {
4425
4425
  /**
4426
- * @public
4427
4426
  * If your motion graphic asset is a .mov file, keep this setting unspecified. If your motion graphic asset is a series of .png files, specify the frame rate of the overlay in frames per second, as a fraction. For example, specify 24 fps as 24/1. Make sure that the number of images in your series matches the frame rate and your intended overlay duration. For example, if you want a 30-second overlay at 30 fps, you should have 900 .png images. This overlay frame rate doesn't need to match the frame rate of the underlying video.
4427
+ * @public
4428
4428
  */
4429
4429
  Framerate?: MotionImageInsertionFramerate;
4430
4430
  /**
4431
- * @public
4432
4431
  * Specify the .mov file or series of .png files that you want to overlay on your video. For .png files, provide the file name of the first file in the series. Make sure that the names of the .png files end with sequential numbers that specify the order that they are played in. For example, overlay_000.png, overlay_001.png, overlay_002.png, and so on. The sequence must start at zero, and each image file name must have the same number of digits. Pad your initial file names with enough zeros to complete the sequence. For example, if the first image is overlay_0.png, there can be only 10 images in the sequence, with the last image being overlay_9.png. But if the first image is overlay_00.png, there can be 100 images in the sequence.
4432
+ * @public
4433
4433
  */
4434
4434
  Input?: string;
4435
4435
  /**
4436
- * @public
4437
4436
  * Choose the type of motion graphic asset that you are providing for your overlay. You can choose either a .mov file or a series of .png files.
4437
+ * @public
4438
4438
  */
4439
4439
  InsertionMode?: MotionImageInsertionMode;
4440
4440
  /**
4441
- * @public
4442
4441
  * Use Offset to specify the placement of your motion graphic overlay on the video frame. Specify in pixels, from the upper-left corner of the frame. If you don't specify an offset, the service scales your overlay to the full size of the frame. Otherwise, the service inserts the overlay at its native resolution and scales the size up or down with any video scaling.
4442
+ * @public
4443
4443
  */
4444
4444
  Offset?: MotionImageInsertionOffset;
4445
4445
  /**
4446
- * @public
4447
4446
  * Specify whether your motion graphic overlay repeats on a loop or plays only once.
4447
+ * @public
4448
4448
  */
4449
4449
  Playback?: MotionImagePlayback;
4450
4450
  /**
4451
- * @public
4452
4451
  * Specify when the motion overlay begins. Use timecode format (HH:MM:SS:FF or HH:MM:SS;FF). Make sure that the timecode you provide here takes into account how you have set up your timecode configuration under both job settings and input settings. The simplest way to do that is to set both to start at 0. If you need to set up your job to follow timecodes embedded in your source that don't start at zero, make sure that you specify a start time that is after the first embedded timecode. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/setting-up-timecode.html
4452
+ * @public
4453
4453
  */
4454
4454
  StartTime?: string;
4455
4455
  }
4456
4456
  /**
4457
- * @public
4458
4457
  * Settings for your Nielsen configuration. If you don't do Nielsen measurement and analytics, ignore these settings. When you enable Nielsen configuration, MediaConvert enables PCM to ID3 tagging for all outputs in the job.
4458
+ * @public
4459
4459
  */
4460
4460
  export interface NielsenConfiguration {
4461
4461
  /**
4462
- * @public
4463
4462
  * Nielsen has discontinued the use of breakout code functionality. If you must include this property, set the value to zero.
4463
+ * @public
4464
4464
  */
4465
4465
  BreakoutCode?: number;
4466
4466
  /**
4467
- * @public
4468
4467
  * Use Distributor ID to specify the distributor ID that is assigned to your organization by Nielsen.
4468
+ * @public
4469
4469
  */
4470
4470
  DistributorId?: string;
4471
4471
  }
@@ -4507,100 +4507,100 @@ export declare const NielsenUniqueTicPerAudioTrackType: {
4507
4507
  */
4508
4508
  export type NielsenUniqueTicPerAudioTrackType = (typeof NielsenUniqueTicPerAudioTrackType)[keyof typeof NielsenUniqueTicPerAudioTrackType];
4509
4509
  /**
4510
- * @public
4511
4510
  * Ignore these settings unless you are using Nielsen non-linear watermarking. Specify the values that MediaConvert uses to generate and place Nielsen watermarks in your output audio. In addition to specifying these values, you also need to set up your cloud TIC server. These settings apply to every output in your job. The MediaConvert implementation is currently with the following Nielsen versions: Nielsen Watermark SDK Version 5.2.1 Nielsen NLM Watermark Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0]
4511
+ * @public
4512
4512
  */
4513
4513
  export interface NielsenNonLinearWatermarkSettings {
4514
4514
  /**
4515
- * @public
4516
4515
  * Choose the type of Nielsen watermarks that you want in your outputs. When you choose NAES 2 and NW, you must provide a value for the setting SID. When you choose CBET, you must provide a value for the setting CSID. When you choose NAES 2, NW, and CBET, you must provide values for both of these settings.
4516
+ * @public
4517
4517
  */
4518
4518
  ActiveWatermarkProcess?: NielsenActiveWatermarkProcessType;
4519
4519
  /**
4520
- * @public
4521
4520
  * Optional. Use this setting when you want the service to include an ADI file in the Nielsen metadata .zip file. To provide an ADI file, store it in Amazon S3 and provide a URL to it here. The URL should be in the following format: S3://bucket/path/ADI-file. For more information about the metadata .zip file, see the setting Metadata destination.
4521
+ * @public
4522
4522
  */
4523
4523
  AdiFilename?: string;
4524
4524
  /**
4525
- * @public
4526
4525
  * Use the asset ID that you provide to Nielsen to uniquely identify this asset. Required for all Nielsen non-linear watermarking.
4526
+ * @public
4527
4527
  */
4528
4528
  AssetId?: string;
4529
4529
  /**
4530
- * @public
4531
4530
  * Use the asset name that you provide to Nielsen for this asset. Required for all Nielsen non-linear watermarking.
4531
+ * @public
4532
4532
  */
4533
4533
  AssetName?: string;
4534
4534
  /**
4535
- * @public
4536
4535
  * Use the CSID that Nielsen provides to you. This CBET source ID should be unique to your Nielsen account but common to all of your output assets that have CBET watermarking. Required when you choose a value for the setting Watermark types that includes CBET.
4536
+ * @public
4537
4537
  */
4538
4538
  CbetSourceId?: string;
4539
4539
  /**
4540
- * @public
4541
4540
  * Optional. If this asset uses an episode ID with Nielsen, provide it here.
4541
+ * @public
4542
4542
  */
4543
4543
  EpisodeId?: string;
4544
4544
  /**
4545
- * @public
4546
4545
  * Specify the Amazon S3 location where you want MediaConvert to save your Nielsen non-linear metadata .zip file. This Amazon S3 bucket must be in the same Region as the one where you do your MediaConvert transcoding. If you want to include an ADI file in this .zip file, use the setting ADI file to specify it. MediaConvert delivers the Nielsen metadata .zip files only to your metadata destination Amazon S3 bucket. It doesn't deliver the .zip files to Nielsen. You are responsible for delivering the metadata .zip files to Nielsen.
4546
+ * @public
4547
4547
  */
4548
4548
  MetadataDestination?: string;
4549
4549
  /**
4550
- * @public
4551
4550
  * Use the SID that Nielsen provides to you. This source ID should be unique to your Nielsen account but common to all of your output assets. Required for all Nielsen non-linear watermarking. This ID should be unique to your Nielsen account but common to all of your output assets. Required for all Nielsen non-linear watermarking.
4551
+ * @public
4552
4552
  */
4553
4553
  SourceId?: number;
4554
4554
  /**
4555
- * @public
4556
4555
  * Required. Specify whether your source content already contains Nielsen non-linear watermarks. When you set this value to Watermarked, the service fails the job. Nielsen requires that you add non-linear watermarking to only clean content that doesn't already have non-linear Nielsen watermarks.
4556
+ * @public
4557
4557
  */
4558
4558
  SourceWatermarkStatus?: NielsenSourceWatermarkStatusType;
4559
4559
  /**
4560
- * @public
4561
4560
  * Specify the endpoint for the TIC server that you have deployed and configured in the AWS Cloud. Required for all Nielsen non-linear watermarking. MediaConvert can't connect directly to a TIC server. Instead, you must use API Gateway to provide a RESTful interface between MediaConvert and a TIC server that you deploy in your AWS account. For more information on deploying a TIC server in your AWS account and the required API Gateway, contact Nielsen support.
4561
+ * @public
4562
4562
  */
4563
4563
  TicServerUrl?: string;
4564
4564
  /**
4565
- * @public
4566
4565
  * To create assets that have the same TIC values in each audio track, keep the default value Share TICs. To create assets that have unique TIC values for each audio track, choose Use unique TICs.
4566
+ * @public
4567
4567
  */
4568
4568
  UniqueTicPerAudioTrack?: NielsenUniqueTicPerAudioTrackType;
4569
4569
  }
4570
4570
  /**
4571
- * @public
4572
4571
  * Use automated ABR to have MediaConvert set up the renditions in your ABR package for you automatically, based on characteristics of your input video. This feature optimizes video quality while minimizing the overall size of your ABR package.
4572
+ * @public
4573
4573
  */
4574
4574
  export interface AutomatedAbrSettings {
4575
4575
  /**
4576
- * @public
4577
4576
  * Specify the maximum average bitrate for MediaConvert to use in your automated ABR stack. If you don't specify a value, MediaConvert uses 8,000,000 (8 mb/s) by default. The average bitrate of your highest-quality rendition will be equal to or below this value, depending on the quality, complexity, and resolution of your content. Note that the instantaneous maximum bitrate may vary above the value that you specify.
4577
+ * @public
4578
4578
  */
4579
4579
  MaxAbrBitrate?: number;
4580
4580
  /**
4581
- * @public
4582
4581
  * Optional. The maximum number of renditions that MediaConvert will create in your automated ABR stack. The number of renditions is determined automatically, based on analysis of each job, but will never exceed this limit. When you set this to Auto in the console, which is equivalent to excluding it from your JSON job specification, MediaConvert defaults to a limit of 15.
4582
+ * @public
4583
4583
  */
4584
4584
  MaxRenditions?: number;
4585
4585
  /**
4586
- * @public
4587
4586
  * Specify the minimum average bitrate for MediaConvert to use in your automated ABR stack. If you don't specify a value, MediaConvert uses 600,000 (600 kb/s) by default. The average bitrate of your lowest-quality rendition will be near this value. Note that the instantaneous minimum bitrate may vary below the value that you specify.
4587
+ * @public
4588
4588
  */
4589
4589
  MinAbrBitrate?: number;
4590
4590
  /**
4591
- * @public
4592
4591
  * Optional. Use Automated ABR rules to specify restrictions for the rendition sizes MediaConvert will create in your ABR stack. You can use these rules if your ABR workflow has specific rendition size requirements, but you still want MediaConvert to optimize for video quality and overall file size.
4592
+ * @public
4593
4593
  */
4594
4594
  Rules?: AutomatedAbrRule[];
4595
4595
  }
4596
4596
  /**
4597
- * @public
4598
4597
  * Use automated encoding to have MediaConvert choose your encoding settings for you, based on characteristics of your input video.
4598
+ * @public
4599
4599
  */
4600
4600
  export interface AutomatedEncodingSettings {
4601
4601
  /**
4602
- * @public
4603
4602
  * Use automated ABR to have MediaConvert set up the renditions in your ABR package for you automatically, based on characteristics of your input video. This feature optimizes video quality while minimizing the overall size of your ABR package.
4603
+ * @public
4604
4604
  */
4605
4605
  AbrSettings?: AutomatedAbrSettings;
4606
4606
  }
@@ -4656,13 +4656,13 @@ export declare const S3ObjectCannedAcl: {
4656
4656
  */
4657
4657
  export type S3ObjectCannedAcl = (typeof S3ObjectCannedAcl)[keyof typeof S3ObjectCannedAcl];
4658
4658
  /**
4659
- * @public
4660
4659
  * Optional. Have MediaConvert automatically apply Amazon S3 access control for the outputs in this output group. When you don't use this setting, S3 automatically applies the default access control list PRIVATE.
4660
+ * @public
4661
4661
  */
4662
4662
  export interface S3DestinationAccessControl {
4663
4663
  /**
4664
- * @public
4665
4664
  * Choose an Amazon S3 canned ACL for MediaConvert to apply to this output.
4665
+ * @public
4666
4666
  */
4667
4667
  CannedAcl?: S3ObjectCannedAcl;
4668
4668
  }
@@ -4679,23 +4679,23 @@ export declare const S3ServerSideEncryptionType: {
4679
4679
  */
4680
4680
  export type S3ServerSideEncryptionType = (typeof S3ServerSideEncryptionType)[keyof typeof S3ServerSideEncryptionType];
4681
4681
  /**
4682
- * @public
4683
4682
  * Settings for how your job outputs are encrypted as they are uploaded to Amazon S3.
4683
+ * @public
4684
4684
  */
4685
4685
  export interface S3EncryptionSettings {
4686
4686
  /**
4687
- * @public
4688
4687
  * Specify how you want your data keys managed. AWS uses data keys to encrypt your content. AWS also encrypts the data keys themselves, using a customer master key (CMK), and then stores the encrypted data keys alongside your encrypted content. Use this setting to specify which AWS service manages the CMK. For simplest set up, choose Amazon S3. If you want your master key to be managed by AWS Key Management Service (KMS), choose AWS KMS. By default, when you choose AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with Amazon S3 to encrypt your data keys. You can optionally choose to specify a different, customer managed CMK. Do so by specifying the Amazon Resource Name (ARN) of the key for the setting KMS ARN.
4688
+ * @public
4689
4689
  */
4690
4690
  EncryptionType?: S3ServerSideEncryptionType;
4691
4691
  /**
4692
- * @public
4693
4692
  * Optionally, specify the encryption context that you want to use alongside your KMS key. AWS KMS uses this encryption context as additional authenticated data (AAD) to support authenticated encryption. This value must be a base64-encoded UTF-8 string holding JSON which represents a string-string map. To use this setting, you must also set Server-side encryption to AWS KMS. For more information about encryption context, see: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context.
4693
+ * @public
4694
4694
  */
4695
4695
  KmsEncryptionContext?: string;
4696
4696
  /**
4697
- * @public
4698
4697
  * Optionally, specify the customer master key (CMK) that you want to use to encrypt the data key that AWS uses to encrypt your output content. Enter the Amazon Resource Name (ARN) of the CMK. To use this setting, you must also set Server-side encryption to AWS KMS. If you set Server-side encryption to AWS KMS but don't specify a CMK here, AWS uses the AWS managed CMK associated with Amazon S3.
4698
+ * @public
4699
4699
  */
4700
4700
  KmsKeyArn?: string;
4701
4701
  }
@@ -4717,34 +4717,34 @@ export declare const S3StorageClass: {
4717
4717
  */
4718
4718
  export type S3StorageClass = (typeof S3StorageClass)[keyof typeof S3StorageClass];
4719
4719
  /**
4720
- * @public
4721
4720
  * Settings associated with S3 destination
4721
+ * @public
4722
4722
  */
4723
4723
  export interface S3DestinationSettings {
4724
4724
  /**
4725
- * @public
4726
4725
  * Optional. Have MediaConvert automatically apply Amazon S3 access control for the outputs in this output group. When you don't use this setting, S3 automatically applies the default access control list PRIVATE.
4726
+ * @public
4727
4727
  */
4728
4728
  AccessControl?: S3DestinationAccessControl;
4729
4729
  /**
4730
- * @public
4731
4730
  * Settings for how your job outputs are encrypted as they are uploaded to Amazon S3.
4731
+ * @public
4732
4732
  */
4733
4733
  Encryption?: S3EncryptionSettings;
4734
4734
  /**
4735
- * @public
4736
4735
  * Specify the S3 storage class to use for this output. To use your destination's default storage class: Keep the default value, Not set. For more information about S3 storage classes, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html
4736
+ * @public
4737
4737
  */
4738
4738
  StorageClass?: S3StorageClass;
4739
4739
  }
4740
4740
  /**
4741
- * @public
4742
4741
  * Settings associated with the destination. Will vary based on the type of destination
4742
+ * @public
4743
4743
  */
4744
4744
  export interface DestinationSettings {
4745
4745
  /**
4746
- * @public
4747
4746
  * Settings associated with S3 destination
4747
+ * @public
4748
4748
  */
4749
4749
  S3Settings?: S3DestinationSettings;
4750
4750
  }
@@ -4773,59 +4773,59 @@ export declare const CmafInitializationVectorInManifest: {
4773
4773
  */
4774
4774
  export type CmafInitializationVectorInManifest = (typeof CmafInitializationVectorInManifest)[keyof typeof CmafInitializationVectorInManifest];
4775
4775
  /**
4776
- * @public
4777
4776
  * If your output group type is CMAF, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is HLS, DASH, or Microsoft Smooth, use the SpekeKeyProvider settings instead.
4777
+ * @public
4778
4778
  */
4779
4779
  export interface SpekeKeyProviderCmaf {
4780
4780
  /**
4781
- * @public
4782
4781
  * If you want your key provider to encrypt the content keys that it provides to MediaConvert, set up a certificate with a master key using AWS Certificate Manager. Specify the certificate's Amazon Resource Name (ARN) here.
4782
+ * @public
4783
4783
  */
4784
4784
  CertificateArn?: string;
4785
4785
  /**
4786
- * @public
4787
4786
  * Specify the DRM system IDs that you want signaled in the DASH manifest that MediaConvert creates as part of this CMAF package. The DASH manifest can currently signal up to three system IDs. For more information, see https://dashif.org/identifiers/content_protection/.
4787
+ * @public
4788
4788
  */
4789
4789
  DashSignaledSystemIds?: string[];
4790
4790
  /**
4791
- * @public
4792
4791
  * Specify the DRM system ID that you want signaled in the HLS manifest that MediaConvert creates as part of this CMAF package. The HLS manifest can currently signal only one system ID. For more information, see https://dashif.org/identifiers/content_protection/.
4792
+ * @public
4793
4793
  */
4794
4794
  HlsSignaledSystemIds?: string[];
4795
4795
  /**
4796
- * @public
4797
4796
  * Specify the resource ID that your SPEKE-compliant key provider uses to identify this content.
4797
+ * @public
4798
4798
  */
4799
4799
  ResourceId?: string;
4800
4800
  /**
4801
- * @public
4802
4801
  * Specify the URL to the key server that your SPEKE-compliant DRM key provider uses to provide keys for encrypting your content.
4802
+ * @public
4803
4803
  */
4804
4804
  Url?: string;
4805
4805
  }
4806
4806
  /**
4807
- * @public
4808
4807
  * Use these settings to set up encryption with a static key provider.
4808
+ * @public
4809
4809
  */
4810
4810
  export interface StaticKeyProvider {
4811
4811
  /**
4812
- * @public
4813
4812
  * Relates to DRM implementation. Sets the value of the KEYFORMAT attribute. Must be 'identity' or a reverse DNS string. May be omitted to indicate an implicit value of 'identity'.
4813
+ * @public
4814
4814
  */
4815
4815
  KeyFormat?: string;
4816
4816
  /**
4817
- * @public
4818
4817
  * Relates to DRM implementation. Either a single positive integer version value or a slash delimited list of version values (1/2/3).
4818
+ * @public
4819
4819
  */
4820
4820
  KeyFormatVersions?: string;
4821
4821
  /**
4822
- * @public
4823
4822
  * Relates to DRM implementation. Use a 32-character hexidecimal string to specify Key Value.
4823
+ * @public
4824
4824
  */
4825
4825
  StaticKeyValue?: string;
4826
4826
  /**
4827
- * @public
4828
4827
  * Relates to DRM implementation. The location of the license server used for protecting content.
4828
+ * @public
4829
4829
  */
4830
4830
  Url?: string;
4831
4831
  }
@@ -4842,38 +4842,38 @@ export declare const CmafKeyProviderType: {
4842
4842
  */
4843
4843
  export type CmafKeyProviderType = (typeof CmafKeyProviderType)[keyof typeof CmafKeyProviderType];
4844
4844
  /**
4845
- * @public
4846
4845
  * Settings for CMAF encryption
4846
+ * @public
4847
4847
  */
4848
4848
  export interface CmafEncryptionSettings {
4849
4849
  /**
4850
- * @public
4851
4850
  * This is a 128-bit, 16-byte hex value represented by a 32-character text string. If this parameter is not set then the Initialization Vector will follow the segment number by default.
4851
+ * @public
4852
4852
  */
4853
4853
  ConstantInitializationVector?: string;
4854
4854
  /**
4855
- * @public
4856
4855
  * Specify the encryption scheme that you want the service to use when encrypting your CMAF segments. Choose AES-CBC subsample or AES_CTR.
4856
+ * @public
4857
4857
  */
4858
4858
  EncryptionMethod?: CmafEncryptionType;
4859
4859
  /**
4860
- * @public
4861
4860
  * When you use DRM with CMAF outputs, choose whether the service writes the 128-bit encryption initialization vector in the HLS and DASH manifests.
4861
+ * @public
4862
4862
  */
4863
4863
  InitializationVectorInManifest?: CmafInitializationVectorInManifest;
4864
4864
  /**
4865
- * @public
4866
4865
  * If your output group type is CMAF, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is HLS, DASH, or Microsoft Smooth, use the SpekeKeyProvider settings instead.
4866
+ * @public
4867
4867
  */
4868
4868
  SpekeKeyProvider?: SpekeKeyProviderCmaf;
4869
4869
  /**
4870
- * @public
4871
4870
  * Use these settings to set up encryption with a static key provider.
4871
+ * @public
4872
4872
  */
4873
4873
  StaticKeyProvider?: StaticKeyProvider;
4874
4874
  /**
4875
- * @public
4876
4875
  * Specify whether your DRM encryption key is static or from a key provider that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.
4876
+ * @public
4877
4877
  */
4878
4878
  Type?: CmafKeyProviderType;
4879
4879
  }
@@ -4904,38 +4904,38 @@ export declare const CmafIntervalCadence: {
4904
4904
  */
4905
4905
  export type CmafIntervalCadence = (typeof CmafIntervalCadence)[keyof typeof CmafIntervalCadence];
4906
4906
  /**
4907
- * @public
4908
4907
  * Tile and thumbnail settings applicable when imageBasedTrickPlay is ADVANCED
4908
+ * @public
4909
4909
  */
4910
4910
  export interface CmafImageBasedTrickPlaySettings {
4911
4911
  /**
4912
- * @public
4913
4912
  * The cadence MediaConvert follows for generating thumbnails. If set to FOLLOW_IFRAME, MediaConvert generates thumbnails for each IDR frame in the output (matching the GOP cadence). If set to FOLLOW_CUSTOM, MediaConvert generates thumbnails according to the interval you specify in thumbnailInterval.
4913
+ * @public
4914
4914
  */
4915
4915
  IntervalCadence?: CmafIntervalCadence;
4916
4916
  /**
4917
- * @public
4918
4917
  * Height of each thumbnail within each tile image, in pixels. Leave blank to maintain aspect ratio with thumbnail width. If following the aspect ratio would lead to a total tile height greater than 4096, then the job will be rejected. Must be divisible by 2.
4918
+ * @public
4919
4919
  */
4920
4920
  ThumbnailHeight?: number;
4921
4921
  /**
4922
- * @public
4923
4922
  * Enter the interval, in seconds, that MediaConvert uses to generate thumbnails. If the interval you enter doesn't align with the output frame rate, MediaConvert automatically rounds the interval to align with the output frame rate. For example, if the output frame rate is 29.97 frames per second and you enter 5, MediaConvert uses a 150 frame interval to generate thumbnails.
4923
+ * @public
4924
4924
  */
4925
4925
  ThumbnailInterval?: number;
4926
4926
  /**
4927
- * @public
4928
4927
  * Width of each thumbnail within each tile image, in pixels. Default is 312. Must be divisible by 8.
4928
+ * @public
4929
4929
  */
4930
4930
  ThumbnailWidth?: number;
4931
4931
  /**
4932
- * @public
4933
4932
  * Number of thumbnails in each column of a tile image. Set a value between 2 and 2048. Must be divisible by 2.
4933
+ * @public
4934
4934
  */
4935
4935
  TileHeight?: number;
4936
4936
  /**
4937
- * @public
4938
4937
  * Number of thumbnails in each row of a tile image. Set a value between 1 and 512.
4938
+ * @public
4939
4939
  */
4940
4940
  TileWidth?: number;
4941
4941
  }
@@ -5096,143 +5096,143 @@ export declare const CmafWriteSegmentTimelineInRepresentation: {
5096
5096
  */
5097
5097
  export type CmafWriteSegmentTimelineInRepresentation = (typeof CmafWriteSegmentTimelineInRepresentation)[keyof typeof CmafWriteSegmentTimelineInRepresentation];
5098
5098
  /**
5099
- * @public
5100
5099
  * Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
5100
+ * @public
5101
5101
  */
5102
5102
  export interface CmafGroupSettings {
5103
5103
  /**
5104
- * @public
5105
5104
  * By default, the service creates one top-level .m3u8 HLS manifest and one top -level .mpd DASH manifest for each CMAF output group in your job. These default manifests reference every output in the output group. To create additional top-level manifests that reference a subset of the outputs in the output group, specify a list of them here. For each additional manifest that you specify, the service creates one HLS manifest and one DASH manifest.
5105
+ * @public
5106
5106
  */
5107
5107
  AdditionalManifests?: CmafAdditionalManifest[];
5108
5108
  /**
5109
- * @public
5110
5109
  * A partial URI prefix that will be put in the manifest file at the top level BaseURL element. Can be used if streams are delivered from a different URL than the manifest file.
5110
+ * @public
5111
5111
  */
5112
5112
  BaseUrl?: string;
5113
5113
  /**
5114
- * @public
5115
5114
  * Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled and control caching in your video distribution set up. For example, use the Cache-Control http header.
5115
+ * @public
5116
5116
  */
5117
5117
  ClientCache?: CmafClientCache;
5118
5118
  /**
5119
- * @public
5120
5119
  * Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist generation.
5120
+ * @public
5121
5121
  */
5122
5122
  CodecSpecification?: CmafCodecSpecification;
5123
5123
  /**
5124
- * @public
5125
5124
  * Specify how MediaConvert writes SegmentTimeline in your output DASH manifest. To write a SegmentTimeline in each video Representation: Keep the default value, Basic. To write a common SegmentTimeline in the video AdaptationSet: Choose Compact. Note that MediaConvert will still write a SegmentTimeline in any Representation that does not share a common timeline. To write a video AdaptationSet for each different output framerate, and a common SegmentTimeline in each AdaptationSet: Choose Distinct.
5125
+ * @public
5126
5126
  */
5127
5127
  DashManifestStyle?: DashManifestStyle;
5128
5128
  /**
5129
- * @public
5130
5129
  * Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file.
5130
+ * @public
5131
5131
  */
5132
5132
  Destination?: string;
5133
5133
  /**
5134
- * @public
5135
5134
  * Settings associated with the destination. Will vary based on the type of destination
5135
+ * @public
5136
5136
  */
5137
5137
  DestinationSettings?: DestinationSettings;
5138
5138
  /**
5139
- * @public
5140
5139
  * DRM settings.
5140
+ * @public
5141
5141
  */
5142
5142
  Encryption?: CmafEncryptionSettings;
5143
5143
  /**
5144
- * @public
5145
5144
  * Specify the length, in whole seconds, of the mp4 fragments. When you don't specify a value, MediaConvert defaults to 2. Related setting: Use Fragment length control to specify whether the encoder enforces this value strictly.
5145
+ * @public
5146
5146
  */
5147
5147
  FragmentLength?: number;
5148
5148
  /**
5149
- * @public
5150
5149
  * Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. When you enable Write HLS manifest, MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. When you enable Write DASH manifest, MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md
5150
+ * @public
5151
5151
  */
5152
5152
  ImageBasedTrickPlay?: CmafImageBasedTrickPlay;
5153
5153
  /**
5154
- * @public
5155
5154
  * Tile and thumbnail settings applicable when imageBasedTrickPlay is ADVANCED
5155
+ * @public
5156
5156
  */
5157
5157
  ImageBasedTrickPlaySettings?: CmafImageBasedTrickPlaySettings;
5158
5158
  /**
5159
- * @public
5160
5159
  * When set to GZIP, compresses HLS playlist.
5160
+ * @public
5161
5161
  */
5162
5162
  ManifestCompression?: CmafManifestCompression;
5163
5163
  /**
5164
- * @public
5165
5164
  * Indicates whether the output manifest should use floating point values for segment duration.
5165
+ * @public
5166
5166
  */
5167
5167
  ManifestDurationFormat?: CmafManifestDurationFormat;
5168
5168
  /**
5169
- * @public
5170
5169
  * Minimum time of initially buffered media that is needed to ensure smooth playout.
5170
+ * @public
5171
5171
  */
5172
5172
  MinBufferTime?: number;
5173
5173
  /**
5174
- * @public
5175
5174
  * Keep this setting at the default value of 0, unless you are troubleshooting a problem with how devices play back the end of your video asset. If you know that player devices are hanging on the final segment of your video because the length of your final segment is too short, use this setting to specify a minimum final segment length, in seconds. Choose a value that is greater than or equal to 1 and less than your segment length. When you specify a value for this setting, the encoder will combine any final segment that is shorter than the length that you specify with the previous segment. For example, your segment length is 3 seconds and your final segment is .5 seconds without a minimum final segment length; when you set the minimum final segment length to 1, your final segment is 3.5 seconds.
5175
+ * @public
5176
5176
  */
5177
5177
  MinFinalSegmentLength?: number;
5178
5178
  /**
5179
- * @public
5180
5179
  * Specify how the value for bandwidth is determined for each video Representation in your output MPD manifest. We recommend that you choose a MPD manifest bandwidth type that is compatible with your downstream player configuration. Max: Use the same value that you specify for Max bitrate in the video output, in bits per second. Average: Use the calculated average bitrate of the encoded video output, in bits per second.
5180
+ * @public
5181
5181
  */
5182
5182
  MpdManifestBandwidthType?: CmafMpdManifestBandwidthType;
5183
5183
  /**
5184
- * @public
5185
5184
  * Specify whether your DASH profile is on-demand or main. When you choose Main profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control to Single file.
5185
+ * @public
5186
5186
  */
5187
5187
  MpdProfile?: CmafMpdProfile;
5188
5188
  /**
5189
- * @public
5190
5189
  * Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here.
5190
+ * @public
5191
5191
  */
5192
5192
  PtsOffsetHandlingForBFrames?: CmafPtsOffsetHandlingForBFrames;
5193
5193
  /**
5194
- * @public
5195
5194
  * When set to SINGLE_FILE, a single output file is generated, which is internally segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, separate segment files will be created.
5195
+ * @public
5196
5196
  */
5197
5197
  SegmentControl?: CmafSegmentControl;
5198
5198
  /**
5199
- * @public
5200
5199
  * Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control to specify whether the encoder enforces this value strictly. Use Segment control to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries.
5200
+ * @public
5201
5201
  */
5202
5202
  SegmentLength?: number;
5203
5203
  /**
5204
- * @public
5205
5204
  * Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.
5205
+ * @public
5206
5206
  */
5207
5207
  SegmentLengthControl?: CmafSegmentLengthControl;
5208
5208
  /**
5209
- * @public
5210
5209
  * Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag of variant manifest.
5210
+ * @public
5211
5211
  */
5212
5212
  StreamInfResolution?: CmafStreamInfResolution;
5213
5213
  /**
5214
- * @public
5215
5214
  * When set to LEGACY, the segment target duration is always rounded up to the nearest integer value above its current value in seconds. When set to SPEC\\_COMPLIANT, the segment target duration is rounded up to the nearest integer value if fraction seconds are greater than or equal to 0.5 (>= 0.5) and rounded down if less than 0.5 (< 0.5). You may need to use LEGACY if your client needs to ensure that the target duration is always longer than the actual duration of the segment. Some older players may experience interrupted playback when the actual duration of a track in a segment is longer than the target duration.
5215
+ * @public
5216
5216
  */
5217
5217
  TargetDurationCompatibilityMode?: CmafTargetDurationCompatibilityMode;
5218
5218
  /**
5219
- * @public
5220
5219
  * Specify the video sample composition time offset mode in the output fMP4 TRUN box. For wider player compatibility, set Video composition offsets to Unsigned or leave blank. The earliest presentation time may be greater than zero, and sample composition time offsets will increment using unsigned integers. For strict fMP4 video and audio timing, set Video composition offsets to Signed. The earliest presentation time will be equal to zero, and sample composition time offsets will increment using signed integers.
5220
+ * @public
5221
5221
  */
5222
5222
  VideoCompositionOffsets?: CmafVideoCompositionOffsets;
5223
5223
  /**
5224
- * @public
5225
5224
  * When set to ENABLED, a DASH MPD manifest will be generated for this output.
5225
+ * @public
5226
5226
  */
5227
5227
  WriteDashManifest?: CmafWriteDASHManifest;
5228
5228
  /**
5229
- * @public
5230
5229
  * When set to ENABLED, an Apple HLS manifest will be generated for this output.
5230
+ * @public
5231
5231
  */
5232
5232
  WriteHlsManifest?: CmafWriteHLSManifest;
5233
5233
  /**
5234
- * @public
5235
5234
  * When you enable Precise segment duration in DASH manifests, your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element.
5235
+ * @public
5236
5236
  */
5237
5237
  WriteSegmentTimelineInRepresentation?: CmafWriteSegmentTimelineInRepresentation;
5238
5238
  }
@@ -5261,45 +5261,45 @@ export declare const DashIsoPlaybackDeviceCompatibility: {
5261
5261
  */
5262
5262
  export type DashIsoPlaybackDeviceCompatibility = (typeof DashIsoPlaybackDeviceCompatibility)[keyof typeof DashIsoPlaybackDeviceCompatibility];
5263
5263
  /**
5264
- * @public
5265
5264
  * If your output group type is HLS, DASH, or Microsoft Smooth, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
5265
+ * @public
5266
5266
  */
5267
5267
  export interface SpekeKeyProvider {
5268
5268
  /**
5269
- * @public
5270
5269
  * If you want your key provider to encrypt the content keys that it provides to MediaConvert, set up a certificate with a master key using AWS Certificate Manager. Specify the certificate's Amazon Resource Name (ARN) here.
5270
+ * @public
5271
5271
  */
5272
5272
  CertificateArn?: string;
5273
5273
  /**
5274
- * @public
5275
5274
  * Specify the resource ID that your SPEKE-compliant key provider uses to identify this content.
5275
+ * @public
5276
5276
  */
5277
5277
  ResourceId?: string;
5278
5278
  /**
5279
- * @public
5280
5279
  * Relates to SPEKE implementation. DRM system identifiers. DASH output groups support a max of two system ids. Other group types support one system id. See
5281
5280
  * https://dashif.org/identifiers/content_protection/ for more details.
5281
+ * @public
5282
5282
  */
5283
5283
  SystemIds?: string[];
5284
5284
  /**
5285
- * @public
5286
5285
  * Specify the URL to the key server that your SPEKE-compliant DRM key provider uses to provide keys for encrypting your content.
5286
+ * @public
5287
5287
  */
5288
5288
  Url?: string;
5289
5289
  }
5290
5290
  /**
5291
- * @public
5292
5291
  * Specifies DRM settings for DASH outputs.
5292
+ * @public
5293
5293
  */
5294
5294
  export interface DashIsoEncryptionSettings {
5295
5295
  /**
5296
- * @public
5297
5296
  * This setting can improve the compatibility of your output with video players on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. Choose Unencrypted SEI only to correct problems with playback on older devices. Otherwise, keep the default setting CENC v1. If you choose Unencrypted SEI, for that output, the service will exclude the access unit delimiter and will leave the SEI NAL units unencrypted.
5297
+ * @public
5298
5298
  */
5299
5299
  PlaybackDeviceCompatibility?: DashIsoPlaybackDeviceCompatibility;
5300
5300
  /**
5301
- * @public
5302
5301
  * If your output group type is HLS, DASH, or Microsoft Smooth, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
5302
+ * @public
5303
5303
  */
5304
5304
  SpekeKeyProvider?: SpekeKeyProvider;
5305
5305
  }
@@ -5342,38 +5342,38 @@ export declare const DashIsoIntervalCadence: {
5342
5342
  */
5343
5343
  export type DashIsoIntervalCadence = (typeof DashIsoIntervalCadence)[keyof typeof DashIsoIntervalCadence];
5344
5344
  /**
5345
- * @public
5346
5345
  * Tile and thumbnail settings applicable when imageBasedTrickPlay is ADVANCED
5346
+ * @public
5347
5347
  */
5348
5348
  export interface DashIsoImageBasedTrickPlaySettings {
5349
5349
  /**
5350
- * @public
5351
5350
  * The cadence MediaConvert follows for generating thumbnails. If set to FOLLOW_IFRAME, MediaConvert generates thumbnails for each IDR frame in the output (matching the GOP cadence). If set to FOLLOW_CUSTOM, MediaConvert generates thumbnails according to the interval you specify in thumbnailInterval.
5351
+ * @public
5352
5352
  */
5353
5353
  IntervalCadence?: DashIsoIntervalCadence;
5354
5354
  /**
5355
- * @public
5356
5355
  * Height of each thumbnail within each tile image, in pixels. Leave blank to maintain aspect ratio with thumbnail width. If following the aspect ratio would lead to a total tile height greater than 4096, then the job will be rejected. Must be divisible by 2.
5356
+ * @public
5357
5357
  */
5358
5358
  ThumbnailHeight?: number;
5359
5359
  /**
5360
- * @public
5361
5360
  * Enter the interval, in seconds, that MediaConvert uses to generate thumbnails. If the interval you enter doesn't align with the output frame rate, MediaConvert automatically rounds the interval to align with the output frame rate. For example, if the output frame rate is 29.97 frames per second and you enter 5, MediaConvert uses a 150 frame interval to generate thumbnails.
5361
+ * @public
5362
5362
  */
5363
5363
  ThumbnailInterval?: number;
5364
5364
  /**
5365
- * @public
5366
5365
  * Width of each thumbnail within each tile image, in pixels. Default is 312. Must be divisible by 8.
5366
+ * @public
5367
5367
  */
5368
5368
  ThumbnailWidth?: number;
5369
5369
  /**
5370
- * @public
5371
5370
  * Number of thumbnails in each column of a tile image. Set a value between 2 and 2048. Must be divisible by 2.
5371
+ * @public
5372
5372
  */
5373
5373
  TileHeight?: number;
5374
5374
  /**
5375
- * @public
5376
5375
  * Number of thumbnails in each row of a tile image. Set a value between 1 and 512.
5376
+ * @public
5377
5377
  */
5378
5378
  TileWidth?: number;
5379
5379
  }
@@ -5462,129 +5462,129 @@ export declare const DashIsoWriteSegmentTimelineInRepresentation: {
5462
5462
  */
5463
5463
  export type DashIsoWriteSegmentTimelineInRepresentation = (typeof DashIsoWriteSegmentTimelineInRepresentation)[keyof typeof DashIsoWriteSegmentTimelineInRepresentation];
5464
5464
  /**
5465
- * @public
5466
5465
  * Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
5466
+ * @public
5467
5467
  */
5468
5468
  export interface DashIsoGroupSettings {
5469
5469
  /**
5470
- * @public
5471
5470
  * By default, the service creates one .mpd DASH manifest for each DASH ISO output group in your job. This default manifest references every output in the output group. To create additional DASH manifests that reference a subset of the outputs in the output group, specify a list of them here.
5471
+ * @public
5472
5472
  */
5473
5473
  AdditionalManifests?: DashAdditionalManifest[];
5474
5474
  /**
5475
- * @public
5476
5475
  * Use this setting only when your audio codec is a Dolby one (AC3, EAC3, or Atmos) and your downstream workflow requires that your DASH manifest use the Dolby channel configuration tag, rather than the MPEG one. For example, you might need to use this to make dynamic ad insertion work. Specify which audio channel configuration scheme ID URI MediaConvert writes in your DASH manifest. Keep the default value, MPEG channel configuration, to have MediaConvert write this: urn:mpeg:mpegB:cicp:ChannelConfiguration. Choose Dolby channel configuration to have MediaConvert write this instead: tag:dolby.com,2014:dash:audio_channel_configuration:2011.
5476
+ * @public
5477
5477
  */
5478
5478
  AudioChannelConfigSchemeIdUri?: DashIsoGroupAudioChannelConfigSchemeIdUri;
5479
5479
  /**
5480
- * @public
5481
5480
  * A partial URI prefix that will be put in the manifest (.mpd) file at the top level BaseURL element. Can be used if streams are delivered from a different URL than the manifest file.
5481
+ * @public
5482
5482
  */
5483
5483
  BaseUrl?: string;
5484
5484
  /**
5485
- * @public
5486
5485
  * Specify how MediaConvert writes SegmentTimeline in your output DASH manifest. To write a SegmentTimeline in each video Representation: Keep the default value, Basic. To write a common SegmentTimeline in the video AdaptationSet: Choose Compact. Note that MediaConvert will still write a SegmentTimeline in any Representation that does not share a common timeline. To write a video AdaptationSet for each different output framerate, and a common SegmentTimeline in each AdaptationSet: Choose Distinct.
5486
+ * @public
5487
5487
  */
5488
5488
  DashManifestStyle?: DashManifestStyle;
5489
5489
  /**
5490
- * @public
5491
5490
  * Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file.
5491
+ * @public
5492
5492
  */
5493
5493
  Destination?: string;
5494
5494
  /**
5495
- * @public
5496
5495
  * Settings associated with the destination. Will vary based on the type of destination
5496
+ * @public
5497
5497
  */
5498
5498
  DestinationSettings?: DestinationSettings;
5499
5499
  /**
5500
- * @public
5501
5500
  * DRM settings.
5501
+ * @public
5502
5502
  */
5503
5503
  Encryption?: DashIsoEncryptionSettings;
5504
5504
  /**
5505
- * @public
5506
5505
  * Length of fragments to generate (in seconds). Fragment length must be compatible with GOP size and Framerate. Note that fragments will end on the next keyframe after this number of seconds, so actual fragment length may be longer. When Emit Single File is checked, the fragmentation is internal to a single output file and it does not cause the creation of many output files as in other output types.
5506
+ * @public
5507
5507
  */
5508
5508
  FragmentLength?: number;
5509
5509
  /**
5510
- * @public
5511
5510
  * Supports HbbTV specification as indicated
5511
+ * @public
5512
5512
  */
5513
5513
  HbbtvCompliance?: DashIsoHbbtvCompliance;
5514
5514
  /**
5515
- * @public
5516
5515
  * Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md
5516
+ * @public
5517
5517
  */
5518
5518
  ImageBasedTrickPlay?: DashIsoImageBasedTrickPlay;
5519
5519
  /**
5520
- * @public
5521
5520
  * Tile and thumbnail settings applicable when imageBasedTrickPlay is ADVANCED
5521
+ * @public
5522
5522
  */
5523
5523
  ImageBasedTrickPlaySettings?: DashIsoImageBasedTrickPlaySettings;
5524
5524
  /**
5525
- * @public
5526
5525
  * Minimum time of initially buffered media that is needed to ensure smooth playout.
5526
+ * @public
5527
5527
  */
5528
5528
  MinBufferTime?: number;
5529
5529
  /**
5530
- * @public
5531
5530
  * Keep this setting at the default value of 0, unless you are troubleshooting a problem with how devices play back the end of your video asset. If you know that player devices are hanging on the final segment of your video because the length of your final segment is too short, use this setting to specify a minimum final segment length, in seconds. Choose a value that is greater than or equal to 1 and less than your segment length. When you specify a value for this setting, the encoder will combine any final segment that is shorter than the length that you specify with the previous segment. For example, your segment length is 3 seconds and your final segment is .5 seconds without a minimum final segment length; when you set the minimum final segment length to 1, your final segment is 3.5 seconds.
5531
+ * @public
5532
5532
  */
5533
5533
  MinFinalSegmentLength?: number;
5534
5534
  /**
5535
- * @public
5536
5535
  * Specify how the value for bandwidth is determined for each video Representation in your output MPD manifest. We recommend that you choose a MPD manifest bandwidth type that is compatible with your downstream player configuration. Max: Use the same value that you specify for Max bitrate in the video output, in bits per second. Average: Use the calculated average bitrate of the encoded video output, in bits per second.
5536
+ * @public
5537
5537
  */
5538
5538
  MpdManifestBandwidthType?: DashIsoMpdManifestBandwidthType;
5539
5539
  /**
5540
- * @public
5541
5540
  * Specify whether your DASH profile is on-demand or main. When you choose Main profile, the service signals urn:mpeg:dash:profile:isoff-main:2011 in your .mpd DASH manifest. When you choose On-demand, the service signals urn:mpeg:dash:profile:isoff-on-demand:2011 in your .mpd. When you choose On-demand, you must also set the output group setting Segment control to Single file.
5541
+ * @public
5542
5542
  */
5543
5543
  MpdProfile?: DashIsoMpdProfile;
5544
5544
  /**
5545
- * @public
5546
5545
  * Use this setting only when your output video stream has B-frames, which causes the initial presentation time stamp (PTS) to be offset from the initial decode time stamp (DTS). Specify how MediaConvert handles PTS when writing time stamps in output DASH manifests. Choose Match initial PTS when you want MediaConvert to use the initial PTS as the first time stamp in the manifest. Choose Zero-based to have MediaConvert ignore the initial PTS in the video stream and instead write the initial time stamp as zero in the manifest. For outputs that don't have B-frames, the time stamps in your DASH manifests start at zero regardless of your choice here.
5546
+ * @public
5547
5547
  */
5548
5548
  PtsOffsetHandlingForBFrames?: DashIsoPtsOffsetHandlingForBFrames;
5549
5549
  /**
5550
- * @public
5551
5550
  * When set to SINGLE_FILE, a single output file is generated, which is internally segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, separate segment files will be created.
5551
+ * @public
5552
5552
  */
5553
5553
  SegmentControl?: DashIsoSegmentControl;
5554
5554
  /**
5555
- * @public
5556
5555
  * Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 30. Related settings: Use Segment length control to specify whether the encoder enforces this value strictly. Use Segment control to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries.
5556
+ * @public
5557
5557
  */
5558
5558
  SegmentLength?: number;
5559
5559
  /**
5560
- * @public
5561
5560
  * Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.
5561
+ * @public
5562
5562
  */
5563
5563
  SegmentLengthControl?: DashIsoSegmentLengthControl;
5564
5564
  /**
5565
- * @public
5566
5565
  * Specify the video sample composition time offset mode in the output fMP4 TRUN box. For wider player compatibility, set Video composition offsets to Unsigned or leave blank. The earliest presentation time may be greater than zero, and sample composition time offsets will increment using unsigned integers. For strict fMP4 video and audio timing, set Video composition offsets to Signed. The earliest presentation time will be equal to zero, and sample composition time offsets will increment using signed integers.
5566
+ * @public
5567
5567
  */
5568
5568
  VideoCompositionOffsets?: DashIsoVideoCompositionOffsets;
5569
5569
  /**
5570
- * @public
5571
5570
  * If you get an HTTP error in the 400 range when you play back your DASH output, enable this setting and run your transcoding job again. When you enable this setting, the service writes precise segment durations in the DASH manifest. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When you don't enable this setting, the service writes approximate segment durations in your DASH manifest.
5571
+ * @public
5572
5572
  */
5573
5573
  WriteSegmentTimelineInRepresentation?: DashIsoWriteSegmentTimelineInRepresentation;
5574
5574
  }
5575
5575
  /**
5576
- * @public
5577
5576
  * Settings related to your File output group. MediaConvert uses this group of settings to generate a single standalone file, rather than a streaming package.
5577
+ * @public
5578
5578
  */
5579
5579
  export interface FileGroupSettings {
5580
5580
  /**
5581
- * @public
5582
5581
  * Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file.
5582
+ * @public
5583
5583
  */
5584
5584
  Destination?: string;
5585
5585
  /**
5586
- * @public
5587
5586
  * Settings associated with the destination. Will vary based on the type of destination
5587
+ * @public
5588
5588
  */
5589
5589
  DestinationSettings?: DestinationSettings;
5590
5590
  }
@@ -5710,43 +5710,43 @@ export declare const HlsKeyProviderType: {
5710
5710
  */
5711
5711
  export type HlsKeyProviderType = (typeof HlsKeyProviderType)[keyof typeof HlsKeyProviderType];
5712
5712
  /**
5713
- * @public
5714
5713
  * Settings for HLS encryption
5714
+ * @public
5715
5715
  */
5716
5716
  export interface HlsEncryptionSettings {
5717
5717
  /**
5718
- * @public
5719
5718
  * This is a 128-bit, 16-byte hex value represented by a 32-character text string. If this parameter is not set then the Initialization Vector will follow the segment number by default.
5719
+ * @public
5720
5720
  */
5721
5721
  ConstantInitializationVector?: string;
5722
5722
  /**
5723
- * @public
5724
5723
  * Encrypts the segments with the given encryption scheme. Leave blank to disable. Selecting 'Disabled' in the web interface also disables encryption.
5724
+ * @public
5725
5725
  */
5726
5726
  EncryptionMethod?: HlsEncryptionType;
5727
5727
  /**
5728
- * @public
5729
5728
  * The Initialization Vector is a 128-bit number used in conjunction with the key for encrypting blocks. If set to INCLUDE, Initialization Vector is listed in the manifest. Otherwise Initialization Vector is not in the manifest.
5729
+ * @public
5730
5730
  */
5731
5731
  InitializationVectorInManifest?: HlsInitializationVectorInManifest;
5732
5732
  /**
5733
- * @public
5734
5733
  * Enable this setting to insert the EXT-X-SESSION-KEY element into the master playlist. This allows for offline Apple HLS FairPlay content protection.
5734
+ * @public
5735
5735
  */
5736
5736
  OfflineEncrypted?: HlsOfflineEncrypted;
5737
5737
  /**
5738
- * @public
5739
5738
  * If your output group type is HLS, DASH, or Microsoft Smooth, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
5739
+ * @public
5740
5740
  */
5741
5741
  SpekeKeyProvider?: SpekeKeyProvider;
5742
5742
  /**
5743
- * @public
5744
5743
  * Use these settings to set up encryption with a static key provider.
5744
+ * @public
5745
5745
  */
5746
5746
  StaticKeyProvider?: StaticKeyProvider;
5747
5747
  /**
5748
- * @public
5749
5748
  * Specify whether your DRM encryption key is static or from a key provider that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.
5749
+ * @public
5750
5750
  */
5751
5751
  Type?: HlsKeyProviderType;
5752
5752
  }
@@ -5777,38 +5777,38 @@ export declare const HlsIntervalCadence: {
5777
5777
  */
5778
5778
  export type HlsIntervalCadence = (typeof HlsIntervalCadence)[keyof typeof HlsIntervalCadence];
5779
5779
  /**
5780
- * @public
5781
5780
  * Tile and thumbnail settings applicable when imageBasedTrickPlay is ADVANCED
5781
+ * @public
5782
5782
  */
5783
5783
  export interface HlsImageBasedTrickPlaySettings {
5784
5784
  /**
5785
- * @public
5786
5785
  * The cadence MediaConvert follows for generating thumbnails. If set to FOLLOW_IFRAME, MediaConvert generates thumbnails for each IDR frame in the output (matching the GOP cadence). If set to FOLLOW_CUSTOM, MediaConvert generates thumbnails according to the interval you specify in thumbnailInterval.
5786
+ * @public
5787
5787
  */
5788
5788
  IntervalCadence?: HlsIntervalCadence;
5789
5789
  /**
5790
- * @public
5791
5790
  * Height of each thumbnail within each tile image, in pixels. Leave blank to maintain aspect ratio with thumbnail width. If following the aspect ratio would lead to a total tile height greater than 4096, then the job will be rejected. Must be divisible by 2.
5791
+ * @public
5792
5792
  */
5793
5793
  ThumbnailHeight?: number;
5794
5794
  /**
5795
- * @public
5796
5795
  * Enter the interval, in seconds, that MediaConvert uses to generate thumbnails. If the interval you enter doesn't align with the output frame rate, MediaConvert automatically rounds the interval to align with the output frame rate. For example, if the output frame rate is 29.97 frames per second and you enter 5, MediaConvert uses a 150 frame interval to generate thumbnails.
5796
+ * @public
5797
5797
  */
5798
5798
  ThumbnailInterval?: number;
5799
5799
  /**
5800
- * @public
5801
5800
  * Width of each thumbnail within each tile image, in pixels. Default is 312. Must be divisible by 8.
5801
+ * @public
5802
5802
  */
5803
5803
  ThumbnailWidth?: number;
5804
5804
  /**
5805
- * @public
5806
5805
  * Number of thumbnails in each column of a tile image. Set a value between 2 and 2048. Must be divisible by 2.
5806
+ * @public
5807
5807
  */
5808
5808
  TileHeight?: number;
5809
5809
  /**
5810
- * @public
5811
5810
  * Number of thumbnails in each row of a tile image. Set a value between 1 and 512.
5811
+ * @public
5812
5812
  */
5813
5813
  TileWidth?: number;
5814
5814
  }
@@ -5934,184 +5934,184 @@ export declare const HlsTimedMetadataId3Frame: {
5934
5934
  */
5935
5935
  export type HlsTimedMetadataId3Frame = (typeof HlsTimedMetadataId3Frame)[keyof typeof HlsTimedMetadataId3Frame];
5936
5936
  /**
5937
- * @public
5938
5937
  * Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
5938
+ * @public
5939
5939
  */
5940
5940
  export interface HlsGroupSettings {
5941
5941
  /**
5942
- * @public
5943
5942
  * Choose one or more ad marker types to decorate your Apple HLS manifest. This setting does not determine whether SCTE-35 markers appear in the outputs themselves.
5943
+ * @public
5944
5944
  */
5945
5945
  AdMarkers?: HlsAdMarkers[];
5946
5946
  /**
5947
- * @public
5948
5947
  * By default, the service creates one top-level .m3u8 HLS manifest for each HLS output group in your job. This default manifest references every output in the output group. To create additional top-level manifests that reference a subset of the outputs in the output group, specify a list of them here.
5948
+ * @public
5949
5949
  */
5950
5950
  AdditionalManifests?: HlsAdditionalManifest[];
5951
5951
  /**
5952
- * @public
5953
5952
  * Ignore this setting unless you are using FairPlay DRM with Verimatrix and you encounter playback issues. Keep the default value, Include, to output audio-only headers. Choose Exclude to remove the audio-only headers from your audio segments.
5953
+ * @public
5954
5954
  */
5955
5955
  AudioOnlyHeader?: HlsAudioOnlyHeader;
5956
5956
  /**
5957
- * @public
5958
5957
  * A partial URI prefix that will be prepended to each output in the media .m3u8 file. Can be used if base manifest is delivered from a different URL than the main .m3u8 file.
5958
+ * @public
5959
5959
  */
5960
5960
  BaseUrl?: string;
5961
5961
  /**
5962
- * @public
5963
5962
  * Language to be used on Caption outputs
5963
+ * @public
5964
5964
  */
5965
5965
  CaptionLanguageMappings?: HlsCaptionLanguageMapping[];
5966
5966
  /**
5967
- * @public
5968
5967
  * Applies only to 608 Embedded output captions. Insert: Include CLOSED-CAPTIONS lines in the manifest. Specify at least one language in the CC1 Language Code field. One CLOSED-CAPTION line is added for each Language Code you specify. Make sure to specify the languages in the order in which they appear in the original source (if the source is embedded format) or the order of the caption selectors (if the source is other than embedded). Otherwise, languages in the manifest will not match up properly with the output captions. None: Include CLOSED-CAPTIONS=NONE line in the manifest. Omit: Omit any CLOSED-CAPTIONS line from the manifest.
5968
+ * @public
5969
5969
  */
5970
5970
  CaptionLanguageSetting?: HlsCaptionLanguageSetting;
5971
5971
  /**
5972
- * @public
5973
5972
  * Set Caption segment length control to Match video to create caption segments that align with the video segments from the first video output in this output group. For example, if the video segments are 2 seconds long, your WebVTT segments will also be 2 seconds long. Keep the default setting, Large segments to create caption segments that are 300 seconds long.
5973
+ * @public
5974
5974
  */
5975
5975
  CaptionSegmentLengthControl?: HlsCaptionSegmentLengthControl;
5976
5976
  /**
5977
- * @public
5978
5977
  * Disable this setting only when your workflow requires the #EXT-X-ALLOW-CACHE:no tag. Otherwise, keep the default value Enabled and control caching in your video distribution set up. For example, use the Cache-Control http header.
5978
+ * @public
5979
5979
  */
5980
5980
  ClientCache?: HlsClientCache;
5981
5981
  /**
5982
- * @public
5983
5982
  * Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist generation.
5983
+ * @public
5984
5984
  */
5985
5985
  CodecSpecification?: HlsCodecSpecification;
5986
5986
  /**
5987
- * @public
5988
5987
  * Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file.
5988
+ * @public
5989
5989
  */
5990
5990
  Destination?: string;
5991
5991
  /**
5992
- * @public
5993
5992
  * Settings associated with the destination. Will vary based on the type of destination
5993
+ * @public
5994
5994
  */
5995
5995
  DestinationSettings?: DestinationSettings;
5996
5996
  /**
5997
- * @public
5998
5997
  * Indicates whether segments should be placed in subdirectories.
5998
+ * @public
5999
5999
  */
6000
6000
  DirectoryStructure?: HlsDirectoryStructure;
6001
6001
  /**
6002
- * @public
6003
6002
  * DRM settings.
6003
+ * @public
6004
6004
  */
6005
6005
  Encryption?: HlsEncryptionSettings;
6006
6006
  /**
6007
- * @public
6008
6007
  * Specify whether MediaConvert generates images for trick play. Keep the default value, None, to not generate any images. Choose Thumbnail to generate tiled thumbnails. Choose Thumbnail and full frame to generate tiled thumbnails and full-resolution images of single frames. MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md
6008
+ * @public
6009
6009
  */
6010
6010
  ImageBasedTrickPlay?: HlsImageBasedTrickPlay;
6011
6011
  /**
6012
- * @public
6013
6012
  * Tile and thumbnail settings applicable when imageBasedTrickPlay is ADVANCED
6013
+ * @public
6014
6014
  */
6015
6015
  ImageBasedTrickPlaySettings?: HlsImageBasedTrickPlaySettings;
6016
6016
  /**
6017
- * @public
6018
6017
  * When set to GZIP, compresses HLS playlist.
6018
+ * @public
6019
6019
  */
6020
6020
  ManifestCompression?: HlsManifestCompression;
6021
6021
  /**
6022
- * @public
6023
6022
  * Indicates whether the output manifest should use floating point values for segment duration.
6023
+ * @public
6024
6024
  */
6025
6025
  ManifestDurationFormat?: HlsManifestDurationFormat;
6026
6026
  /**
6027
- * @public
6028
6027
  * Keep this setting at the default value of 0, unless you are troubleshooting a problem with how devices play back the end of your video asset. If you know that player devices are hanging on the final segment of your video because the length of your final segment is too short, use this setting to specify a minimum final segment length, in seconds. Choose a value that is greater than or equal to 1 and less than your segment length. When you specify a value for this setting, the encoder will combine any final segment that is shorter than the length that you specify with the previous segment. For example, your segment length is 3 seconds and your final segment is .5 seconds without a minimum final segment length; when you set the minimum final segment length to 1, your final segment is 3.5 seconds.
6028
+ * @public
6029
6029
  */
6030
6030
  MinFinalSegmentLength?: number;
6031
6031
  /**
6032
- * @public
6033
6032
  * When set, Minimum Segment Size is enforced by looking ahead and back within the specified range for a nearby avail and extending the segment size if needed.
6033
+ * @public
6034
6034
  */
6035
6035
  MinSegmentLength?: number;
6036
6036
  /**
6037
- * @public
6038
6037
  * Indicates whether the .m3u8 manifest file should be generated for this HLS output group.
6038
+ * @public
6039
6039
  */
6040
6040
  OutputSelection?: HlsOutputSelection;
6041
6041
  /**
6042
- * @public
6043
6042
  * Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files. The value is calculated as follows: either the program date and time are initialized using the input timecode source, or the time is initialized using the input timecode source and the date is initialized using the timestamp_offset.
6043
+ * @public
6044
6044
  */
6045
6045
  ProgramDateTime?: HlsProgramDateTime;
6046
6046
  /**
6047
- * @public
6048
6047
  * Period of insertion of EXT-X-PROGRAM-DATE-TIME entry, in seconds.
6048
+ * @public
6049
6049
  */
6050
6050
  ProgramDateTimePeriod?: number;
6051
6051
  /**
6052
- * @public
6053
6052
  * Specify whether MediaConvert generates HLS manifests while your job is running or when your job is complete. To generate HLS manifests while your job is running: Choose Enabled. Use if you want to play back your content as soon as it's available. MediaConvert writes the parent and child manifests after the first three media segments are written to your destination S3 bucket. It then writes new updated manifests after each additional segment is written. The parent manifest includes the latest BANDWIDTH and AVERAGE-BANDWIDTH attributes, and child manifests include the latest available media segment. When your job completes, the final child playlists include an EXT-X-ENDLIST tag. To generate HLS manifests only when your job completes: Choose Disabled.
6053
+ * @public
6054
6054
  */
6055
6055
  ProgressiveWriteHlsManifest?: HlsProgressiveWriteHlsManifest;
6056
6056
  /**
6057
- * @public
6058
6057
  * When set to SINGLE_FILE, emits program as a single media resource (.ts) file, uses #EXT-X-BYTERANGE tags to index segment for playback.
6058
+ * @public
6059
6059
  */
6060
6060
  SegmentControl?: HlsSegmentControl;
6061
6061
  /**
6062
- * @public
6063
6062
  * Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control to specify whether the encoder enforces this value strictly. Use Segment control to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries.
6063
+ * @public
6064
6064
  */
6065
6065
  SegmentLength?: number;
6066
6066
  /**
6067
- * @public
6068
6067
  * Specify how you want MediaConvert to determine the segment length. Choose Exact to have the encoder use the exact length that you specify with the setting Segment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.
6068
+ * @public
6069
6069
  */
6070
6070
  SegmentLengthControl?: HlsSegmentLengthControl;
6071
6071
  /**
6072
- * @public
6073
6072
  * Specify the number of segments to write to a subdirectory before starting a new one. You must also set Directory structure to Subdirectory per stream for this setting to have an effect.
6073
+ * @public
6074
6074
  */
6075
6075
  SegmentsPerSubdirectory?: number;
6076
6076
  /**
6077
- * @public
6078
6077
  * Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag of variant manifest.
6078
+ * @public
6079
6079
  */
6080
6080
  StreamInfResolution?: HlsStreamInfResolution;
6081
6081
  /**
6082
- * @public
6083
6082
  * When set to LEGACY, the segment target duration is always rounded up to the nearest integer value above its current value in seconds. When set to SPEC\\_COMPLIANT, the segment target duration is rounded up to the nearest integer value if fraction seconds are greater than or equal to 0.5 (>= 0.5) and rounded down if less than 0.5 (< 0.5). You may need to use LEGACY if your client needs to ensure that the target duration is always longer than the actual duration of the segment. Some older players may experience interrupted playback when the actual duration of a track in a segment is longer than the target duration.
6083
+ * @public
6084
6084
  */
6085
6085
  TargetDurationCompatibilityMode?: HlsTargetDurationCompatibilityMode;
6086
6086
  /**
6087
- * @public
6088
6087
  * Specify the type of the ID3 frame to use for ID3 timestamps in your output. To include ID3 timestamps: Specify PRIV or TDRL and set ID3 metadata to Passthrough. To exclude ID3 timestamps: Set ID3 timestamp frame type to None.
6088
+ * @public
6089
6089
  */
6090
6090
  TimedMetadataId3Frame?: HlsTimedMetadataId3Frame;
6091
6091
  /**
6092
- * @public
6093
6092
  * Specify the interval in seconds to write ID3 timestamps in your output. The first timestamp starts at the output timecode and date, and increases incrementally with each ID3 timestamp. To use the default interval of 10 seconds: Leave blank. To include this metadata in your output: Set ID3 timestamp frame type to PRIV or TDRL, and set ID3 metadata to Passthrough.
6093
+ * @public
6094
6094
  */
6095
6095
  TimedMetadataId3Period?: number;
6096
6096
  /**
6097
- * @public
6098
6097
  * Provides an extra millisecond delta offset to fine tune the timestamps.
6098
+ * @public
6099
6099
  */
6100
6100
  TimestampDeltaMilliseconds?: number;
6101
6101
  }
6102
6102
  /**
6103
- * @public
6104
6103
  * Specify the details for each additional Microsoft Smooth Streaming manifest that you want the service to generate for this output group. Each manifest can reference a different subset of outputs in the group.
6104
+ * @public
6105
6105
  */
6106
6106
  export interface MsSmoothAdditionalManifest {
6107
6107
  /**
6108
- * @public
6109
6108
  * Specify a name modifier that the service adds to the name of this manifest to make it different from the file names of the other main manifests in the output group. For example, say that the default main manifest for your Microsoft Smooth group is film-name.ismv. If you enter "-no-premium" for this setting, then the file name the service generates for this top-level manifest is film-name-no-premium.ismv.
6109
+ * @public
6110
6110
  */
6111
6111
  ManifestNameModifier?: string;
6112
6112
  /**
6113
- * @public
6114
6113
  * Specify the outputs that you want this additional top-level manifest to reference.
6114
+ * @public
6115
6115
  */
6116
6116
  SelectedOutputs?: string[];
6117
6117
  }
@@ -6128,13 +6128,13 @@ export declare const MsSmoothAudioDeduplication: {
6128
6128
  */
6129
6129
  export type MsSmoothAudioDeduplication = (typeof MsSmoothAudioDeduplication)[keyof typeof MsSmoothAudioDeduplication];
6130
6130
  /**
6131
- * @public
6132
6131
  * If you are using DRM, set DRM System to specify the value SpekeKeyProvider.
6132
+ * @public
6133
6133
  */
6134
6134
  export interface MsSmoothEncryptionSettings {
6135
6135
  /**
6136
- * @public
6137
6136
  * If your output group type is HLS, DASH, or Microsoft Smooth, use these settings when doing DRM encryption with a SPEKE-compliant key provider. If your output group type is CMAF, use the SpekeKeyProviderCmaf settings instead.
6137
+ * @public
6138
6138
  */
6139
6139
  SpekeKeyProvider?: SpekeKeyProvider;
6140
6140
  }
@@ -6163,48 +6163,48 @@ export declare const MsSmoothManifestEncoding: {
6163
6163
  */
6164
6164
  export type MsSmoothManifestEncoding = (typeof MsSmoothManifestEncoding)[keyof typeof MsSmoothManifestEncoding];
6165
6165
/**
 * Settings related to your Microsoft Smooth Streaming output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
 * @public
 */
export interface MsSmoothGroupSettings {
  /**
   * By default, the service creates one .ism Microsoft Smooth Streaming manifest for each Microsoft Smooth Streaming output group in your job. This default manifest references every output in the output group. To create additional manifests that reference a subset of the outputs in the output group, specify a list of them here.
   * @public
   */
  AdditionalManifests?: MsSmoothAdditionalManifest[];
  /**
   * COMBINE_DUPLICATE_STREAMS combines identical audio encoding settings across a Microsoft Smooth output group into a single audio stream.
   * @public
   */
  AudioDeduplication?: MsSmoothAudioDeduplication;
  /**
   * Use Destination to specify the S3 output location and the output filename base. Destination accepts format identifiers. If you do not specify the base filename in the URI, the service will use the filename of the input file. If your job has multiple inputs, the service uses the filename of the first input file.
   * @public
   */
  Destination?: string;
  /**
   * Settings associated with the destination. Will vary based on the type of destination
   * @public
   */
  DestinationSettings?: DestinationSettings;
  /**
   * If you are using DRM, set DRM System to specify the value SpekeKeyProvider.
   * @public
   */
  Encryption?: MsSmoothEncryptionSettings;
  /**
   * Specify how you want MediaConvert to determine the fragment length. Choose Exact to have the encoder use the exact length that you specify with the setting Fragment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.
   *
   * NOTE(review): this numeric field appears to carry the same description as FragmentLengthControl below — it presumably holds the fragment duration itself; confirm against the AWS MediaConvert API reference before relying on this doc text.
   * @public
   */
  FragmentLength?: number;
  /**
   * Specify how you want MediaConvert to determine the fragment length. Choose Exact to have the encoder use the exact length that you specify with the setting Fragment length. This might result in extra I-frames. Choose Multiple of GOP to have the encoder round up the segment lengths to match the next GOP boundary.
   * @public
   */
  FragmentLengthControl?: MsSmoothFragmentLengthControl;
  /**
   * Use Manifest encoding to specify the encoding format for the server and client manifest. Valid options are utf8 and utf16.
   * @public
   */
  ManifestEncoding?: MsSmoothManifestEncoding;
}
@@ -6224,38 +6224,38 @@ export declare const OutputGroupType: {
6224
6224
  */
6225
6225
  export type OutputGroupType = (typeof OutputGroupType)[keyof typeof OutputGroupType];
6226
6226
/**
 * Output Group settings, including type
 * @public
 */
export interface OutputGroupSettings {
  /**
   * Settings related to your CMAF output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
   * @public
   */
  CmafGroupSettings?: CmafGroupSettings;
  /**
   * Settings related to your DASH output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
   * @public
   */
  DashIsoGroupSettings?: DashIsoGroupSettings;
  /**
   * Settings related to your File output group. MediaConvert uses this group of settings to generate a single standalone file, rather than a streaming package.
   * @public
   */
  FileGroupSettings?: FileGroupSettings;
  /**
   * Settings related to your HLS output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
   * @public
   */
  HlsGroupSettings?: HlsGroupSettings;
  /**
   * Settings related to your Microsoft Smooth Streaming output package. For more information, see https://docs.aws.amazon.com/mediaconvert/latest/ug/outputs-file-ABR.html.
   * @public
   */
  MsSmoothGroupSettings?: MsSmoothGroupSettings;
  /**
   * Type of output group (File group, Apple HLS, DASH ISO, Microsoft Smooth Streaming, CMAF)
   * @public
   */
  Type?: OutputGroupType;
}