alibabacloud_ice20201109-6.4.0.tar.gz → alibabacloud_ice20201109-6.4.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (17) hide show
  1. {alibabacloud_ice20201109-6.4.0 → alibabacloud_ice20201109-6.4.1}/ChangeLog.md +5 -0
  2. {alibabacloud_ice20201109-6.4.0 → alibabacloud_ice20201109-6.4.1}/PKG-INFO +1 -1
  3. alibabacloud_ice20201109-6.4.1/alibabacloud_ice20201109/__init__.py +1 -0
  4. {alibabacloud_ice20201109-6.4.0 → alibabacloud_ice20201109-6.4.1}/alibabacloud_ice20201109/client.py +20 -8
  5. {alibabacloud_ice20201109-6.4.0 → alibabacloud_ice20201109-6.4.1}/alibabacloud_ice20201109/models.py +455 -11
  6. {alibabacloud_ice20201109-6.4.0 → alibabacloud_ice20201109-6.4.1}/alibabacloud_ice20201109.egg-info/PKG-INFO +1 -1
  7. {alibabacloud_ice20201109-6.4.0 → alibabacloud_ice20201109-6.4.1}/setup.py +1 -1
  8. alibabacloud_ice20201109-6.4.0/alibabacloud_ice20201109/__init__.py +0 -1
  9. {alibabacloud_ice20201109-6.4.0 → alibabacloud_ice20201109-6.4.1}/LICENSE +0 -0
  10. {alibabacloud_ice20201109-6.4.0 → alibabacloud_ice20201109-6.4.1}/MANIFEST.in +0 -0
  11. {alibabacloud_ice20201109-6.4.0 → alibabacloud_ice20201109-6.4.1}/README-CN.md +0 -0
  12. {alibabacloud_ice20201109-6.4.0 → alibabacloud_ice20201109-6.4.1}/README.md +0 -0
  13. {alibabacloud_ice20201109-6.4.0 → alibabacloud_ice20201109-6.4.1}/alibabacloud_ice20201109.egg-info/SOURCES.txt +0 -0
  14. {alibabacloud_ice20201109-6.4.0 → alibabacloud_ice20201109-6.4.1}/alibabacloud_ice20201109.egg-info/dependency_links.txt +0 -0
  15. {alibabacloud_ice20201109-6.4.0 → alibabacloud_ice20201109-6.4.1}/alibabacloud_ice20201109.egg-info/requires.txt +0 -0
  16. {alibabacloud_ice20201109-6.4.0 → alibabacloud_ice20201109-6.4.1}/alibabacloud_ice20201109.egg-info/top_level.txt +0 -0
  17. {alibabacloud_ice20201109-6.4.0 → alibabacloud_ice20201109-6.4.1}/setup.cfg +0 -0
@@ -1,3 +1,8 @@
1
+ 2025-06-19 Version: 6.4.1
2
+ - Support API ListMediaConvertJobs.
3
+ - Update API StartWorkflow: add request parameters SkipInputVerification.
4
+
5
+
1
6
  2025-06-17 Version: 6.3.2
2
7
  - Update API GetMediaConvertJob: add response parameters Body.Job.CreateTime.
3
8
  - Update API GetMediaConvertJob: add response parameters Body.Job.FinishTime.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: alibabacloud_ice20201109
3
- Version: 6.4.0
3
+ Version: 6.4.1
4
4
  Summary: Alibaba Cloud ICE (20201109) SDK Library for Python
5
5
  Home-page: https://github.com/aliyun/alibabacloud-python-sdk
6
6
  Author: Alibaba Cloud SDK
@@ -0,0 +1 @@
1
+ __version__ = '6.4.1'
@@ -18406,10 +18406,14 @@ class Client(OpenApiClient):
18406
18406
  """
18407
18407
  UtilClient.validate_model(request)
18408
18408
  query = {}
18409
+ if not UtilClient.is_unset(request.number):
18410
+ query['Number'] = request.number
18409
18411
  if not UtilClient.is_unset(request.page_number):
18410
18412
  query['PageNumber'] = request.page_number
18411
18413
  if not UtilClient.is_unset(request.page_size):
18412
18414
  query['PageSize'] = request.page_size
18415
+ if not UtilClient.is_unset(request.status):
18416
+ query['Status'] = request.status
18413
18417
  req = open_api_models.OpenApiRequest(
18414
18418
  query=OpenApiUtilClient.query(query)
18415
18419
  )
@@ -18443,10 +18447,14 @@ class Client(OpenApiClient):
18443
18447
  """
18444
18448
  UtilClient.validate_model(request)
18445
18449
  query = {}
18450
+ if not UtilClient.is_unset(request.number):
18451
+ query['Number'] = request.number
18446
18452
  if not UtilClient.is_unset(request.page_number):
18447
18453
  query['PageNumber'] = request.page_number
18448
18454
  if not UtilClient.is_unset(request.page_size):
18449
18455
  query['PageSize'] = request.page_size
18456
+ if not UtilClient.is_unset(request.status):
18457
+ query['Status'] = request.status
18450
18458
  req = open_api_models.OpenApiRequest(
18451
18459
  query=OpenApiUtilClient.query(query)
18452
18460
  )
@@ -29322,6 +29330,8 @@ class Client(OpenApiClient):
29322
29330
  query['CallerNumber'] = request.caller_number
29323
29331
  if not UtilClient.is_unset(request.config_shrink):
29324
29332
  query['Config'] = request.config_shrink
29333
+ if not UtilClient.is_unset(request.ims_aiagent_free_ob_call):
29334
+ query['ImsAIAgentFreeObCall'] = request.ims_aiagent_free_ob_call
29325
29335
  if not UtilClient.is_unset(request.session_id):
29326
29336
  query['SessionId'] = request.session_id
29327
29337
  if not UtilClient.is_unset(request.user_data):
@@ -29371,6 +29381,8 @@ class Client(OpenApiClient):
29371
29381
  query['CallerNumber'] = request.caller_number
29372
29382
  if not UtilClient.is_unset(request.config_shrink):
29373
29383
  query['Config'] = request.config_shrink
29384
+ if not UtilClient.is_unset(request.ims_aiagent_free_ob_call):
29385
+ query['ImsAIAgentFreeObCall'] = request.ims_aiagent_free_ob_call
29374
29386
  if not UtilClient.is_unset(request.session_id):
29375
29387
  query['SessionId'] = request.session_id
29376
29388
  if not UtilClient.is_unset(request.user_data):
@@ -31846,7 +31858,7 @@ class Client(OpenApiClient):
31846
31858
  runtime: util_models.RuntimeOptions,
31847
31859
  ) -> ice20201109_models.SubmitHighlightExtractionJobResponse:
31848
31860
  """
31849
- @summary 提交高光提取任务
31861
+ @summary Submits a highlight extraction task.
31850
31862
 
31851
31863
  @param request: SubmitHighlightExtractionJobRequest
31852
31864
  @param runtime: runtime options for this request RuntimeOptions
@@ -31889,7 +31901,7 @@ class Client(OpenApiClient):
31889
31901
  runtime: util_models.RuntimeOptions,
31890
31902
  ) -> ice20201109_models.SubmitHighlightExtractionJobResponse:
31891
31903
  """
31892
- @summary 提交高光提取任务
31904
+ @summary Submits a highlight extraction task.
31893
31905
 
31894
31906
  @param request: SubmitHighlightExtractionJobRequest
31895
31907
  @param runtime: runtime options for this request RuntimeOptions
@@ -31931,7 +31943,7 @@ class Client(OpenApiClient):
31931
31943
  request: ice20201109_models.SubmitHighlightExtractionJobRequest,
31932
31944
  ) -> ice20201109_models.SubmitHighlightExtractionJobResponse:
31933
31945
  """
31934
- @summary 提交高光提取任务
31946
+ @summary Submits a highlight extraction task.
31935
31947
 
31936
31948
  @param request: SubmitHighlightExtractionJobRequest
31937
31949
  @return: SubmitHighlightExtractionJobResponse
@@ -31944,7 +31956,7 @@ class Client(OpenApiClient):
31944
31956
  request: ice20201109_models.SubmitHighlightExtractionJobRequest,
31945
31957
  ) -> ice20201109_models.SubmitHighlightExtractionJobResponse:
31946
31958
  """
31947
- @summary 提交高光提取任务
31959
+ @summary Submits a highlight extraction task.
31948
31960
 
31949
31961
  @param request: SubmitHighlightExtractionJobRequest
31950
31962
  @return: SubmitHighlightExtractionJobResponse
@@ -33578,7 +33590,7 @@ class Client(OpenApiClient):
33578
33590
  runtime: util_models.RuntimeOptions,
33579
33591
  ) -> ice20201109_models.SubmitScreenMediaHighlightsJobResponse:
33580
33592
  """
33581
- @summary 提交高燃混剪任务
33593
+ @summary Submits a task to automatically recognize the highlight segments in the video input and compile them into a dramatic and engaging clip.
33582
33594
 
33583
33595
  @param request: SubmitScreenMediaHighlightsJobRequest
33584
33596
  @param runtime: runtime options for this request RuntimeOptions
@@ -33621,7 +33633,7 @@ class Client(OpenApiClient):
33621
33633
  runtime: util_models.RuntimeOptions,
33622
33634
  ) -> ice20201109_models.SubmitScreenMediaHighlightsJobResponse:
33623
33635
  """
33624
- @summary 提交高燃混剪任务
33636
+ @summary Submits a task to automatically recognize the highlight segments in the video input and compile them into a dramatic and engaging clip.
33625
33637
 
33626
33638
  @param request: SubmitScreenMediaHighlightsJobRequest
33627
33639
  @param runtime: runtime options for this request RuntimeOptions
@@ -33663,7 +33675,7 @@ class Client(OpenApiClient):
33663
33675
  request: ice20201109_models.SubmitScreenMediaHighlightsJobRequest,
33664
33676
  ) -> ice20201109_models.SubmitScreenMediaHighlightsJobResponse:
33665
33677
  """
33666
- @summary 提交高燃混剪任务
33678
+ @summary Submits a task to automatically recognize the highlight segments in the video input and compile them into a dramatic and engaging clip.
33667
33679
 
33668
33680
  @param request: SubmitScreenMediaHighlightsJobRequest
33669
33681
  @return: SubmitScreenMediaHighlightsJobResponse
@@ -33676,7 +33688,7 @@ class Client(OpenApiClient):
33676
33688
  request: ice20201109_models.SubmitScreenMediaHighlightsJobRequest,
33677
33689
  ) -> ice20201109_models.SubmitScreenMediaHighlightsJobResponse:
33678
33690
  """
33679
- @summary 提交高燃混剪任务
33691
+ @summary Submits a task to automatically recognize the highlight segments in the video input and compile them into a dramatic and engaging clip.
33680
33692
 
33681
33693
  @param request: SubmitScreenMediaHighlightsJobRequest
33682
33694
  @return: SubmitScreenMediaHighlightsJobResponse
@@ -10,11 +10,13 @@ class AIAgentConfigAsrConfig(TeaModel):
10
10
  asr_hot_words: List[str] = None,
11
11
  asr_language_id: str = None,
12
12
  asr_max_silence: int = None,
13
+ custom_params: str = None,
13
14
  vad_level: int = None,
14
15
  ):
15
16
  self.asr_hot_words = asr_hot_words
16
17
  self.asr_language_id = asr_language_id
17
18
  self.asr_max_silence = asr_max_silence
19
+ self.custom_params = custom_params
18
20
  self.vad_level = vad_level
19
21
 
20
22
  def validate(self):
@@ -32,6 +34,8 @@ class AIAgentConfigAsrConfig(TeaModel):
32
34
  result['AsrLanguageId'] = self.asr_language_id
33
35
  if self.asr_max_silence is not None:
34
36
  result['AsrMaxSilence'] = self.asr_max_silence
37
+ if self.custom_params is not None:
38
+ result['CustomParams'] = self.custom_params
35
39
  if self.vad_level is not None:
36
40
  result['VadLevel'] = self.vad_level
37
41
  return result
@@ -44,6 +48,8 @@ class AIAgentConfigAsrConfig(TeaModel):
44
48
  self.asr_language_id = m.get('AsrLanguageId')
45
49
  if m.get('AsrMaxSilence') is not None:
46
50
  self.asr_max_silence = m.get('AsrMaxSilence')
51
+ if m.get('CustomParams') is not None:
52
+ self.custom_params = m.get('CustomParams')
47
53
  if m.get('VadLevel') is not None:
48
54
  self.vad_level = m.get('VadLevel')
49
55
  return self
@@ -195,17 +201,61 @@ class AIAgentConfigLlmConfig(TeaModel):
195
201
  return self
196
202
 
197
203
 
204
+ class AIAgentConfigTtsConfigPronunciationRules(TeaModel):
205
+ def __init__(
206
+ self,
207
+ pronunciation: str = None,
208
+ type: str = None,
209
+ word: str = None,
210
+ ):
211
+ self.pronunciation = pronunciation
212
+ self.type = type
213
+ self.word = word
214
+
215
+ def validate(self):
216
+ pass
217
+
218
+ def to_map(self):
219
+ _map = super().to_map()
220
+ if _map is not None:
221
+ return _map
222
+
223
+ result = dict()
224
+ if self.pronunciation is not None:
225
+ result['Pronunciation'] = self.pronunciation
226
+ if self.type is not None:
227
+ result['Type'] = self.type
228
+ if self.word is not None:
229
+ result['Word'] = self.word
230
+ return result
231
+
232
+ def from_map(self, m: dict = None):
233
+ m = m or dict()
234
+ if m.get('Pronunciation') is not None:
235
+ self.pronunciation = m.get('Pronunciation')
236
+ if m.get('Type') is not None:
237
+ self.type = m.get('Type')
238
+ if m.get('Word') is not None:
239
+ self.word = m.get('Word')
240
+ return self
241
+
242
+
198
243
  class AIAgentConfigTtsConfig(TeaModel):
199
244
  def __init__(
200
245
  self,
246
+ pronunciation_rules: List[AIAgentConfigTtsConfigPronunciationRules] = None,
201
247
  voice_id: str = None,
202
248
  voice_id_list: List[str] = None,
203
249
  ):
250
+ self.pronunciation_rules = pronunciation_rules
204
251
  self.voice_id = voice_id
205
252
  self.voice_id_list = voice_id_list
206
253
 
207
254
  def validate(self):
208
- pass
255
+ if self.pronunciation_rules:
256
+ for k in self.pronunciation_rules:
257
+ if k:
258
+ k.validate()
209
259
 
210
260
  def to_map(self):
211
261
  _map = super().to_map()
@@ -213,6 +263,10 @@ class AIAgentConfigTtsConfig(TeaModel):
213
263
  return _map
214
264
 
215
265
  result = dict()
266
+ result['PronunciationRules'] = []
267
+ if self.pronunciation_rules is not None:
268
+ for k in self.pronunciation_rules:
269
+ result['PronunciationRules'].append(k.to_map() if k else None)
216
270
  if self.voice_id is not None:
217
271
  result['VoiceId'] = self.voice_id
218
272
  if self.voice_id_list is not None:
@@ -221,6 +275,11 @@ class AIAgentConfigTtsConfig(TeaModel):
221
275
 
222
276
  def from_map(self, m: dict = None):
223
277
  m = m or dict()
278
+ self.pronunciation_rules = []
279
+ if m.get('PronunciationRules') is not None:
280
+ for k in m.get('PronunciationRules'):
281
+ temp_model = AIAgentConfigTtsConfigPronunciationRules()
282
+ self.pronunciation_rules.append(temp_model.from_map(k))
224
283
  if m.get('VoiceId') is not None:
225
284
  self.voice_id = m.get('VoiceId')
226
285
  if m.get('VoiceIdList') is not None:
@@ -231,8 +290,12 @@ class AIAgentConfigTtsConfig(TeaModel):
231
290
  class AIAgentConfigTurnDetectionConfig(TeaModel):
232
291
  def __init__(
233
292
  self,
293
+ mode: str = None,
294
+ semantic_wait_duration: int = None,
234
295
  turn_end_words: List[str] = None,
235
296
  ):
297
+ self.mode = mode
298
+ self.semantic_wait_duration = semantic_wait_duration
236
299
  self.turn_end_words = turn_end_words
237
300
 
238
301
  def validate(self):
@@ -244,17 +307,237 @@ class AIAgentConfigTurnDetectionConfig(TeaModel):
244
307
  return _map
245
308
 
246
309
  result = dict()
310
+ if self.mode is not None:
311
+ result['Mode'] = self.mode
312
+ if self.semantic_wait_duration is not None:
313
+ result['SemanticWaitDuration'] = self.semantic_wait_duration
247
314
  if self.turn_end_words is not None:
248
315
  result['TurnEndWords'] = self.turn_end_words
249
316
  return result
250
317
 
251
318
  def from_map(self, m: dict = None):
252
319
  m = m or dict()
320
+ if m.get('Mode') is not None:
321
+ self.mode = m.get('Mode')
322
+ if m.get('SemanticWaitDuration') is not None:
323
+ self.semantic_wait_duration = m.get('SemanticWaitDuration')
253
324
  if m.get('TurnEndWords') is not None:
254
325
  self.turn_end_words = m.get('TurnEndWords')
255
326
  return self
256
327
 
257
328
 
329
+ class AIAgentConfigVcrConfigEquipment(TeaModel):
330
+ def __init__(
331
+ self,
332
+ enabled: bool = None,
333
+ ):
334
+ self.enabled = enabled
335
+
336
+ def validate(self):
337
+ pass
338
+
339
+ def to_map(self):
340
+ _map = super().to_map()
341
+ if _map is not None:
342
+ return _map
343
+
344
+ result = dict()
345
+ if self.enabled is not None:
346
+ result['Enabled'] = self.enabled
347
+ return result
348
+
349
+ def from_map(self, m: dict = None):
350
+ m = m or dict()
351
+ if m.get('Enabled') is not None:
352
+ self.enabled = m.get('Enabled')
353
+ return self
354
+
355
+
356
+ class AIAgentConfigVcrConfigHeadMotion(TeaModel):
357
+ def __init__(
358
+ self,
359
+ enabled: bool = None,
360
+ ):
361
+ self.enabled = enabled
362
+
363
+ def validate(self):
364
+ pass
365
+
366
+ def to_map(self):
367
+ _map = super().to_map()
368
+ if _map is not None:
369
+ return _map
370
+
371
+ result = dict()
372
+ if self.enabled is not None:
373
+ result['Enabled'] = self.enabled
374
+ return result
375
+
376
+ def from_map(self, m: dict = None):
377
+ m = m or dict()
378
+ if m.get('Enabled') is not None:
379
+ self.enabled = m.get('Enabled')
380
+ return self
381
+
382
+
383
+ class AIAgentConfigVcrConfigInvalidFrameMotion(TeaModel):
384
+ def __init__(
385
+ self,
386
+ callback_delay: int = None,
387
+ enabled: bool = None,
388
+ ):
389
+ self.callback_delay = callback_delay
390
+ self.enabled = enabled
391
+
392
+ def validate(self):
393
+ pass
394
+
395
+ def to_map(self):
396
+ _map = super().to_map()
397
+ if _map is not None:
398
+ return _map
399
+
400
+ result = dict()
401
+ if self.callback_delay is not None:
402
+ result['CallbackDelay'] = self.callback_delay
403
+ if self.enabled is not None:
404
+ result['Enabled'] = self.enabled
405
+ return result
406
+
407
+ def from_map(self, m: dict = None):
408
+ m = m or dict()
409
+ if m.get('CallbackDelay') is not None:
410
+ self.callback_delay = m.get('CallbackDelay')
411
+ if m.get('Enabled') is not None:
412
+ self.enabled = m.get('Enabled')
413
+ return self
414
+
415
+
416
+ class AIAgentConfigVcrConfigPeopleCount(TeaModel):
417
+ def __init__(
418
+ self,
419
+ enabled: bool = None,
420
+ ):
421
+ self.enabled = enabled
422
+
423
+ def validate(self):
424
+ pass
425
+
426
+ def to_map(self):
427
+ _map = super().to_map()
428
+ if _map is not None:
429
+ return _map
430
+
431
+ result = dict()
432
+ if self.enabled is not None:
433
+ result['Enabled'] = self.enabled
434
+ return result
435
+
436
+ def from_map(self, m: dict = None):
437
+ m = m or dict()
438
+ if m.get('Enabled') is not None:
439
+ self.enabled = m.get('Enabled')
440
+ return self
441
+
442
+
443
+ class AIAgentConfigVcrConfigStillFrameMotion(TeaModel):
444
+ def __init__(
445
+ self,
446
+ callback_delay: int = None,
447
+ enabled: bool = None,
448
+ ):
449
+ self.callback_delay = callback_delay
450
+ self.enabled = enabled
451
+
452
+ def validate(self):
453
+ pass
454
+
455
+ def to_map(self):
456
+ _map = super().to_map()
457
+ if _map is not None:
458
+ return _map
459
+
460
+ result = dict()
461
+ if self.callback_delay is not None:
462
+ result['CallbackDelay'] = self.callback_delay
463
+ if self.enabled is not None:
464
+ result['Enabled'] = self.enabled
465
+ return result
466
+
467
+ def from_map(self, m: dict = None):
468
+ m = m or dict()
469
+ if m.get('CallbackDelay') is not None:
470
+ self.callback_delay = m.get('CallbackDelay')
471
+ if m.get('Enabled') is not None:
472
+ self.enabled = m.get('Enabled')
473
+ return self
474
+
475
+
476
+ class AIAgentConfigVcrConfig(TeaModel):
477
+ def __init__(
478
+ self,
479
+ equipment: AIAgentConfigVcrConfigEquipment = None,
480
+ head_motion: AIAgentConfigVcrConfigHeadMotion = None,
481
+ invalid_frame_motion: AIAgentConfigVcrConfigInvalidFrameMotion = None,
482
+ people_count: AIAgentConfigVcrConfigPeopleCount = None,
483
+ still_frame_motion: AIAgentConfigVcrConfigStillFrameMotion = None,
484
+ ):
485
+ self.equipment = equipment
486
+ self.head_motion = head_motion
487
+ self.invalid_frame_motion = invalid_frame_motion
488
+ self.people_count = people_count
489
+ self.still_frame_motion = still_frame_motion
490
+
491
+ def validate(self):
492
+ if self.equipment:
493
+ self.equipment.validate()
494
+ if self.head_motion:
495
+ self.head_motion.validate()
496
+ if self.invalid_frame_motion:
497
+ self.invalid_frame_motion.validate()
498
+ if self.people_count:
499
+ self.people_count.validate()
500
+ if self.still_frame_motion:
501
+ self.still_frame_motion.validate()
502
+
503
+ def to_map(self):
504
+ _map = super().to_map()
505
+ if _map is not None:
506
+ return _map
507
+
508
+ result = dict()
509
+ if self.equipment is not None:
510
+ result['Equipment'] = self.equipment.to_map()
511
+ if self.head_motion is not None:
512
+ result['HeadMotion'] = self.head_motion.to_map()
513
+ if self.invalid_frame_motion is not None:
514
+ result['InvalidFrameMotion'] = self.invalid_frame_motion.to_map()
515
+ if self.people_count is not None:
516
+ result['PeopleCount'] = self.people_count.to_map()
517
+ if self.still_frame_motion is not None:
518
+ result['StillFrameMotion'] = self.still_frame_motion.to_map()
519
+ return result
520
+
521
+ def from_map(self, m: dict = None):
522
+ m = m or dict()
523
+ if m.get('Equipment') is not None:
524
+ temp_model = AIAgentConfigVcrConfigEquipment()
525
+ self.equipment = temp_model.from_map(m['Equipment'])
526
+ if m.get('HeadMotion') is not None:
527
+ temp_model = AIAgentConfigVcrConfigHeadMotion()
528
+ self.head_motion = temp_model.from_map(m['HeadMotion'])
529
+ if m.get('InvalidFrameMotion') is not None:
530
+ temp_model = AIAgentConfigVcrConfigInvalidFrameMotion()
531
+ self.invalid_frame_motion = temp_model.from_map(m['InvalidFrameMotion'])
532
+ if m.get('PeopleCount') is not None:
533
+ temp_model = AIAgentConfigVcrConfigPeopleCount()
534
+ self.people_count = temp_model.from_map(m['PeopleCount'])
535
+ if m.get('StillFrameMotion') is not None:
536
+ temp_model = AIAgentConfigVcrConfigStillFrameMotion()
537
+ self.still_frame_motion = temp_model.from_map(m['StillFrameMotion'])
538
+ return self
539
+
540
+
258
541
  class AIAgentConfigVoiceprintConfig(TeaModel):
259
542
  def __init__(
260
543
  self,
@@ -307,6 +590,7 @@ class AIAgentConfig(TeaModel):
307
590
  turn_detection_config: AIAgentConfigTurnDetectionConfig = None,
308
591
  user_offline_timeout: int = None,
309
592
  user_online_timeout: int = None,
593
+ vcr_config: AIAgentConfigVcrConfig = None,
310
594
  voiceprint_config: AIAgentConfigVoiceprintConfig = None,
311
595
  volume: int = None,
312
596
  wake_up_query: str = None,
@@ -328,6 +612,7 @@ class AIAgentConfig(TeaModel):
328
612
  self.turn_detection_config = turn_detection_config
329
613
  self.user_offline_timeout = user_offline_timeout
330
614
  self.user_online_timeout = user_online_timeout
615
+ self.vcr_config = vcr_config
331
616
  self.voiceprint_config = voiceprint_config
332
617
  self.volume = volume
333
618
  self.wake_up_query = wake_up_query
@@ -346,6 +631,8 @@ class AIAgentConfig(TeaModel):
346
631
  self.tts_config.validate()
347
632
  if self.turn_detection_config:
348
633
  self.turn_detection_config.validate()
634
+ if self.vcr_config:
635
+ self.vcr_config.validate()
349
636
  if self.voiceprint_config:
350
637
  self.voiceprint_config.validate()
351
638
 
@@ -387,6 +674,8 @@ class AIAgentConfig(TeaModel):
387
674
  result['UserOfflineTimeout'] = self.user_offline_timeout
388
675
  if self.user_online_timeout is not None:
389
676
  result['UserOnlineTimeout'] = self.user_online_timeout
677
+ if self.vcr_config is not None:
678
+ result['VcrConfig'] = self.vcr_config.to_map()
390
679
  if self.voiceprint_config is not None:
391
680
  result['VoiceprintConfig'] = self.voiceprint_config.to_map()
392
681
  if self.volume is not None:
@@ -437,6 +726,9 @@ class AIAgentConfig(TeaModel):
437
726
  self.user_offline_timeout = m.get('UserOfflineTimeout')
438
727
  if m.get('UserOnlineTimeout') is not None:
439
728
  self.user_online_timeout = m.get('UserOnlineTimeout')
729
+ if m.get('VcrConfig') is not None:
730
+ temp_model = AIAgentConfigVcrConfig()
731
+ self.vcr_config = temp_model.from_map(m['VcrConfig'])
440
732
  if m.get('VoiceprintConfig') is not None:
441
733
  temp_model = AIAgentConfigVoiceprintConfig()
442
734
  self.voiceprint_config = temp_model.from_map(m['VoiceprintConfig'])
@@ -455,11 +747,13 @@ class AIAgentOutboundCallConfigAsrConfig(TeaModel):
455
747
  asr_hot_words: List[str] = None,
456
748
  asr_language_id: str = None,
457
749
  asr_max_silence: int = None,
750
+ custom_params: str = None,
458
751
  vad_level: int = None,
459
752
  ):
460
753
  self.asr_hot_words = asr_hot_words
461
754
  self.asr_language_id = asr_language_id
462
755
  self.asr_max_silence = asr_max_silence
756
+ self.custom_params = custom_params
463
757
  self.vad_level = vad_level
464
758
 
465
759
  def validate(self):
@@ -477,6 +771,8 @@ class AIAgentOutboundCallConfigAsrConfig(TeaModel):
477
771
  result['AsrLanguageId'] = self.asr_language_id
478
772
  if self.asr_max_silence is not None:
479
773
  result['AsrMaxSilence'] = self.asr_max_silence
774
+ if self.custom_params is not None:
775
+ result['CustomParams'] = self.custom_params
480
776
  if self.vad_level is not None:
481
777
  result['VadLevel'] = self.vad_level
482
778
  return result
@@ -489,6 +785,8 @@ class AIAgentOutboundCallConfigAsrConfig(TeaModel):
489
785
  self.asr_language_id = m.get('AsrLanguageId')
490
786
  if m.get('AsrMaxSilence') is not None:
491
787
  self.asr_max_silence = m.get('AsrMaxSilence')
788
+ if m.get('CustomParams') is not None:
789
+ self.custom_params = m.get('CustomParams')
492
790
  if m.get('VadLevel') is not None:
493
791
  self.vad_level = m.get('VadLevel')
494
792
  return self
@@ -613,17 +911,61 @@ class AIAgentOutboundCallConfigLlmConfig(TeaModel):
613
911
  return self
614
912
 
615
913
 
914
+ class AIAgentOutboundCallConfigTtsConfigPronunciationRules(TeaModel):
915
+ def __init__(
916
+ self,
917
+ pronunciation: str = None,
918
+ type: str = None,
919
+ word: str = None,
920
+ ):
921
+ self.pronunciation = pronunciation
922
+ self.type = type
923
+ self.word = word
924
+
925
+ def validate(self):
926
+ pass
927
+
928
+ def to_map(self):
929
+ _map = super().to_map()
930
+ if _map is not None:
931
+ return _map
932
+
933
+ result = dict()
934
+ if self.pronunciation is not None:
935
+ result['Pronunciation'] = self.pronunciation
936
+ if self.type is not None:
937
+ result['Type'] = self.type
938
+ if self.word is not None:
939
+ result['Word'] = self.word
940
+ return result
941
+
942
+ def from_map(self, m: dict = None):
943
+ m = m or dict()
944
+ if m.get('Pronunciation') is not None:
945
+ self.pronunciation = m.get('Pronunciation')
946
+ if m.get('Type') is not None:
947
+ self.type = m.get('Type')
948
+ if m.get('Word') is not None:
949
+ self.word = m.get('Word')
950
+ return self
951
+
952
+
616
953
  class AIAgentOutboundCallConfigTtsConfig(TeaModel):
617
954
  def __init__(
618
955
  self,
956
+ pronunciation_rules: List[AIAgentOutboundCallConfigTtsConfigPronunciationRules] = None,
619
957
  voice_id: str = None,
620
958
  voice_id_list: List[str] = None,
621
959
  ):
960
+ self.pronunciation_rules = pronunciation_rules
622
961
  self.voice_id = voice_id
623
962
  self.voice_id_list = voice_id_list
624
963
 
625
964
  def validate(self):
626
- pass
965
+ if self.pronunciation_rules:
966
+ for k in self.pronunciation_rules:
967
+ if k:
968
+ k.validate()
627
969
 
628
970
  def to_map(self):
629
971
  _map = super().to_map()
@@ -631,6 +973,10 @@ class AIAgentOutboundCallConfigTtsConfig(TeaModel):
631
973
  return _map
632
974
 
633
975
  result = dict()
976
+ result['PronunciationRules'] = []
977
+ if self.pronunciation_rules is not None:
978
+ for k in self.pronunciation_rules:
979
+ result['PronunciationRules'].append(k.to_map() if k else None)
634
980
  if self.voice_id is not None:
635
981
  result['VoiceId'] = self.voice_id
636
982
  if self.voice_id_list is not None:
@@ -639,6 +985,11 @@ class AIAgentOutboundCallConfigTtsConfig(TeaModel):
639
985
 
640
986
  def from_map(self, m: dict = None):
641
987
  m = m or dict()
988
+ self.pronunciation_rules = []
989
+ if m.get('PronunciationRules') is not None:
990
+ for k in m.get('PronunciationRules'):
991
+ temp_model = AIAgentOutboundCallConfigTtsConfigPronunciationRules()
992
+ self.pronunciation_rules.append(temp_model.from_map(k))
642
993
  if m.get('VoiceId') is not None:
643
994
  self.voice_id = m.get('VoiceId')
644
995
  if m.get('VoiceIdList') is not None:
@@ -649,8 +1000,12 @@ class AIAgentOutboundCallConfigTtsConfig(TeaModel):
649
1000
  class AIAgentOutboundCallConfigTurnDetectionConfig(TeaModel):
650
1001
  def __init__(
651
1002
  self,
1003
+ mode: str = None,
1004
+ semantic_wait_duration: int = None,
652
1005
  turn_end_words: List[str] = None,
653
1006
  ):
1007
+ self.mode = mode
1008
+ self.semantic_wait_duration = semantic_wait_duration
654
1009
  self.turn_end_words = turn_end_words
655
1010
 
656
1011
  def validate(self):
@@ -662,12 +1017,20 @@ class AIAgentOutboundCallConfigTurnDetectionConfig(TeaModel):
662
1017
  return _map
663
1018
 
664
1019
  result = dict()
1020
+ if self.mode is not None:
1021
+ result['Mode'] = self.mode
1022
+ if self.semantic_wait_duration is not None:
1023
+ result['SemanticWaitDuration'] = self.semantic_wait_duration
665
1024
  if self.turn_end_words is not None:
666
1025
  result['TurnEndWords'] = self.turn_end_words
667
1026
  return result
668
1027
 
669
1028
  def from_map(self, m: dict = None):
670
1029
  m = m or dict()
1030
+ if m.get('Mode') is not None:
1031
+ self.mode = m.get('Mode')
1032
+ if m.get('SemanticWaitDuration') is not None:
1033
+ self.semantic_wait_duration = m.get('SemanticWaitDuration')
671
1034
  if m.get('TurnEndWords') is not None:
672
1035
  self.turn_end_words = m.get('TurnEndWords')
673
1036
  return self
@@ -679,6 +1042,7 @@ class AIAgentOutboundCallConfig(TeaModel):
679
1042
  asr_config: AIAgentOutboundCallConfigAsrConfig = None,
680
1043
  enable_intelligent_segment: bool = None,
681
1044
  greeting: str = None,
1045
+ greeting_delay: int = None,
682
1046
  interrupt_config: AIAgentOutboundCallConfigInterruptConfig = None,
683
1047
  llm_config: AIAgentOutboundCallConfigLlmConfig = None,
684
1048
  tts_config: AIAgentOutboundCallConfigTtsConfig = None,
@@ -687,6 +1051,7 @@ class AIAgentOutboundCallConfig(TeaModel):
687
1051
  self.asr_config = asr_config
688
1052
  self.enable_intelligent_segment = enable_intelligent_segment
689
1053
  self.greeting = greeting
1054
+ self.greeting_delay = greeting_delay
690
1055
  self.interrupt_config = interrupt_config
691
1056
  self.llm_config = llm_config
692
1057
  self.tts_config = tts_config
@@ -716,6 +1081,8 @@ class AIAgentOutboundCallConfig(TeaModel):
716
1081
  result['EnableIntelligentSegment'] = self.enable_intelligent_segment
717
1082
  if self.greeting is not None:
718
1083
  result['Greeting'] = self.greeting
1084
+ if self.greeting_delay is not None:
1085
+ result['GreetingDelay'] = self.greeting_delay
719
1086
  if self.interrupt_config is not None:
720
1087
  result['InterruptConfig'] = self.interrupt_config.to_map()
721
1088
  if self.llm_config is not None:
@@ -735,6 +1102,8 @@ class AIAgentOutboundCallConfig(TeaModel):
735
1102
  self.enable_intelligent_segment = m.get('EnableIntelligentSegment')
736
1103
  if m.get('Greeting') is not None:
737
1104
  self.greeting = m.get('Greeting')
1105
+ if m.get('GreetingDelay') is not None:
1106
+ self.greeting_delay = m.get('GreetingDelay')
738
1107
  if m.get('InterruptConfig') is not None:
739
1108
  temp_model = AIAgentOutboundCallConfigInterruptConfig()
740
1109
  self.interrupt_config = temp_model.from_map(m['InterruptConfig'])
@@ -12834,7 +13203,12 @@ class CreateMediaLiveChannelRequestVideoSettings(TeaModel):
12834
13203
  video_codec_type: str = None,
12835
13204
  width: int = None,
12836
13205
  ):
12837
- # The height of the output. Valid values: 0 to 2000. If you set it to 0 or leave it empty, the height automatically adapts to the specified width to maintain the original aspect ratio.
13206
+ # The height of the output. If you set it to 0 or leave it empty, the height automatically adapts to the specified width to maintain the original aspect ratio.
13207
+ #
13208
+ # Valid values:
13209
+ #
13210
+ # * For regular transcoding, the larger dimension cannot exceed 3840 px, and the smaller one cannot exceed 2160 px.
13211
+ # * For Narrowband HD™ transcoding, the larger dimension cannot exceed 1920 px, and the smaller one cannot exceed 1080 px.
12838
13212
  self.height = height
12839
13213
  # The name of the video settings. Letters, digits, hyphens (-), and underscores (_) are supported. It can be up to 64 characters in length.
12840
13214
  #
@@ -12844,8 +13218,19 @@ class CreateMediaLiveChannelRequestVideoSettings(TeaModel):
12844
13218
  self.video_codec = video_codec
12845
13219
  # The video encoding settings.
12846
13220
  self.video_codec_setting = video_codec_setting
13221
+ # The video transcoding method. Valid values:
13222
+ #
13223
+ # * NORMAL: regular transcoding
13224
+ # * NBHD: Narrowband HD™ transcoding
13225
+ #
13226
+ # If not specified, regular transcoding is used by default.
12847
13227
  self.video_codec_type = video_codec_type
12848
- # The width of the output. Valid values: 0 to 2000. If you set it to 0 or leave it empty, the width automatically adapts to the specified height to maintain the original aspect ratio.
13228
+ # The width of the output. If you set it to 0 or leave it empty, the width automatically adapts to the specified height to maintain the original aspect ratio.
13229
+ #
13230
+ # Valid values:
13231
+ #
13232
+ # * For regular transcoding, the larger dimension cannot exceed 3840 px, and the smaller one cannot exceed 2160 px.
13233
+ # * For Narrowband HD™ transcoding, the larger dimension cannot exceed 1920 px, and the smaller one cannot exceed 1080 px.
12849
13234
  self.width = width
12850
13235
 
12851
13236
  def validate(self):
@@ -13133,9 +13518,11 @@ class CreateMediaLiveInputRequestInputSettings(TeaModel):
13133
13518
  source_url: str = None,
13134
13519
  stream_name: str = None,
13135
13520
  ):
13521
+ # The ID of the flow from MediaConnect. This parameter is required when Type is set to MEDIA_CONNECT.
13136
13522
  self.flow_id = flow_id
13523
+ # The output name of the MediaConnect flow. This parameter is required when Type is set to MEDIA_CONNECT.
13137
13524
  self.flow_output_name = flow_output_name
13138
- # The source URL where the stream is pulled from. This parameter is required for PULL inputs.
13525
+ # The source URL from which the stream is pulled. This parameter is required for PULL inputs.
13139
13526
  self.source_url = source_url
13140
13527
  # The name of the pushed stream. This parameter is required for PUSH inputs. It can be up to 255 characters in length.
13141
13528
  self.stream_name = stream_name
@@ -13190,7 +13577,7 @@ class CreateMediaLiveInputRequest(TeaModel):
13190
13577
  self.name = name
13191
13578
  # The IDs of the security groups to be associated with the input. This parameter is required for PUSH inputs.
13192
13579
  self.security_group_ids = security_group_ids
13193
- # The input type. Valid values: RTMP_PUSH, RTMP_PULL, SRT_PUSH, and SRT_PULL.
13580
+ # The input type. Valid values: RTMP_PUSH, RTMP_PULL, SRT_PUSH, SRT_PULL, and MEDIA_CONNECT.
13194
13581
  #
13195
13582
  # This parameter is required.
13196
13583
  self.type = type
@@ -13253,7 +13640,7 @@ class CreateMediaLiveInputShrinkRequest(TeaModel):
13253
13640
  self.name = name
13254
13641
  # The IDs of the security groups to be associated with the input. This parameter is required for PUSH inputs.
13255
13642
  self.security_group_ids_shrink = security_group_ids_shrink
13256
- # The input type. Valid values: RTMP_PUSH, RTMP_PULL, SRT_PUSH, and SRT_PULL.
13643
+ # The input type. Valid values: RTMP_PUSH, RTMP_PULL, SRT_PUSH, SRT_PULL, and MEDIA_CONNECT.
13257
13644
  #
13258
13645
  # This parameter is required.
13259
13646
  self.type = type
@@ -34944,7 +35331,9 @@ class GetMediaLiveInputResponseBodyInputInputInfos(TeaModel):
34944
35331
  ):
34945
35332
  # The endpoint that the stream is pushed to. This parameter is returned for PUSH inputs.
34946
35333
  self.dest_host = dest_host
35334
+ # The ID of the flow from MediaConnect.
34947
35335
  self.flow_id = flow_id
35336
+ # The output name of the MediaConnect flow.
34948
35337
  self.flow_output_name = flow_output_name
34949
35338
  # The URL for input monitoring.
34950
35339
  self.monitor_url = monitor_url
@@ -45787,11 +46176,15 @@ class ListAIAgentInstanceResponse(TeaModel):
45787
46176
  class ListAIAgentPhoneNumberRequest(TeaModel):
45788
46177
  def __init__(
45789
46178
  self,
46179
+ number: str = None,
45790
46180
  page_number: int = None,
45791
46181
  page_size: int = None,
46182
+ status: int = None,
45792
46183
  ):
46184
+ self.number = number
45793
46185
  self.page_number = page_number
45794
46186
  self.page_size = page_size
46187
+ self.status = status
45795
46188
 
45796
46189
  def validate(self):
45797
46190
  pass
@@ -45802,18 +46195,26 @@ class ListAIAgentPhoneNumberRequest(TeaModel):
45802
46195
  return _map
45803
46196
 
45804
46197
  result = dict()
46198
+ if self.number is not None:
46199
+ result['Number'] = self.number
45805
46200
  if self.page_number is not None:
45806
46201
  result['PageNumber'] = self.page_number
45807
46202
  if self.page_size is not None:
45808
46203
  result['PageSize'] = self.page_size
46204
+ if self.status is not None:
46205
+ result['Status'] = self.status
45809
46206
  return result
45810
46207
 
45811
46208
  def from_map(self, m: dict = None):
45812
46209
  m = m or dict()
46210
+ if m.get('Number') is not None:
46211
+ self.number = m.get('Number')
45813
46212
  if m.get('PageNumber') is not None:
45814
46213
  self.page_number = m.get('PageNumber')
45815
46214
  if m.get('PageSize') is not None:
45816
46215
  self.page_size = m.get('PageSize')
46216
+ if m.get('Status') is not None:
46217
+ self.status = m.get('Status')
45817
46218
  return self
45818
46219
 
45819
46220
 
@@ -56789,7 +57190,9 @@ class ListMediaLiveInputsResponseBodyInputsInputInfos(TeaModel):
56789
57190
  ):
56790
57191
  # The endpoint that the stream is pushed to. This parameter is returned for PUSH inputs.
56791
57192
  self.dest_host = dest_host
57193
+ # The ID of the flow from MediaConnect.
56792
57194
  self.flow_id = flow_id
57195
+ # The output name of the MediaConnect flow.
56793
57196
  self.flow_output_name = flow_output_name
56794
57197
  # The URL for input monitoring.
56795
57198
  self.monitor_url = monitor_url
@@ -75575,6 +75978,7 @@ class StartAIAgentOutboundCallRequest(TeaModel):
75575
75978
  called_number: str = None,
75576
75979
  caller_number: str = None,
75577
75980
  config: AIAgentOutboundCallConfig = None,
75981
+ ims_aiagent_free_ob_call: str = None,
75578
75982
  session_id: str = None,
75579
75983
  user_data: str = None,
75580
75984
  ):
@@ -75585,6 +75989,7 @@ class StartAIAgentOutboundCallRequest(TeaModel):
75585
75989
  # This parameter is required.
75586
75990
  self.caller_number = caller_number
75587
75991
  self.config = config
75992
+ self.ims_aiagent_free_ob_call = ims_aiagent_free_ob_call
75588
75993
  self.session_id = session_id
75589
75994
  self.user_data = user_data
75590
75995
 
@@ -75606,6 +76011,8 @@ class StartAIAgentOutboundCallRequest(TeaModel):
75606
76011
  result['CallerNumber'] = self.caller_number
75607
76012
  if self.config is not None:
75608
76013
  result['Config'] = self.config.to_map()
76014
+ if self.ims_aiagent_free_ob_call is not None:
76015
+ result['ImsAIAgentFreeObCall'] = self.ims_aiagent_free_ob_call
75609
76016
  if self.session_id is not None:
75610
76017
  result['SessionId'] = self.session_id
75611
76018
  if self.user_data is not None:
@@ -75623,6 +76030,8 @@ class StartAIAgentOutboundCallRequest(TeaModel):
75623
76030
  if m.get('Config') is not None:
75624
76031
  temp_model = AIAgentOutboundCallConfig()
75625
76032
  self.config = temp_model.from_map(m['Config'])
76033
+ if m.get('ImsAIAgentFreeObCall') is not None:
76034
+ self.ims_aiagent_free_ob_call = m.get('ImsAIAgentFreeObCall')
75626
76035
  if m.get('SessionId') is not None:
75627
76036
  self.session_id = m.get('SessionId')
75628
76037
  if m.get('UserData') is not None:
@@ -75637,6 +76046,7 @@ class StartAIAgentOutboundCallShrinkRequest(TeaModel):
75637
76046
  called_number: str = None,
75638
76047
  caller_number: str = None,
75639
76048
  config_shrink: str = None,
76049
+ ims_aiagent_free_ob_call: str = None,
75640
76050
  session_id: str = None,
75641
76051
  user_data: str = None,
75642
76052
  ):
@@ -75647,6 +76057,7 @@ class StartAIAgentOutboundCallShrinkRequest(TeaModel):
75647
76057
  # This parameter is required.
75648
76058
  self.caller_number = caller_number
75649
76059
  self.config_shrink = config_shrink
76060
+ self.ims_aiagent_free_ob_call = ims_aiagent_free_ob_call
75650
76061
  self.session_id = session_id
75651
76062
  self.user_data = user_data
75652
76063
 
@@ -75667,6 +76078,8 @@ class StartAIAgentOutboundCallShrinkRequest(TeaModel):
75667
76078
  result['CallerNumber'] = self.caller_number
75668
76079
  if self.config_shrink is not None:
75669
76080
  result['Config'] = self.config_shrink
76081
+ if self.ims_aiagent_free_ob_call is not None:
76082
+ result['ImsAIAgentFreeObCall'] = self.ims_aiagent_free_ob_call
75670
76083
  if self.session_id is not None:
75671
76084
  result['SessionId'] = self.session_id
75672
76085
  if self.user_data is not None:
@@ -75683,6 +76096,8 @@ class StartAIAgentOutboundCallShrinkRequest(TeaModel):
75683
76096
  self.caller_number = m.get('CallerNumber')
75684
76097
  if m.get('Config') is not None:
75685
76098
  self.config_shrink = m.get('Config')
76099
+ if m.get('ImsAIAgentFreeObCall') is not None:
76100
+ self.ims_aiagent_free_ob_call = m.get('ImsAIAgentFreeObCall')
75686
76101
  if m.get('SessionId') is not None:
75687
76102
  self.session_id = m.get('SessionId')
75688
76103
  if m.get('UserData') is not None:
@@ -79667,9 +80082,13 @@ class SubmitHighlightExtractionJobRequest(TeaModel):
79667
80082
  output_config: str = None,
79668
80083
  user_data: str = None,
79669
80084
  ):
80085
+ # The client token used to ensure the idempotency of the request.
79670
80086
  self.client_token = client_token
80087
+ # The input configuration.
79671
80088
  self.input_config = input_config
80089
+ # The output configuration.
79672
80090
  self.output_config = output_config
80091
+ # The user-defined data, including the business and callback configurations. For more information, see [UserData](~~357745#section-urj-v3f-0s1~~).
79673
80092
  self.user_data = user_data
79674
80093
 
79675
80094
  def validate(self):
@@ -79710,8 +80129,9 @@ class SubmitHighlightExtractionJobResponseBody(TeaModel):
79710
80129
  job_id: str = None,
79711
80130
  request_id: str = None,
79712
80131
  ):
80132
+ # The ID of the highlight extraction task.
79713
80133
  self.job_id = job_id
79714
- # Id of the request
80134
+ # The ID of the request.
79715
80135
  self.request_id = request_id
79716
80136
 
79717
80137
  def validate(self):
@@ -83811,9 +84231,13 @@ class SubmitScreenMediaHighlightsJobRequest(TeaModel):
83811
84231
  output_config: str = None,
83812
84232
  user_data: str = None,
83813
84233
  ):
84234
+ # The editing configuration. For detailed parameters, see [EditingConfig](~~2863940#9b05519d46e0x~~).
83814
84235
  self.editing_config = editing_config
84236
+ # The input configuration. For detailed parameters, see [InputConfig](~~2863940#dda38bf6ec2pk~~).
83815
84237
  self.input_config = input_config
84238
+ # The output configuration. For detailed parameters, see [OutputConfig](~~2863940#4111a373d0xbz~~).
83816
84239
  self.output_config = output_config
84240
+ # The user-defined data, including the business and callback configurations. For more information, see [UserData](https://help.aliyun.com/document_detail/357745.html).
83817
84241
  self.user_data = user_data
83818
84242
 
83819
84243
  def validate(self):
@@ -83854,7 +84278,9 @@ class SubmitScreenMediaHighlightsJobResponseBody(TeaModel):
83854
84278
  job_id: str = None,
83855
84279
  request_id: str = None,
83856
84280
  ):
84281
+ # The ID of the task.
83857
84282
  self.job_id = job_id
84283
+ # The request ID.
83858
84284
  self.request_id = request_id
83859
84285
 
83860
84286
  def validate(self):
@@ -98176,7 +98602,12 @@ class UpdateMediaLiveChannelRequestVideoSettings(TeaModel):
98176
98602
  video_codec_type: str = None,
98177
98603
  width: int = None,
98178
98604
  ):
98179
- # The height of the output. Valid values: 0 to 2000. If you set it to 0 or leave it empty, the height automatically adapts to the specified width to maintain the original aspect ratio.
98605
+ # The height of the output. If you set it to 0 or leave it empty, the height automatically adapts to the specified width to maintain the original aspect ratio.
98606
+ #
98607
+ # Valid values:
98608
+ #
98609
+ # * For regular transcoding, the larger dimension cannot exceed 3840 px, and the smaller one cannot exceed 2160 px.
98610
+ # * For Narrowband HD™ transcoding, the larger dimension cannot exceed 1920 px, and the smaller one cannot exceed 1080 px.
98180
98611
  self.height = height
98181
98612
  # The name of the video settings. Letters, digits, hyphens (-), and underscores (_) are supported. It can be up to 64 characters in length.
98182
98613
  #
@@ -98186,8 +98617,19 @@ class UpdateMediaLiveChannelRequestVideoSettings(TeaModel):
98186
98617
  self.video_codec = video_codec
98187
98618
  # The video encoding settings.
98188
98619
  self.video_codec_setting = video_codec_setting
98620
+ # The video transcoding method. Valid values:
98621
+ #
98622
+ # * NORMAL: regular transcoding
98623
+ # * NBHD: Narrowband HD™ transcoding
98624
+ #
98625
+ # If not specified, regular transcoding is used by default.
98189
98626
  self.video_codec_type = video_codec_type
98190
- # The width of the output. Valid values: 0 to 2000. If you set it to 0 or leave it empty, the width automatically adapts to the specified height to maintain the original aspect ratio.
98627
+ # The width of the output. If you set it to 0 or leave it empty, the width automatically adapts to the specified height to maintain the original aspect ratio.
98628
+ #
98629
+ # Valid values:
98630
+ #
98631
+ # * For regular transcoding, the larger dimension cannot exceed 3840 px, and the smaller one cannot exceed 2160 px.
98632
+ # * For Narrowband HD™ transcoding, the larger dimension cannot exceed 1920 px, and the smaller one cannot exceed 1080 px.
98191
98633
  self.width = width
98192
98634
 
98193
98635
  def validate(self):
@@ -98486,9 +98928,11 @@ class UpdateMediaLiveInputRequestInputSettings(TeaModel):
98486
98928
  source_url: str = None,
98487
98929
  stream_name: str = None,
98488
98930
  ):
98931
+ # The ID of the flow from MediaConnect. This parameter is required when Type is set to MEDIA_CONNECT.
98489
98932
  self.flow_id = flow_id
98933
+ # The output name of the MediaConnect flow. This parameter is required when Type is set to MEDIA_CONNECT.
98490
98934
  self.flow_output_name = flow_output_name
98491
- # The source URL where the stream is pulled from. This parameter is required for PULL inputs.
98935
+ # The source URL from which the stream is pulled. This parameter is required for PULL inputs.
98492
98936
  self.source_url = source_url
98493
98937
  # The name of the pushed stream. This parameter is required for PUSH inputs. It can be up to 255 characters in length.
98494
98938
  self.stream_name = stream_name
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: alibabacloud-ice20201109
3
- Version: 6.4.0
3
+ Version: 6.4.1
4
4
  Summary: Alibaba Cloud ICE (20201109) SDK Library for Python
5
5
  Home-page: https://github.com/aliyun/alibabacloud-python-sdk
6
6
  Author: Alibaba Cloud SDK
@@ -24,7 +24,7 @@ from setuptools import setup, find_packages
24
24
  """
25
25
  setup module for alibabacloud_ice20201109.
26
26
 
27
- Created on 19/06/2025
27
+ Created on 01/07/2025
28
28
 
29
29
  @author: Alibaba Cloud SDK
30
30
  """
@@ -1 +0,0 @@
1
- __version__ = '6.4.0'