alibabacloud-aimiaobi20230801 1.37.0-py3-none-any.whl → 1.37.1-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
@@ -1 +1 @@
- __version__ = '1.37.0'
+ __version__ = '1.37.1'
@@ -298,15 +298,29 @@ class Client(OpenApiClient):
  OpenApiUtilClient.convert(tmp_req, request)
  if not UtilClient.is_unset(tmp_req.color_words):
  request.color_words_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.color_words, 'ColorWords', 'json')
- body = {}
+ if not UtilClient.is_unset(tmp_req.stickers):
+ request.stickers_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.stickers, 'Stickers', 'json')
+ body = {}
+ if not UtilClient.is_unset(request.close_music):
+ body['CloseMusic'] = request.close_music
+ if not UtilClient.is_unset(request.close_subtitle):
+ body['CloseSubtitle'] = request.close_subtitle
+ if not UtilClient.is_unset(request.close_voice):
+ body['CloseVoice'] = request.close_voice
  if not UtilClient.is_unset(request.color_words_shrink):
  body['ColorWords'] = request.color_words_shrink
+ if not UtilClient.is_unset(request.custom_voice_url):
+ body['CustomVoiceUrl'] = request.custom_voice_url
+ if not UtilClient.is_unset(request.custom_voice_volume):
+ body['CustomVoiceVolume'] = request.custom_voice_volume
  if not UtilClient.is_unset(request.height):
  body['Height'] = request.height
  if not UtilClient.is_unset(request.music_url):
  body['MusicUrl'] = request.music_url
  if not UtilClient.is_unset(request.music_volume):
  body['MusicVolume'] = request.music_volume
+ if not UtilClient.is_unset(request.stickers_shrink):
+ body['Stickers'] = request.stickers_shrink
  if not UtilClient.is_unset(request.subtitle_font_size):
  body['SubtitleFontSize'] = request.subtitle_font_size
  if not UtilClient.is_unset(request.task_id):
@@ -355,15 +369,29 @@ class Client(OpenApiClient):
  OpenApiUtilClient.convert(tmp_req, request)
  if not UtilClient.is_unset(tmp_req.color_words):
  request.color_words_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.color_words, 'ColorWords', 'json')
- body = {}
+ if not UtilClient.is_unset(tmp_req.stickers):
+ request.stickers_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.stickers, 'Stickers', 'json')
+ body = {}
+ if not UtilClient.is_unset(request.close_music):
+ body['CloseMusic'] = request.close_music
+ if not UtilClient.is_unset(request.close_subtitle):
+ body['CloseSubtitle'] = request.close_subtitle
+ if not UtilClient.is_unset(request.close_voice):
+ body['CloseVoice'] = request.close_voice
  if not UtilClient.is_unset(request.color_words_shrink):
  body['ColorWords'] = request.color_words_shrink
+ if not UtilClient.is_unset(request.custom_voice_url):
+ body['CustomVoiceUrl'] = request.custom_voice_url
+ if not UtilClient.is_unset(request.custom_voice_volume):
+ body['CustomVoiceVolume'] = request.custom_voice_volume
  if not UtilClient.is_unset(request.height):
  body['Height'] = request.height
  if not UtilClient.is_unset(request.music_url):
  body['MusicUrl'] = request.music_url
  if not UtilClient.is_unset(request.music_volume):
  body['MusicVolume'] = request.music_volume
+ if not UtilClient.is_unset(request.stickers_shrink):
+ body['Stickers'] = request.stickers_shrink
  if not UtilClient.is_unset(request.subtitle_font_size):
  body['SubtitleFontSize'] = request.subtitle_font_size
  if not UtilClient.is_unset(request.task_id):
@@ -776,15 +804,25 @@ class Client(OpenApiClient):
  request.reference_video_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.reference_video, 'ReferenceVideo', 'json')
  if not UtilClient.is_unset(tmp_req.source_videos):
  request.source_videos_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.source_videos, 'SourceVideos', 'json')
+ if not UtilClient.is_unset(tmp_req.video_roles):
+ request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'VideoRoles', 'json')
  body = {}
  if not UtilClient.is_unset(request.anlysis_prompt):
  body['AnlysisPrompt'] = request.anlysis_prompt
+ if not UtilClient.is_unset(request.face_identity_similarity_min_score):
+ body['FaceIdentitySimilarityMinScore'] = request.face_identity_similarity_min_score
  if not UtilClient.is_unset(request.reference_video_shrink):
  body['ReferenceVideo'] = request.reference_video_shrink
+ if not UtilClient.is_unset(request.remove_subtitle):
+ body['RemoveSubtitle'] = request.remove_subtitle
  if not UtilClient.is_unset(request.source_videos_shrink):
  body['SourceVideos'] = request.source_videos_shrink
  if not UtilClient.is_unset(request.split_interval):
  body['SplitInterval'] = request.split_interval
+ if not UtilClient.is_unset(request.video_roles_shrink):
+ body['VideoRoles'] = request.video_roles_shrink
+ if not UtilClient.is_unset(request.video_shot_face_identity_count):
+ body['VideoShotFaceIdentityCount'] = request.video_shot_face_identity_count
  if not UtilClient.is_unset(request.workspace_id):
  body['WorkspaceId'] = request.workspace_id
  req = open_api_models.OpenApiRequest(
@@ -825,15 +863,25 @@ class Client(OpenApiClient):
  request.reference_video_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.reference_video, 'ReferenceVideo', 'json')
  if not UtilClient.is_unset(tmp_req.source_videos):
  request.source_videos_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.source_videos, 'SourceVideos', 'json')
+ if not UtilClient.is_unset(tmp_req.video_roles):
+ request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'VideoRoles', 'json')
  body = {}
  if not UtilClient.is_unset(request.anlysis_prompt):
  body['AnlysisPrompt'] = request.anlysis_prompt
+ if not UtilClient.is_unset(request.face_identity_similarity_min_score):
+ body['FaceIdentitySimilarityMinScore'] = request.face_identity_similarity_min_score
  if not UtilClient.is_unset(request.reference_video_shrink):
  body['ReferenceVideo'] = request.reference_video_shrink
+ if not UtilClient.is_unset(request.remove_subtitle):
+ body['RemoveSubtitle'] = request.remove_subtitle
  if not UtilClient.is_unset(request.source_videos_shrink):
  body['SourceVideos'] = request.source_videos_shrink
  if not UtilClient.is_unset(request.split_interval):
  body['SplitInterval'] = request.split_interval
+ if not UtilClient.is_unset(request.video_roles_shrink):
+ body['VideoRoles'] = request.video_roles_shrink
+ if not UtilClient.is_unset(request.video_shot_face_identity_count):
+ body['VideoShotFaceIdentityCount'] = request.video_shot_face_identity_count
  if not UtilClient.is_unset(request.workspace_id):
  body['WorkspaceId'] = request.workspace_id
  req = open_api_models.OpenApiRequest(
@@ -1412,13 +1412,88 @@ class AsyncCreateClipsTaskRequestColorWords(TeaModel):
  return self


+ class AsyncCreateClipsTaskRequestStickers(TeaModel):
+ def __init__(
+ self,
+ duration: int = None,
+ dync_frames: int = None,
+ height: int = None,
+ timeline_in: int = None,
+ url: str = None,
+ width: int = None,
+ x: float = None,
+ y: float = None,
+ ):
+ self.duration = duration
+ self.dync_frames = dync_frames
+ self.height = height
+ self.timeline_in = timeline_in
+ self.url = url
+ self.width = width
+ self.x = x
+ self.y = y
+
+ def validate(self):
+ pass
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.duration is not None:
+ result['Duration'] = self.duration
+ if self.dync_frames is not None:
+ result['DyncFrames'] = self.dync_frames
+ if self.height is not None:
+ result['Height'] = self.height
+ if self.timeline_in is not None:
+ result['TimelineIn'] = self.timeline_in
+ if self.url is not None:
+ result['Url'] = self.url
+ if self.width is not None:
+ result['Width'] = self.width
+ if self.x is not None:
+ result['X'] = self.x
+ if self.y is not None:
+ result['Y'] = self.y
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('Duration') is not None:
+ self.duration = m.get('Duration')
+ if m.get('DyncFrames') is not None:
+ self.dync_frames = m.get('DyncFrames')
+ if m.get('Height') is not None:
+ self.height = m.get('Height')
+ if m.get('TimelineIn') is not None:
+ self.timeline_in = m.get('TimelineIn')
+ if m.get('Url') is not None:
+ self.url = m.get('Url')
+ if m.get('Width') is not None:
+ self.width = m.get('Width')
+ if m.get('X') is not None:
+ self.x = m.get('X')
+ if m.get('Y') is not None:
+ self.y = m.get('Y')
+ return self
+
+
  class AsyncCreateClipsTaskRequest(TeaModel):
  def __init__(
  self,
+ close_music: bool = None,
+ close_subtitle: bool = None,
+ close_voice: bool = None,
  color_words: List[AsyncCreateClipsTaskRequestColorWords] = None,
+ custom_voice_url: str = None,
+ custom_voice_volume: int = None,
  height: int = None,
  music_url: str = None,
  music_volume: int = None,
+ stickers: List[AsyncCreateClipsTaskRequestStickers] = None,
  subtitle_font_size: int = None,
  task_id: str = None,
  voice_style: str = None,
@@ -1426,10 +1501,16 @@ class AsyncCreateClipsTaskRequest(TeaModel):
  width: int = None,
  workspace_id: str = None,
  ):
+ self.close_music = close_music
+ self.close_subtitle = close_subtitle
+ self.close_voice = close_voice
  self.color_words = color_words
+ self.custom_voice_url = custom_voice_url
+ self.custom_voice_volume = custom_voice_volume
  self.height = height
  self.music_url = music_url
  self.music_volume = music_volume
+ self.stickers = stickers
  self.subtitle_font_size = subtitle_font_size
  # This parameter is required.
  self.task_id = task_id
@@ -1444,6 +1525,10 @@ class AsyncCreateClipsTaskRequest(TeaModel):
  for k in self.color_words:
  if k:
  k.validate()
+ if self.stickers:
+ for k in self.stickers:
+ if k:
+ k.validate()

  def to_map(self):
  _map = super().to_map()
@@ -1451,16 +1536,30 @@ class AsyncCreateClipsTaskRequest(TeaModel):
  return _map

  result = dict()
+ if self.close_music is not None:
+ result['CloseMusic'] = self.close_music
+ if self.close_subtitle is not None:
+ result['CloseSubtitle'] = self.close_subtitle
+ if self.close_voice is not None:
+ result['CloseVoice'] = self.close_voice
  result['ColorWords'] = []
  if self.color_words is not None:
  for k in self.color_words:
  result['ColorWords'].append(k.to_map() if k else None)
+ if self.custom_voice_url is not None:
+ result['CustomVoiceUrl'] = self.custom_voice_url
+ if self.custom_voice_volume is not None:
+ result['CustomVoiceVolume'] = self.custom_voice_volume
  if self.height is not None:
  result['Height'] = self.height
  if self.music_url is not None:
  result['MusicUrl'] = self.music_url
  if self.music_volume is not None:
  result['MusicVolume'] = self.music_volume
+ result['Stickers'] = []
+ if self.stickers is not None:
+ for k in self.stickers:
+ result['Stickers'].append(k.to_map() if k else None)
  if self.subtitle_font_size is not None:
  result['SubtitleFontSize'] = self.subtitle_font_size
  if self.task_id is not None:
@@ -1477,17 +1576,32 @@ class AsyncCreateClipsTaskRequest(TeaModel):

  def from_map(self, m: dict = None):
  m = m or dict()
+ if m.get('CloseMusic') is not None:
+ self.close_music = m.get('CloseMusic')
+ if m.get('CloseSubtitle') is not None:
+ self.close_subtitle = m.get('CloseSubtitle')
+ if m.get('CloseVoice') is not None:
+ self.close_voice = m.get('CloseVoice')
  self.color_words = []
  if m.get('ColorWords') is not None:
  for k in m.get('ColorWords'):
  temp_model = AsyncCreateClipsTaskRequestColorWords()
  self.color_words.append(temp_model.from_map(k))
+ if m.get('CustomVoiceUrl') is not None:
+ self.custom_voice_url = m.get('CustomVoiceUrl')
+ if m.get('CustomVoiceVolume') is not None:
+ self.custom_voice_volume = m.get('CustomVoiceVolume')
  if m.get('Height') is not None:
  self.height = m.get('Height')
  if m.get('MusicUrl') is not None:
  self.music_url = m.get('MusicUrl')
  if m.get('MusicVolume') is not None:
  self.music_volume = m.get('MusicVolume')
+ self.stickers = []
+ if m.get('Stickers') is not None:
+ for k in m.get('Stickers'):
+ temp_model = AsyncCreateClipsTaskRequestStickers()
+ self.stickers.append(temp_model.from_map(k))
  if m.get('SubtitleFontSize') is not None:
  self.subtitle_font_size = m.get('SubtitleFontSize')
  if m.get('TaskId') is not None:
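The hunks above add optional mute switches (CloseMusic/CloseSubtitle/CloseVoice), a custom voice track (CustomVoiceUrl/CustomVoiceVolume) and a new Stickers list to AsyncCreateClipsTaskRequest. A minimal usage sketch follows; the class and field names come from the model definitions in this diff, while the import alias, placeholder URLs/IDs and all numeric values are assumptions, not documented defaults.

```python
# Hedged sketch of the new AsyncCreateClipsTask options added in 1.37.1.
from alibabacloud_aimiaobi20230801 import models as miaobi_models

sticker = miaobi_models.AsyncCreateClipsTaskRequestStickers(
    url='https://example.com/sticker.png',  # hypothetical asset URL
    width=200,
    height=200,
    x=0.1,           # X/Y are floats; pixel-vs-ratio semantics are not documented in this diff
    y=0.1,
    timeline_in=0,   # illustrative timing values
    duration=5,
)

request = miaobi_models.AsyncCreateClipsTaskRequest(
    task_id='your-task-id',            # marked "required" in the model definition
    workspace_id='your-workspace-id',
    close_music=False,
    close_subtitle=False,
    close_voice=True,                  # e.g. mute the generated voice-over
    custom_voice_url='https://example.com/voice.mp3',  # hypothetical audio URL
    custom_voice_volume=80,
    stickers=[sticker],
)
```

Because every new parameter defaults to None and is only serialized when set, existing callers that do not pass these fields should keep working unchanged.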
@@ -1506,10 +1620,16 @@ class AsyncCreateClipsTaskRequest(TeaModel):
  class AsyncCreateClipsTaskShrinkRequest(TeaModel):
  def __init__(
  self,
+ close_music: bool = None,
+ close_subtitle: bool = None,
+ close_voice: bool = None,
  color_words_shrink: str = None,
+ custom_voice_url: str = None,
+ custom_voice_volume: int = None,
  height: int = None,
  music_url: str = None,
  music_volume: int = None,
+ stickers_shrink: str = None,
  subtitle_font_size: int = None,
  task_id: str = None,
  voice_style: str = None,
@@ -1517,10 +1637,16 @@ class AsyncCreateClipsTaskShrinkRequest(TeaModel):
  width: int = None,
  workspace_id: str = None,
  ):
+ self.close_music = close_music
+ self.close_subtitle = close_subtitle
+ self.close_voice = close_voice
  self.color_words_shrink = color_words_shrink
+ self.custom_voice_url = custom_voice_url
+ self.custom_voice_volume = custom_voice_volume
  self.height = height
  self.music_url = music_url
  self.music_volume = music_volume
+ self.stickers_shrink = stickers_shrink
  self.subtitle_font_size = subtitle_font_size
  # This parameter is required.
  self.task_id = task_id
@@ -1539,14 +1665,26 @@ class AsyncCreateClipsTaskShrinkRequest(TeaModel):
  return _map

  result = dict()
+ if self.close_music is not None:
+ result['CloseMusic'] = self.close_music
+ if self.close_subtitle is not None:
+ result['CloseSubtitle'] = self.close_subtitle
+ if self.close_voice is not None:
+ result['CloseVoice'] = self.close_voice
  if self.color_words_shrink is not None:
  result['ColorWords'] = self.color_words_shrink
+ if self.custom_voice_url is not None:
+ result['CustomVoiceUrl'] = self.custom_voice_url
+ if self.custom_voice_volume is not None:
+ result['CustomVoiceVolume'] = self.custom_voice_volume
  if self.height is not None:
  result['Height'] = self.height
  if self.music_url is not None:
  result['MusicUrl'] = self.music_url
  if self.music_volume is not None:
  result['MusicVolume'] = self.music_volume
+ if self.stickers_shrink is not None:
+ result['Stickers'] = self.stickers_shrink
  if self.subtitle_font_size is not None:
  result['SubtitleFontSize'] = self.subtitle_font_size
  if self.task_id is not None:
@@ -1563,14 +1701,26 @@ class AsyncCreateClipsTaskShrinkRequest(TeaModel):

  def from_map(self, m: dict = None):
  m = m or dict()
+ if m.get('CloseMusic') is not None:
+ self.close_music = m.get('CloseMusic')
+ if m.get('CloseSubtitle') is not None:
+ self.close_subtitle = m.get('CloseSubtitle')
+ if m.get('CloseVoice') is not None:
+ self.close_voice = m.get('CloseVoice')
  if m.get('ColorWords') is not None:
  self.color_words_shrink = m.get('ColorWords')
+ if m.get('CustomVoiceUrl') is not None:
+ self.custom_voice_url = m.get('CustomVoiceUrl')
+ if m.get('CustomVoiceVolume') is not None:
+ self.custom_voice_volume = m.get('CustomVoiceVolume')
  if m.get('Height') is not None:
  self.height = m.get('Height')
  if m.get('MusicUrl') is not None:
  self.music_url = m.get('MusicUrl')
  if m.get('MusicVolume') is not None:
  self.music_volume = m.get('MusicVolume')
+ if m.get('Stickers') is not None:
+ self.stickers_shrink = m.get('Stickers')
  if m.get('SubtitleFontSize') is not None:
  self.subtitle_font_size = m.get('SubtitleFontSize')
  if m.get('TaskId') is not None:
@@ -2497,20 +2647,108 @@ class AsyncUploadVideoRequestSourceVideos(TeaModel):
  return self


+ class AsyncUploadVideoRequestVideoRolesRoleUrls(TeaModel):
+ def __init__(
+ self,
+ role_file_name: str = None,
+ role_file_url: str = None,
+ ):
+ self.role_file_name = role_file_name
+ self.role_file_url = role_file_url
+
+ def validate(self):
+ pass
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.role_file_name is not None:
+ result['RoleFileName'] = self.role_file_name
+ if self.role_file_url is not None:
+ result['RoleFileUrl'] = self.role_file_url
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('RoleFileName') is not None:
+ self.role_file_name = m.get('RoleFileName')
+ if m.get('RoleFileUrl') is not None:
+ self.role_file_url = m.get('RoleFileUrl')
+ return self
+
+
+ class AsyncUploadVideoRequestVideoRoles(TeaModel):
+ def __init__(
+ self,
+ role_info: str = None,
+ role_name: str = None,
+ role_urls: List[AsyncUploadVideoRequestVideoRolesRoleUrls] = None,
+ ):
+ self.role_info = role_info
+ self.role_name = role_name
+ self.role_urls = role_urls
+
+ def validate(self):
+ if self.role_urls:
+ for k in self.role_urls:
+ if k:
+ k.validate()
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.role_info is not None:
+ result['RoleInfo'] = self.role_info
+ if self.role_name is not None:
+ result['RoleName'] = self.role_name
+ result['RoleUrls'] = []
+ if self.role_urls is not None:
+ for k in self.role_urls:
+ result['RoleUrls'].append(k.to_map() if k else None)
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('RoleInfo') is not None:
+ self.role_info = m.get('RoleInfo')
+ if m.get('RoleName') is not None:
+ self.role_name = m.get('RoleName')
+ self.role_urls = []
+ if m.get('RoleUrls') is not None:
+ for k in m.get('RoleUrls'):
+ temp_model = AsyncUploadVideoRequestVideoRolesRoleUrls()
+ self.role_urls.append(temp_model.from_map(k))
+ return self
+
+
  class AsyncUploadVideoRequest(TeaModel):
  def __init__(
  self,
  anlysis_prompt: str = None,
+ face_identity_similarity_min_score: float = None,
  reference_video: AsyncUploadVideoRequestReferenceVideo = None,
+ remove_subtitle: bool = None,
  source_videos: List[AsyncUploadVideoRequestSourceVideos] = None,
  split_interval: int = None,
+ video_roles: List[AsyncUploadVideoRequestVideoRoles] = None,
+ video_shot_face_identity_count: int = None,
  workspace_id: str = None,
  ):
  self.anlysis_prompt = anlysis_prompt
+ self.face_identity_similarity_min_score = face_identity_similarity_min_score
  self.reference_video = reference_video
+ self.remove_subtitle = remove_subtitle
  # This parameter is required.
  self.source_videos = source_videos
  self.split_interval = split_interval
+ self.video_roles = video_roles
+ self.video_shot_face_identity_count = video_shot_face_identity_count
  # This parameter is required.
  self.workspace_id = workspace_id

@@ -2521,6 +2759,10 @@ class AsyncUploadVideoRequest(TeaModel):
  for k in self.source_videos:
  if k:
  k.validate()
+ if self.video_roles:
+ for k in self.video_roles:
+ if k:
+ k.validate()

  def to_map(self):
  _map = super().to_map()
@@ -2530,14 +2772,24 @@ class AsyncUploadVideoRequest(TeaModel):
  result = dict()
  if self.anlysis_prompt is not None:
  result['AnlysisPrompt'] = self.anlysis_prompt
+ if self.face_identity_similarity_min_score is not None:
+ result['FaceIdentitySimilarityMinScore'] = self.face_identity_similarity_min_score
  if self.reference_video is not None:
  result['ReferenceVideo'] = self.reference_video.to_map()
+ if self.remove_subtitle is not None:
+ result['RemoveSubtitle'] = self.remove_subtitle
  result['SourceVideos'] = []
  if self.source_videos is not None:
  for k in self.source_videos:
  result['SourceVideos'].append(k.to_map() if k else None)
  if self.split_interval is not None:
  result['SplitInterval'] = self.split_interval
+ result['VideoRoles'] = []
+ if self.video_roles is not None:
+ for k in self.video_roles:
+ result['VideoRoles'].append(k.to_map() if k else None)
+ if self.video_shot_face_identity_count is not None:
+ result['VideoShotFaceIdentityCount'] = self.video_shot_face_identity_count
  if self.workspace_id is not None:
  result['WorkspaceId'] = self.workspace_id
  return result
@@ -2546,9 +2798,13 @@ class AsyncUploadVideoRequest(TeaModel):
  m = m or dict()
  if m.get('AnlysisPrompt') is not None:
  self.anlysis_prompt = m.get('AnlysisPrompt')
+ if m.get('FaceIdentitySimilarityMinScore') is not None:
+ self.face_identity_similarity_min_score = m.get('FaceIdentitySimilarityMinScore')
  if m.get('ReferenceVideo') is not None:
  temp_model = AsyncUploadVideoRequestReferenceVideo()
  self.reference_video = temp_model.from_map(m['ReferenceVideo'])
+ if m.get('RemoveSubtitle') is not None:
+ self.remove_subtitle = m.get('RemoveSubtitle')
  self.source_videos = []
  if m.get('SourceVideos') is not None:
  for k in m.get('SourceVideos'):
@@ -2556,6 +2812,13 @@ class AsyncUploadVideoRequest(TeaModel):
  self.source_videos.append(temp_model.from_map(k))
  if m.get('SplitInterval') is not None:
  self.split_interval = m.get('SplitInterval')
+ self.video_roles = []
+ if m.get('VideoRoles') is not None:
+ for k in m.get('VideoRoles'):
+ temp_model = AsyncUploadVideoRequestVideoRoles()
+ self.video_roles.append(temp_model.from_map(k))
+ if m.get('VideoShotFaceIdentityCount') is not None:
+ self.video_shot_face_identity_count = m.get('VideoShotFaceIdentityCount')
  if m.get('WorkspaceId') is not None:
  self.workspace_id = m.get('WorkspaceId')
  return self
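AsyncUploadVideoRequest gains nested role models: each VideoRoles entry carries an optional RoleInfo/RoleName plus a list of RoleUrls items, alongside the new face-identity controls and RemoveSubtitle flag. A minimal sketch follows; only the class and field names are taken from this diff, and every value, file name, URL and threshold is a placeholder assumption.

```python
# Hedged sketch of the new AsyncUploadVideo role/identity options.
from alibabacloud_aimiaobi20230801 import models as miaobi_models

role = miaobi_models.AsyncUploadVideoRequestVideoRoles(
    role_name='host',
    role_info='main presenter',  # free-form description; exact semantics not shown in this diff
    role_urls=[
        miaobi_models.AsyncUploadVideoRequestVideoRolesRoleUrls(
            role_file_name='host_face.jpg',
            role_file_url='https://example.com/host_face.jpg',
        ),
    ],
)

request = miaobi_models.AsyncUploadVideoRequest(
    workspace_id='your-workspace-id',   # marked "required" in the model definition
    source_videos=[
        miaobi_models.AsyncUploadVideoRequestSourceVideos(),  # required; its fields are not part of this diff
    ],
    video_roles=[role],
    video_shot_face_identity_count=1,           # illustrative value
    face_identity_similarity_min_score=0.8,     # illustrative threshold
    remove_subtitle=True,
)
```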
@@ -2565,16 +2828,24 @@ class AsyncUploadVideoShrinkRequest(TeaModel):
  def __init__(
  self,
  anlysis_prompt: str = None,
+ face_identity_similarity_min_score: float = None,
  reference_video_shrink: str = None,
+ remove_subtitle: bool = None,
  source_videos_shrink: str = None,
  split_interval: int = None,
+ video_roles_shrink: str = None,
+ video_shot_face_identity_count: int = None,
  workspace_id: str = None,
  ):
  self.anlysis_prompt = anlysis_prompt
+ self.face_identity_similarity_min_score = face_identity_similarity_min_score
  self.reference_video_shrink = reference_video_shrink
+ self.remove_subtitle = remove_subtitle
  # This parameter is required.
  self.source_videos_shrink = source_videos_shrink
  self.split_interval = split_interval
+ self.video_roles_shrink = video_roles_shrink
+ self.video_shot_face_identity_count = video_shot_face_identity_count
  # This parameter is required.
  self.workspace_id = workspace_id

@@ -2589,12 +2860,20 @@ class AsyncUploadVideoShrinkRequest(TeaModel):
  result = dict()
  if self.anlysis_prompt is not None:
  result['AnlysisPrompt'] = self.anlysis_prompt
+ if self.face_identity_similarity_min_score is not None:
+ result['FaceIdentitySimilarityMinScore'] = self.face_identity_similarity_min_score
  if self.reference_video_shrink is not None:
  result['ReferenceVideo'] = self.reference_video_shrink
+ if self.remove_subtitle is not None:
+ result['RemoveSubtitle'] = self.remove_subtitle
  if self.source_videos_shrink is not None:
  result['SourceVideos'] = self.source_videos_shrink
  if self.split_interval is not None:
  result['SplitInterval'] = self.split_interval
+ if self.video_roles_shrink is not None:
+ result['VideoRoles'] = self.video_roles_shrink
+ if self.video_shot_face_identity_count is not None:
+ result['VideoShotFaceIdentityCount'] = self.video_shot_face_identity_count
  if self.workspace_id is not None:
  result['WorkspaceId'] = self.workspace_id
  return result
@@ -2603,12 +2882,20 @@ class AsyncUploadVideoShrinkRequest(TeaModel):
  m = m or dict()
  if m.get('AnlysisPrompt') is not None:
  self.anlysis_prompt = m.get('AnlysisPrompt')
+ if m.get('FaceIdentitySimilarityMinScore') is not None:
+ self.face_identity_similarity_min_score = m.get('FaceIdentitySimilarityMinScore')
  if m.get('ReferenceVideo') is not None:
  self.reference_video_shrink = m.get('ReferenceVideo')
+ if m.get('RemoveSubtitle') is not None:
+ self.remove_subtitle = m.get('RemoveSubtitle')
  if m.get('SourceVideos') is not None:
  self.source_videos_shrink = m.get('SourceVideos')
  if m.get('SplitInterval') is not None:
  self.split_interval = m.get('SplitInterval')
+ if m.get('VideoRoles') is not None:
+ self.video_roles_shrink = m.get('VideoRoles')
+ if m.get('VideoShotFaceIdentityCount') is not None:
+ self.video_shot_face_identity_count = m.get('VideoShotFaceIdentityCount')
  if m.get('WorkspaceId') is not None:
  self.workspace_id = m.get('WorkspaceId')
  return self
@@ -11605,6 +11892,75 @@ class GetAutoClipsTaskInfoResponseBodyDataColorWords(TeaModel):
  return self


+ class GetAutoClipsTaskInfoResponseBodyDataStickers(TeaModel):
+ def __init__(
+ self,
+ duration: int = None,
+ dync_frames: int = None,
+ height: int = None,
+ timeline_in: int = None,
+ url: str = None,
+ width: int = None,
+ x: float = None,
+ y: float = None,
+ ):
+ self.duration = duration
+ self.dync_frames = dync_frames
+ self.height = height
+ self.timeline_in = timeline_in
+ self.url = url
+ self.width = width
+ self.x = x
+ self.y = y
+
+ def validate(self):
+ pass
+
+ def to_map(self):
+ _map = super().to_map()
+ if _map is not None:
+ return _map
+
+ result = dict()
+ if self.duration is not None:
+ result['Duration'] = self.duration
+ if self.dync_frames is not None:
+ result['DyncFrames'] = self.dync_frames
+ if self.height is not None:
+ result['Height'] = self.height
+ if self.timeline_in is not None:
+ result['TimelineIn'] = self.timeline_in
+ if self.url is not None:
+ result['Url'] = self.url
+ if self.width is not None:
+ result['Width'] = self.width
+ if self.x is not None:
+ result['X'] = self.x
+ if self.y is not None:
+ result['Y'] = self.y
+ return result
+
+ def from_map(self, m: dict = None):
+ m = m or dict()
+ if m.get('Duration') is not None:
+ self.duration = m.get('Duration')
+ if m.get('DyncFrames') is not None:
+ self.dync_frames = m.get('DyncFrames')
+ if m.get('Height') is not None:
+ self.height = m.get('Height')
+ if m.get('TimelineIn') is not None:
+ self.timeline_in = m.get('TimelineIn')
+ if m.get('Url') is not None:
+ self.url = m.get('Url')
+ if m.get('Width') is not None:
+ self.width = m.get('Width')
+ if m.get('X') is not None:
+ self.x = m.get('X')
+ if m.get('Y') is not None:
+ self.y = m.get('Y')
+ return self
+
+
  class GetAutoClipsTaskInfoResponseBodyDataTimelinesClips(TeaModel):
  def __init__(
  self,
@@ -11724,8 +12080,13 @@ class GetAutoClipsTaskInfoResponseBodyDataTimelines(TeaModel):
  class GetAutoClipsTaskInfoResponseBodyData(TeaModel):
  def __init__(
  self,
+ close_music: bool = None,
+ close_subtitle: bool = None,
+ close_voice: bool = None,
  color_words: List[GetAutoClipsTaskInfoResponseBodyDataColorWords] = None,
  content: str = None,
+ custom_voice_url: str = None,
+ custom_voice_volume: int = None,
  error_message: str = None,
  media_cloud_timeline: str = None,
  music_style: str = None,
@@ -11734,14 +12095,20 @@ class GetAutoClipsTaskInfoResponseBodyData(TeaModel):
  output_video_url: str = None,
  status: int = None,
  step: str = None,
+ stickers: List[GetAutoClipsTaskInfoResponseBodyDataStickers] = None,
  subtitle_font_size: int = None,
  task_id: str = None,
  timelines: List[GetAutoClipsTaskInfoResponseBodyDataTimelines] = None,
  voice_style: str = None,
  voice_volume: int = None,
  ):
+ self.close_music = close_music
+ self.close_subtitle = close_subtitle
+ self.close_voice = close_voice
  self.color_words = color_words
  self.content = content
+ self.custom_voice_url = custom_voice_url
+ self.custom_voice_volume = custom_voice_volume
  self.error_message = error_message
  self.media_cloud_timeline = media_cloud_timeline
  self.music_style = music_style
@@ -11750,6 +12117,7 @@ class GetAutoClipsTaskInfoResponseBodyData(TeaModel):
  self.output_video_url = output_video_url
  self.status = status
  self.step = step
+ self.stickers = stickers
  self.subtitle_font_size = subtitle_font_size
  self.task_id = task_id
  self.timelines = timelines
@@ -11761,6 +12129,10 @@ class GetAutoClipsTaskInfoResponseBodyData(TeaModel):
  for k in self.color_words:
  if k:
  k.validate()
+ if self.stickers:
+ for k in self.stickers:
+ if k:
+ k.validate()
  if self.timelines:
  for k in self.timelines:
  if k:
@@ -11772,12 +12144,22 @@ class GetAutoClipsTaskInfoResponseBodyData(TeaModel):
  return _map

  result = dict()
+ if self.close_music is not None:
+ result['CloseMusic'] = self.close_music
+ if self.close_subtitle is not None:
+ result['CloseSubtitle'] = self.close_subtitle
+ if self.close_voice is not None:
+ result['CloseVoice'] = self.close_voice
  result['ColorWords'] = []
  if self.color_words is not None:
  for k in self.color_words:
  result['ColorWords'].append(k.to_map() if k else None)
  if self.content is not None:
  result['Content'] = self.content
+ if self.custom_voice_url is not None:
+ result['CustomVoiceUrl'] = self.custom_voice_url
+ if self.custom_voice_volume is not None:
+ result['CustomVoiceVolume'] = self.custom_voice_volume
  if self.error_message is not None:
  result['ErrorMessage'] = self.error_message
  if self.media_cloud_timeline is not None:
@@ -11794,6 +12176,10 @@ class GetAutoClipsTaskInfoResponseBodyData(TeaModel):
  result['Status'] = self.status
  if self.step is not None:
  result['Step'] = self.step
+ result['Stickers'] = []
+ if self.stickers is not None:
+ for k in self.stickers:
+ result['Stickers'].append(k.to_map() if k else None)
  if self.subtitle_font_size is not None:
  result['SubtitleFontSize'] = self.subtitle_font_size
  if self.task_id is not None:
@@ -11810,6 +12196,12 @@ class GetAutoClipsTaskInfoResponseBodyData(TeaModel):

  def from_map(self, m: dict = None):
  m = m or dict()
+ if m.get('CloseMusic') is not None:
+ self.close_music = m.get('CloseMusic')
+ if m.get('CloseSubtitle') is not None:
+ self.close_subtitle = m.get('CloseSubtitle')
+ if m.get('CloseVoice') is not None:
+ self.close_voice = m.get('CloseVoice')
  self.color_words = []
  if m.get('ColorWords') is not None:
  for k in m.get('ColorWords'):
@@ -11817,6 +12209,10 @@ class GetAutoClipsTaskInfoResponseBodyData(TeaModel):
  self.color_words.append(temp_model.from_map(k))
  if m.get('Content') is not None:
  self.content = m.get('Content')
+ if m.get('CustomVoiceUrl') is not None:
+ self.custom_voice_url = m.get('CustomVoiceUrl')
+ if m.get('CustomVoiceVolume') is not None:
+ self.custom_voice_volume = m.get('CustomVoiceVolume')
  if m.get('ErrorMessage') is not None:
  self.error_message = m.get('ErrorMessage')
  if m.get('MediaCloudTimeline') is not None:
@@ -11833,6 +12229,11 @@ class GetAutoClipsTaskInfoResponseBodyData(TeaModel):
  self.status = m.get('Status')
  if m.get('Step') is not None:
  self.step = m.get('Step')
+ self.stickers = []
+ if m.get('Stickers') is not None:
+ for k in m.get('Stickers'):
+ temp_model = GetAutoClipsTaskInfoResponseBodyDataStickers()
+ self.stickers.append(temp_model.from_map(k))
  if m.get('SubtitleFontSize') is not None:
  self.subtitle_font_size = m.get('SubtitleFontSize')
  if m.get('TaskId') is not None:
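On the read side, GetAutoClipsTaskInfoResponseBodyData now echoes the same settings back, including the sticker list. A sketch of inspecting them, assuming `response` came from the client's get_auto_clips_task_info call (the call itself and the response/body/data access path are not part of this diff):

```python
# Hedged sketch: only the Data field names below appear in this diff; the
# response.body.data layout follows the SDK's usual response structure and
# is an assumption here.
data = response.body.data
if data.close_voice:
    print('voice-over disabled; custom voice:', data.custom_voice_url, data.custom_voice_volume)
for sticker in (data.stickers or []):
    print(sticker.url, sticker.width, sticker.height, sticker.timeline_in, sticker.duration)
```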
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: alibabacloud-aimiaobi20230801
- Version: 1.37.0
+ Version: 1.37.1
  Summary: Alibaba Cloud AiMiaoBi (20230801) SDK Library for Python
  Home-page: https://github.com/aliyun/alibabacloud-python-sdk
  Author: Alibaba Cloud SDK
@@ -0,0 +1,8 @@
+ alibabacloud_aimiaobi20230801/__init__.py,sha256=Qo9x4_7jE4ioMTSFuUr0gjUxyFcHks8xIxyvRu90rXE,22
+ alibabacloud_aimiaobi20230801/client.py,sha256=YgKNsgxg81GvQTHtPYLRAVVncQLPLjMlBBeE2YWm1sU,917146
+ alibabacloud_aimiaobi20230801/models.py,sha256=5JmmFFUFgSNZylhP1LSsWAHhSA8tEOCTgWkJn4TtE2g,2094356
+ alibabacloud_aimiaobi20230801-1.37.1.dist-info/LICENSE,sha256=0CFItL6bHvxqS44T6vlLoW2R4Zaic304OO3WxN0oXF0,600
+ alibabacloud_aimiaobi20230801-1.37.1.dist-info/METADATA,sha256=nNFbCbBZCUmcabdEPqx7KDcAFKULyhObfZnRVhbDNmM,2348
+ alibabacloud_aimiaobi20230801-1.37.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ alibabacloud_aimiaobi20230801-1.37.1.dist-info/top_level.txt,sha256=8_10N8zQLrK-NI6L5TUyufvojDqjPl1Q-dHKwoC_b5Q,30
+ alibabacloud_aimiaobi20230801-1.37.1.dist-info/RECORD,,
@@ -1,8 +0,0 @@
- alibabacloud_aimiaobi20230801/__init__.py,sha256=U3JRvT2lvwk0GOGEYWvo3GizAuYHMJGJqd_pLPbRqUM,22
- alibabacloud_aimiaobi20230801/client.py,sha256=0_N6yBNJ9yl21W4x748W-ZqqBlZChYIH7KpTBvdi9OE,913788
- alibabacloud_aimiaobi20230801/models.py,sha256=PMGy4kXVYRrDKcAYo1ZnYT-PeudwCCeYCRPAHyNiYmk,2078526
- alibabacloud_aimiaobi20230801-1.37.0.dist-info/LICENSE,sha256=0CFItL6bHvxqS44T6vlLoW2R4Zaic304OO3WxN0oXF0,600
- alibabacloud_aimiaobi20230801-1.37.0.dist-info/METADATA,sha256=USMFoDV7LielInS9HIns6ixUO5H0cChhhsf-7ZyJm1o,2348
- alibabacloud_aimiaobi20230801-1.37.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- alibabacloud_aimiaobi20230801-1.37.0.dist-info/top_level.txt,sha256=8_10N8zQLrK-NI6L5TUyufvojDqjPl1Q-dHKwoC_b5Q,30
- alibabacloud_aimiaobi20230801-1.37.0.dist-info/RECORD,,