alibabacloud-quanmiaolightapp20240801 2.6.0__tar.gz → 2.6.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (17) hide show
  1. {alibabacloud_quanmiaolightapp20240801-2.6.0 → alibabacloud_quanmiaolightapp20240801-2.6.1}/ChangeLog.md +5 -0
  2. {alibabacloud_quanmiaolightapp20240801-2.6.0 → alibabacloud_quanmiaolightapp20240801-2.6.1}/PKG-INFO +1 -1
  3. alibabacloud_quanmiaolightapp20240801-2.6.1/alibabacloud_quanmiaolightapp20240801/__init__.py +1 -0
  4. {alibabacloud_quanmiaolightapp20240801-2.6.0 → alibabacloud_quanmiaolightapp20240801-2.6.1}/alibabacloud_quanmiaolightapp20240801/client.py +32 -0
  5. {alibabacloud_quanmiaolightapp20240801-2.6.0 → alibabacloud_quanmiaolightapp20240801-2.6.1}/alibabacloud_quanmiaolightapp20240801/models.py +430 -0
  6. {alibabacloud_quanmiaolightapp20240801-2.6.0 → alibabacloud_quanmiaolightapp20240801-2.6.1}/alibabacloud_quanmiaolightapp20240801.egg-info/PKG-INFO +1 -1
  7. {alibabacloud_quanmiaolightapp20240801-2.6.0 → alibabacloud_quanmiaolightapp20240801-2.6.1}/setup.py +1 -1
  8. alibabacloud_quanmiaolightapp20240801-2.6.0/alibabacloud_quanmiaolightapp20240801/__init__.py +0 -1
  9. {alibabacloud_quanmiaolightapp20240801-2.6.0 → alibabacloud_quanmiaolightapp20240801-2.6.1}/LICENSE +0 -0
  10. {alibabacloud_quanmiaolightapp20240801-2.6.0 → alibabacloud_quanmiaolightapp20240801-2.6.1}/MANIFEST.in +0 -0
  11. {alibabacloud_quanmiaolightapp20240801-2.6.0 → alibabacloud_quanmiaolightapp20240801-2.6.1}/README-CN.md +0 -0
  12. {alibabacloud_quanmiaolightapp20240801-2.6.0 → alibabacloud_quanmiaolightapp20240801-2.6.1}/README.md +0 -0
  13. {alibabacloud_quanmiaolightapp20240801-2.6.0 → alibabacloud_quanmiaolightapp20240801-2.6.1}/alibabacloud_quanmiaolightapp20240801.egg-info/SOURCES.txt +0 -0
  14. {alibabacloud_quanmiaolightapp20240801-2.6.0 → alibabacloud_quanmiaolightapp20240801-2.6.1}/alibabacloud_quanmiaolightapp20240801.egg-info/dependency_links.txt +0 -0
  15. {alibabacloud_quanmiaolightapp20240801-2.6.0 → alibabacloud_quanmiaolightapp20240801-2.6.1}/alibabacloud_quanmiaolightapp20240801.egg-info/requires.txt +0 -0
  16. {alibabacloud_quanmiaolightapp20240801-2.6.0 → alibabacloud_quanmiaolightapp20240801-2.6.1}/alibabacloud_quanmiaolightapp20240801.egg-info/top_level.txt +0 -0
  17. {alibabacloud_quanmiaolightapp20240801-2.6.0 → alibabacloud_quanmiaolightapp20240801-2.6.1}/setup.cfg +0 -0
@@ -1,3 +1,8 @@
1
+ 2025-03-10 Version: 2.6.0
2
+ - Support API GetTagMiningAnalysisTask.
3
+ - Support API SubmitTagMiningAnalysisTask.
4
+
5
+
1
6
  2025-01-23 Version: 2.5.1
2
7
  - Update API RunStyleWriting: add param processStage.
3
8
  - Update API RunStyleWriting: add param useSearch.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: alibabacloud_quanmiaolightapp20240801
3
- Version: 2.6.0
3
+ Version: 2.6.1
4
4
  Summary: Alibaba Cloud QuanMiaoLightApp (20240801) SDK Library for Python
5
5
  Home-page: https://github.com/aliyun/alibabacloud-python-sdk
6
6
  Author: Alibaba Cloud SDK
@@ -2385,9 +2385,13 @@ class Client(OpenApiClient):
2385
2385
  request.frame_sample_method_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.frame_sample_method, 'frameSampleMethod', 'json')
2386
2386
  if not UtilClient.is_unset(tmp_req.generate_options):
2387
2387
  request.generate_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.generate_options, 'generateOptions', 'json')
2388
+ if not UtilClient.is_unset(tmp_req.text_process_tasks):
2389
+ request.text_process_tasks_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.text_process_tasks, 'textProcessTasks', 'json')
2388
2390
  if not UtilClient.is_unset(tmp_req.video_roles):
2389
2391
  request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
2390
2392
  body = {}
2393
+ if not UtilClient.is_unset(request.face_identity_similarity_min_score):
2394
+ body['faceIdentitySimilarityMinScore'] = request.face_identity_similarity_min_score
2391
2395
  if not UtilClient.is_unset(request.frame_sample_method_shrink):
2392
2396
  body['frameSampleMethod'] = request.frame_sample_method_shrink
2393
2397
  if not UtilClient.is_unset(request.generate_options_shrink):
@@ -2406,6 +2410,8 @@ class Client(OpenApiClient):
2406
2410
  body['snapshotInterval'] = request.snapshot_interval
2407
2411
  if not UtilClient.is_unset(request.task_id):
2408
2412
  body['taskId'] = request.task_id
2413
+ if not UtilClient.is_unset(request.text_process_tasks_shrink):
2414
+ body['textProcessTasks'] = request.text_process_tasks_shrink
2409
2415
  if not UtilClient.is_unset(request.video_extra_info):
2410
2416
  body['videoExtraInfo'] = request.video_extra_info
2411
2417
  if not UtilClient.is_unset(request.video_model_custom_prompt_template):
@@ -2414,6 +2420,8 @@ class Client(OpenApiClient):
2414
2420
  body['videoModelId'] = request.video_model_id
2415
2421
  if not UtilClient.is_unset(request.video_roles_shrink):
2416
2422
  body['videoRoles'] = request.video_roles_shrink
2423
+ if not UtilClient.is_unset(request.video_shot_face_identity_count):
2424
+ body['videoShotFaceIdentityCount'] = request.video_shot_face_identity_count
2417
2425
  if not UtilClient.is_unset(request.video_url):
2418
2426
  body['videoUrl'] = request.video_url
2419
2427
  req = open_api_models.OpenApiRequest(
@@ -2464,9 +2472,13 @@ class Client(OpenApiClient):
2464
2472
  request.frame_sample_method_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.frame_sample_method, 'frameSampleMethod', 'json')
2465
2473
  if not UtilClient.is_unset(tmp_req.generate_options):
2466
2474
  request.generate_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.generate_options, 'generateOptions', 'json')
2475
+ if not UtilClient.is_unset(tmp_req.text_process_tasks):
2476
+ request.text_process_tasks_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.text_process_tasks, 'textProcessTasks', 'json')
2467
2477
  if not UtilClient.is_unset(tmp_req.video_roles):
2468
2478
  request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
2469
2479
  body = {}
2480
+ if not UtilClient.is_unset(request.face_identity_similarity_min_score):
2481
+ body['faceIdentitySimilarityMinScore'] = request.face_identity_similarity_min_score
2470
2482
  if not UtilClient.is_unset(request.frame_sample_method_shrink):
2471
2483
  body['frameSampleMethod'] = request.frame_sample_method_shrink
2472
2484
  if not UtilClient.is_unset(request.generate_options_shrink):
@@ -2485,6 +2497,8 @@ class Client(OpenApiClient):
2485
2497
  body['snapshotInterval'] = request.snapshot_interval
2486
2498
  if not UtilClient.is_unset(request.task_id):
2487
2499
  body['taskId'] = request.task_id
2500
+ if not UtilClient.is_unset(request.text_process_tasks_shrink):
2501
+ body['textProcessTasks'] = request.text_process_tasks_shrink
2488
2502
  if not UtilClient.is_unset(request.video_extra_info):
2489
2503
  body['videoExtraInfo'] = request.video_extra_info
2490
2504
  if not UtilClient.is_unset(request.video_model_custom_prompt_template):
@@ -2493,6 +2507,8 @@ class Client(OpenApiClient):
2493
2507
  body['videoModelId'] = request.video_model_id
2494
2508
  if not UtilClient.is_unset(request.video_roles_shrink):
2495
2509
  body['videoRoles'] = request.video_roles_shrink
2510
+ if not UtilClient.is_unset(request.video_shot_face_identity_count):
2511
+ body['videoShotFaceIdentityCount'] = request.video_shot_face_identity_count
2496
2512
  if not UtilClient.is_unset(request.video_url):
2497
2513
  body['videoUrl'] = request.video_url
2498
2514
  req = open_api_models.OpenApiRequest(
@@ -2733,9 +2749,13 @@ class Client(OpenApiClient):
2733
2749
  request.frame_sample_method_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.frame_sample_method, 'frameSampleMethod', 'json')
2734
2750
  if not UtilClient.is_unset(tmp_req.generate_options):
2735
2751
  request.generate_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.generate_options, 'generateOptions', 'json')
2752
+ if not UtilClient.is_unset(tmp_req.text_process_tasks):
2753
+ request.text_process_tasks_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.text_process_tasks, 'textProcessTasks', 'json')
2736
2754
  if not UtilClient.is_unset(tmp_req.video_roles):
2737
2755
  request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
2738
2756
  body = {}
2757
+ if not UtilClient.is_unset(request.face_identity_similarity_min_score):
2758
+ body['faceIdentitySimilarityMinScore'] = request.face_identity_similarity_min_score
2739
2759
  if not UtilClient.is_unset(request.frame_sample_method_shrink):
2740
2760
  body['frameSampleMethod'] = request.frame_sample_method_shrink
2741
2761
  if not UtilClient.is_unset(request.generate_options_shrink):
@@ -2750,6 +2770,8 @@ class Client(OpenApiClient):
2750
2770
  body['modelId'] = request.model_id
2751
2771
  if not UtilClient.is_unset(request.snapshot_interval):
2752
2772
  body['snapshotInterval'] = request.snapshot_interval
2773
+ if not UtilClient.is_unset(request.text_process_tasks_shrink):
2774
+ body['textProcessTasks'] = request.text_process_tasks_shrink
2753
2775
  if not UtilClient.is_unset(request.video_extra_info):
2754
2776
  body['videoExtraInfo'] = request.video_extra_info
2755
2777
  if not UtilClient.is_unset(request.video_model_custom_prompt_template):
@@ -2758,6 +2780,8 @@ class Client(OpenApiClient):
2758
2780
  body['videoModelId'] = request.video_model_id
2759
2781
  if not UtilClient.is_unset(request.video_roles_shrink):
2760
2782
  body['videoRoles'] = request.video_roles_shrink
2783
+ if not UtilClient.is_unset(request.video_shot_face_identity_count):
2784
+ body['videoShotFaceIdentityCount'] = request.video_shot_face_identity_count
2761
2785
  if not UtilClient.is_unset(request.video_url):
2762
2786
  body['videoUrl'] = request.video_url
2763
2787
  req = open_api_models.OpenApiRequest(
@@ -2808,9 +2832,13 @@ class Client(OpenApiClient):
2808
2832
  request.frame_sample_method_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.frame_sample_method, 'frameSampleMethod', 'json')
2809
2833
  if not UtilClient.is_unset(tmp_req.generate_options):
2810
2834
  request.generate_options_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.generate_options, 'generateOptions', 'json')
2835
+ if not UtilClient.is_unset(tmp_req.text_process_tasks):
2836
+ request.text_process_tasks_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.text_process_tasks, 'textProcessTasks', 'json')
2811
2837
  if not UtilClient.is_unset(tmp_req.video_roles):
2812
2838
  request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
2813
2839
  body = {}
2840
+ if not UtilClient.is_unset(request.face_identity_similarity_min_score):
2841
+ body['faceIdentitySimilarityMinScore'] = request.face_identity_similarity_min_score
2814
2842
  if not UtilClient.is_unset(request.frame_sample_method_shrink):
2815
2843
  body['frameSampleMethod'] = request.frame_sample_method_shrink
2816
2844
  if not UtilClient.is_unset(request.generate_options_shrink):
@@ -2825,6 +2853,8 @@ class Client(OpenApiClient):
2825
2853
  body['modelId'] = request.model_id
2826
2854
  if not UtilClient.is_unset(request.snapshot_interval):
2827
2855
  body['snapshotInterval'] = request.snapshot_interval
2856
+ if not UtilClient.is_unset(request.text_process_tasks_shrink):
2857
+ body['textProcessTasks'] = request.text_process_tasks_shrink
2828
2858
  if not UtilClient.is_unset(request.video_extra_info):
2829
2859
  body['videoExtraInfo'] = request.video_extra_info
2830
2860
  if not UtilClient.is_unset(request.video_model_custom_prompt_template):
@@ -2833,6 +2863,8 @@ class Client(OpenApiClient):
2833
2863
  body['videoModelId'] = request.video_model_id
2834
2864
  if not UtilClient.is_unset(request.video_roles_shrink):
2835
2865
  body['videoRoles'] = request.video_roles_shrink
2866
+ if not UtilClient.is_unset(request.video_shot_face_identity_count):
2867
+ body['videoShotFaceIdentityCount'] = request.video_shot_face_identity_count
2836
2868
  if not UtilClient.is_unset(request.video_url):
2837
2869
  body['videoUrl'] = request.video_url
2838
2870
  req = open_api_models.OpenApiRequest(
@@ -1466,10 +1466,18 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoGenerateResult(TeaMo
1466
1466
  def __init__(
1467
1467
  self,
1468
1468
  generate_finished: bool = None,
1469
+ index: int = None,
1470
+ model_id: str = None,
1471
+ model_reduce: bool = None,
1472
+ reason_text: str = None,
1469
1473
  text: str = None,
1470
1474
  usage: GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoGenerateResultUsage = None,
1471
1475
  ):
1472
1476
  self.generate_finished = generate_finished
1477
+ self.index = index
1478
+ self.model_id = model_id
1479
+ self.model_reduce = model_reduce
1480
+ self.reason_text = reason_text
1473
1481
  self.text = text
1474
1482
  self.usage = usage
1475
1483
 
@@ -1485,6 +1493,14 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoGenerateResult(TeaMo
1485
1493
  result = dict()
1486
1494
  if self.generate_finished is not None:
1487
1495
  result['generateFinished'] = self.generate_finished
1496
+ if self.index is not None:
1497
+ result['index'] = self.index
1498
+ if self.model_id is not None:
1499
+ result['modelId'] = self.model_id
1500
+ if self.model_reduce is not None:
1501
+ result['modelReduce'] = self.model_reduce
1502
+ if self.reason_text is not None:
1503
+ result['reasonText'] = self.reason_text
1488
1504
  if self.text is not None:
1489
1505
  result['text'] = self.text
1490
1506
  if self.usage is not None:
@@ -1495,6 +1511,14 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoGenerateResult(TeaMo
1495
1511
  m = m or dict()
1496
1512
  if m.get('generateFinished') is not None:
1497
1513
  self.generate_finished = m.get('generateFinished')
1514
+ if m.get('index') is not None:
1515
+ self.index = m.get('index')
1516
+ if m.get('modelId') is not None:
1517
+ self.model_id = m.get('modelId')
1518
+ if m.get('modelReduce') is not None:
1519
+ self.model_reduce = m.get('modelReduce')
1520
+ if m.get('reasonText') is not None:
1521
+ self.reason_text = m.get('reasonText')
1498
1522
  if m.get('text') is not None:
1499
1523
  self.text = m.get('text')
1500
1524
  if m.get('usage') is not None:
@@ -1503,6 +1527,104 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoGenerateResult(TeaMo
1503
1527
  return self
1504
1528
 
1505
1529
 
1530
+ class GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoGenerateResultsUsage(TeaModel):
1531
+ def __init__(
1532
+ self,
1533
+ input_tokens: int = None,
1534
+ output_tokens: int = None,
1535
+ total_tokens: int = None,
1536
+ ):
1537
+ self.input_tokens = input_tokens
1538
+ self.output_tokens = output_tokens
1539
+ self.total_tokens = total_tokens
1540
+
1541
+ def validate(self):
1542
+ pass
1543
+
1544
+ def to_map(self):
1545
+ _map = super().to_map()
1546
+ if _map is not None:
1547
+ return _map
1548
+
1549
+ result = dict()
1550
+ if self.input_tokens is not None:
1551
+ result['inputTokens'] = self.input_tokens
1552
+ if self.output_tokens is not None:
1553
+ result['outputTokens'] = self.output_tokens
1554
+ if self.total_tokens is not None:
1555
+ result['totalTokens'] = self.total_tokens
1556
+ return result
1557
+
1558
+ def from_map(self, m: dict = None):
1559
+ m = m or dict()
1560
+ if m.get('inputTokens') is not None:
1561
+ self.input_tokens = m.get('inputTokens')
1562
+ if m.get('outputTokens') is not None:
1563
+ self.output_tokens = m.get('outputTokens')
1564
+ if m.get('totalTokens') is not None:
1565
+ self.total_tokens = m.get('totalTokens')
1566
+ return self
1567
+
1568
+
1569
+ class GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoGenerateResults(TeaModel):
1570
+ def __init__(
1571
+ self,
1572
+ generate_finished: bool = None,
1573
+ index: int = None,
1574
+ model_id: str = None,
1575
+ reason_text: str = None,
1576
+ text: str = None,
1577
+ usage: GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoGenerateResultsUsage = None,
1578
+ ):
1579
+ self.generate_finished = generate_finished
1580
+ self.index = index
1581
+ self.model_id = model_id
1582
+ self.reason_text = reason_text
1583
+ self.text = text
1584
+ self.usage = usage
1585
+
1586
+ def validate(self):
1587
+ if self.usage:
1588
+ self.usage.validate()
1589
+
1590
+ def to_map(self):
1591
+ _map = super().to_map()
1592
+ if _map is not None:
1593
+ return _map
1594
+
1595
+ result = dict()
1596
+ if self.generate_finished is not None:
1597
+ result['generateFinished'] = self.generate_finished
1598
+ if self.index is not None:
1599
+ result['index'] = self.index
1600
+ if self.model_id is not None:
1601
+ result['modelId'] = self.model_id
1602
+ if self.reason_text is not None:
1603
+ result['reasonText'] = self.reason_text
1604
+ if self.text is not None:
1605
+ result['text'] = self.text
1606
+ if self.usage is not None:
1607
+ result['usage'] = self.usage.to_map()
1608
+ return result
1609
+
1610
+ def from_map(self, m: dict = None):
1611
+ m = m or dict()
1612
+ if m.get('generateFinished') is not None:
1613
+ self.generate_finished = m.get('generateFinished')
1614
+ if m.get('index') is not None:
1615
+ self.index = m.get('index')
1616
+ if m.get('modelId') is not None:
1617
+ self.model_id = m.get('modelId')
1618
+ if m.get('reasonText') is not None:
1619
+ self.reason_text = m.get('reasonText')
1620
+ if m.get('text') is not None:
1621
+ self.text = m.get('text')
1622
+ if m.get('usage') is not None:
1623
+ temp_model = GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoGenerateResultsUsage()
1624
+ self.usage = temp_model.from_map(m['usage'])
1625
+ return self
1626
+
1627
+
1506
1628
  class GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoMindMappingGenerateResultUsage(TeaModel):
1507
1629
  def __init__(
1508
1630
  self,
@@ -1794,6 +1916,7 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutput(TeaModel):
1794
1916
  video_analysis_result: GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoAnalysisResult = None,
1795
1917
  video_caption_result: GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoCaptionResult = None,
1796
1918
  video_generate_result: GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoGenerateResult = None,
1919
+ video_generate_results: List[GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoGenerateResults] = None,
1797
1920
  video_mind_mapping_generate_result: GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoMindMappingGenerateResult = None,
1798
1921
  video_title_generate_result: GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoTitleGenerateResult = None,
1799
1922
  ):
@@ -1801,6 +1924,7 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutput(TeaModel):
1801
1924
  self.video_analysis_result = video_analysis_result
1802
1925
  self.video_caption_result = video_caption_result
1803
1926
  self.video_generate_result = video_generate_result
1927
+ self.video_generate_results = video_generate_results
1804
1928
  self.video_mind_mapping_generate_result = video_mind_mapping_generate_result
1805
1929
  self.video_title_generate_result = video_title_generate_result
1806
1930
 
@@ -1811,6 +1935,10 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutput(TeaModel):
1811
1935
  self.video_caption_result.validate()
1812
1936
  if self.video_generate_result:
1813
1937
  self.video_generate_result.validate()
1938
+ if self.video_generate_results:
1939
+ for k in self.video_generate_results:
1940
+ if k:
1941
+ k.validate()
1814
1942
  if self.video_mind_mapping_generate_result:
1815
1943
  self.video_mind_mapping_generate_result.validate()
1816
1944
  if self.video_title_generate_result:
@@ -1830,6 +1958,10 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutput(TeaModel):
1830
1958
  result['videoCaptionResult'] = self.video_caption_result.to_map()
1831
1959
  if self.video_generate_result is not None:
1832
1960
  result['videoGenerateResult'] = self.video_generate_result.to_map()
1961
+ result['videoGenerateResults'] = []
1962
+ if self.video_generate_results is not None:
1963
+ for k in self.video_generate_results:
1964
+ result['videoGenerateResults'].append(k.to_map() if k else None)
1833
1965
  if self.video_mind_mapping_generate_result is not None:
1834
1966
  result['videoMindMappingGenerateResult'] = self.video_mind_mapping_generate_result.to_map()
1835
1967
  if self.video_title_generate_result is not None:
@@ -1849,6 +1981,11 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutput(TeaModel):
1849
1981
  if m.get('videoGenerateResult') is not None:
1850
1982
  temp_model = GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoGenerateResult()
1851
1983
  self.video_generate_result = temp_model.from_map(m['videoGenerateResult'])
1984
+ self.video_generate_results = []
1985
+ if m.get('videoGenerateResults') is not None:
1986
+ for k in m.get('videoGenerateResults'):
1987
+ temp_model = GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoGenerateResults()
1988
+ self.video_generate_results.append(temp_model.from_map(k))
1852
1989
  if m.get('videoMindMappingGenerateResult') is not None:
1853
1990
  temp_model = GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoMindMappingGenerateResult()
1854
1991
  self.video_mind_mapping_generate_result = temp_model.from_map(m['videoMindMappingGenerateResult'])
@@ -6916,6 +7053,45 @@ class RunVideoAnalysisRequestFrameSampleMethod(TeaModel):
6916
7053
  return self
6917
7054
 
6918
7055
 
7056
+ class RunVideoAnalysisRequestTextProcessTasks(TeaModel):
7057
+ def __init__(
7058
+ self,
7059
+ model_custom_prompt_template: str = None,
7060
+ model_custom_prompt_template_id: str = None,
7061
+ model_id: str = None,
7062
+ ):
7063
+ self.model_custom_prompt_template = model_custom_prompt_template
7064
+ self.model_custom_prompt_template_id = model_custom_prompt_template_id
7065
+ self.model_id = model_id
7066
+
7067
+ def validate(self):
7068
+ pass
7069
+
7070
+ def to_map(self):
7071
+ _map = super().to_map()
7072
+ if _map is not None:
7073
+ return _map
7074
+
7075
+ result = dict()
7076
+ if self.model_custom_prompt_template is not None:
7077
+ result['modelCustomPromptTemplate'] = self.model_custom_prompt_template
7078
+ if self.model_custom_prompt_template_id is not None:
7079
+ result['modelCustomPromptTemplateId'] = self.model_custom_prompt_template_id
7080
+ if self.model_id is not None:
7081
+ result['modelId'] = self.model_id
7082
+ return result
7083
+
7084
+ def from_map(self, m: dict = None):
7085
+ m = m or dict()
7086
+ if m.get('modelCustomPromptTemplate') is not None:
7087
+ self.model_custom_prompt_template = m.get('modelCustomPromptTemplate')
7088
+ if m.get('modelCustomPromptTemplateId') is not None:
7089
+ self.model_custom_prompt_template_id = m.get('modelCustomPromptTemplateId')
7090
+ if m.get('modelId') is not None:
7091
+ self.model_id = m.get('modelId')
7092
+ return self
7093
+
7094
+
6919
7095
  class RunVideoAnalysisRequestVideoRoles(TeaModel):
6920
7096
  def __init__(
6921
7097
  self,
@@ -6958,6 +7134,7 @@ class RunVideoAnalysisRequestVideoRoles(TeaModel):
6958
7134
  class RunVideoAnalysisRequest(TeaModel):
6959
7135
  def __init__(
6960
7136
  self,
7137
+ face_identity_similarity_min_score: float = None,
6961
7138
  frame_sample_method: RunVideoAnalysisRequestFrameSampleMethod = None,
6962
7139
  generate_options: List[str] = None,
6963
7140
  language: str = None,
@@ -6967,12 +7144,15 @@ class RunVideoAnalysisRequest(TeaModel):
6967
7144
  original_session_id: str = None,
6968
7145
  snapshot_interval: float = None,
6969
7146
  task_id: str = None,
7147
+ text_process_tasks: List[RunVideoAnalysisRequestTextProcessTasks] = None,
6970
7148
  video_extra_info: str = None,
6971
7149
  video_model_custom_prompt_template: str = None,
6972
7150
  video_model_id: str = None,
6973
7151
  video_roles: List[RunVideoAnalysisRequestVideoRoles] = None,
7152
+ video_shot_face_identity_count: int = None,
6974
7153
  video_url: str = None,
6975
7154
  ):
7155
+ self.face_identity_similarity_min_score = face_identity_similarity_min_score
6976
7156
  self.frame_sample_method = frame_sample_method
6977
7157
  self.generate_options = generate_options
6978
7158
  self.language = language
@@ -6982,15 +7162,21 @@ class RunVideoAnalysisRequest(TeaModel):
6982
7162
  self.original_session_id = original_session_id
6983
7163
  self.snapshot_interval = snapshot_interval
6984
7164
  self.task_id = task_id
7165
+ self.text_process_tasks = text_process_tasks
6985
7166
  self.video_extra_info = video_extra_info
6986
7167
  self.video_model_custom_prompt_template = video_model_custom_prompt_template
6987
7168
  self.video_model_id = video_model_id
6988
7169
  self.video_roles = video_roles
7170
+ self.video_shot_face_identity_count = video_shot_face_identity_count
6989
7171
  self.video_url = video_url
6990
7172
 
6991
7173
  def validate(self):
6992
7174
  if self.frame_sample_method:
6993
7175
  self.frame_sample_method.validate()
7176
+ if self.text_process_tasks:
7177
+ for k in self.text_process_tasks:
7178
+ if k:
7179
+ k.validate()
6994
7180
  if self.video_roles:
6995
7181
  for k in self.video_roles:
6996
7182
  if k:
@@ -7002,6 +7188,8 @@ class RunVideoAnalysisRequest(TeaModel):
7002
7188
  return _map
7003
7189
 
7004
7190
  result = dict()
7191
+ if self.face_identity_similarity_min_score is not None:
7192
+ result['faceIdentitySimilarityMinScore'] = self.face_identity_similarity_min_score
7005
7193
  if self.frame_sample_method is not None:
7006
7194
  result['frameSampleMethod'] = self.frame_sample_method.to_map()
7007
7195
  if self.generate_options is not None:
@@ -7020,6 +7208,10 @@ class RunVideoAnalysisRequest(TeaModel):
7020
7208
  result['snapshotInterval'] = self.snapshot_interval
7021
7209
  if self.task_id is not None:
7022
7210
  result['taskId'] = self.task_id
7211
+ result['textProcessTasks'] = []
7212
+ if self.text_process_tasks is not None:
7213
+ for k in self.text_process_tasks:
7214
+ result['textProcessTasks'].append(k.to_map() if k else None)
7023
7215
  if self.video_extra_info is not None:
7024
7216
  result['videoExtraInfo'] = self.video_extra_info
7025
7217
  if self.video_model_custom_prompt_template is not None:
@@ -7030,12 +7222,16 @@ class RunVideoAnalysisRequest(TeaModel):
7030
7222
  if self.video_roles is not None:
7031
7223
  for k in self.video_roles:
7032
7224
  result['videoRoles'].append(k.to_map() if k else None)
7225
+ if self.video_shot_face_identity_count is not None:
7226
+ result['videoShotFaceIdentityCount'] = self.video_shot_face_identity_count
7033
7227
  if self.video_url is not None:
7034
7228
  result['videoUrl'] = self.video_url
7035
7229
  return result
7036
7230
 
7037
7231
  def from_map(self, m: dict = None):
7038
7232
  m = m or dict()
7233
+ if m.get('faceIdentitySimilarityMinScore') is not None:
7234
+ self.face_identity_similarity_min_score = m.get('faceIdentitySimilarityMinScore')
7039
7235
  if m.get('frameSampleMethod') is not None:
7040
7236
  temp_model = RunVideoAnalysisRequestFrameSampleMethod()
7041
7237
  self.frame_sample_method = temp_model.from_map(m['frameSampleMethod'])
@@ -7055,6 +7251,11 @@ class RunVideoAnalysisRequest(TeaModel):
7055
7251
  self.snapshot_interval = m.get('snapshotInterval')
7056
7252
  if m.get('taskId') is not None:
7057
7253
  self.task_id = m.get('taskId')
7254
+ self.text_process_tasks = []
7255
+ if m.get('textProcessTasks') is not None:
7256
+ for k in m.get('textProcessTasks'):
7257
+ temp_model = RunVideoAnalysisRequestTextProcessTasks()
7258
+ self.text_process_tasks.append(temp_model.from_map(k))
7058
7259
  if m.get('videoExtraInfo') is not None:
7059
7260
  self.video_extra_info = m.get('videoExtraInfo')
7060
7261
  if m.get('videoModelCustomPromptTemplate') is not None:
@@ -7066,6 +7267,8 @@ class RunVideoAnalysisRequest(TeaModel):
7066
7267
  for k in m.get('videoRoles'):
7067
7268
  temp_model = RunVideoAnalysisRequestVideoRoles()
7068
7269
  self.video_roles.append(temp_model.from_map(k))
7270
+ if m.get('videoShotFaceIdentityCount') is not None:
7271
+ self.video_shot_face_identity_count = m.get('videoShotFaceIdentityCount')
7069
7272
  if m.get('videoUrl') is not None:
7070
7273
  self.video_url = m.get('videoUrl')
7071
7274
  return self
@@ -7074,6 +7277,7 @@ class RunVideoAnalysisRequest(TeaModel):
7074
7277
  class RunVideoAnalysisShrinkRequest(TeaModel):
7075
7278
  def __init__(
7076
7279
  self,
7280
+ face_identity_similarity_min_score: float = None,
7077
7281
  frame_sample_method_shrink: str = None,
7078
7282
  generate_options_shrink: str = None,
7079
7283
  language: str = None,
@@ -7083,12 +7287,15 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
7083
7287
  original_session_id: str = None,
7084
7288
  snapshot_interval: float = None,
7085
7289
  task_id: str = None,
7290
+ text_process_tasks_shrink: str = None,
7086
7291
  video_extra_info: str = None,
7087
7292
  video_model_custom_prompt_template: str = None,
7088
7293
  video_model_id: str = None,
7089
7294
  video_roles_shrink: str = None,
7295
+ video_shot_face_identity_count: int = None,
7090
7296
  video_url: str = None,
7091
7297
  ):
7298
+ self.face_identity_similarity_min_score = face_identity_similarity_min_score
7092
7299
  self.frame_sample_method_shrink = frame_sample_method_shrink
7093
7300
  self.generate_options_shrink = generate_options_shrink
7094
7301
  self.language = language
@@ -7098,10 +7305,12 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
7098
7305
  self.original_session_id = original_session_id
7099
7306
  self.snapshot_interval = snapshot_interval
7100
7307
  self.task_id = task_id
7308
+ self.text_process_tasks_shrink = text_process_tasks_shrink
7101
7309
  self.video_extra_info = video_extra_info
7102
7310
  self.video_model_custom_prompt_template = video_model_custom_prompt_template
7103
7311
  self.video_model_id = video_model_id
7104
7312
  self.video_roles_shrink = video_roles_shrink
7313
+ self.video_shot_face_identity_count = video_shot_face_identity_count
7105
7314
  self.video_url = video_url
7106
7315
 
7107
7316
  def validate(self):
@@ -7113,6 +7322,8 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
7113
7322
  return _map
7114
7323
 
7115
7324
  result = dict()
7325
+ if self.face_identity_similarity_min_score is not None:
7326
+ result['faceIdentitySimilarityMinScore'] = self.face_identity_similarity_min_score
7116
7327
  if self.frame_sample_method_shrink is not None:
7117
7328
  result['frameSampleMethod'] = self.frame_sample_method_shrink
7118
7329
  if self.generate_options_shrink is not None:
@@ -7131,6 +7342,8 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
7131
7342
  result['snapshotInterval'] = self.snapshot_interval
7132
7343
  if self.task_id is not None:
7133
7344
  result['taskId'] = self.task_id
7345
+ if self.text_process_tasks_shrink is not None:
7346
+ result['textProcessTasks'] = self.text_process_tasks_shrink
7134
7347
  if self.video_extra_info is not None:
7135
7348
  result['videoExtraInfo'] = self.video_extra_info
7136
7349
  if self.video_model_custom_prompt_template is not None:
@@ -7139,12 +7352,16 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
7139
7352
  result['videoModelId'] = self.video_model_id
7140
7353
  if self.video_roles_shrink is not None:
7141
7354
  result['videoRoles'] = self.video_roles_shrink
7355
+ if self.video_shot_face_identity_count is not None:
7356
+ result['videoShotFaceIdentityCount'] = self.video_shot_face_identity_count
7142
7357
  if self.video_url is not None:
7143
7358
  result['videoUrl'] = self.video_url
7144
7359
  return result
7145
7360
 
7146
7361
  def from_map(self, m: dict = None):
7147
7362
  m = m or dict()
7363
+ if m.get('faceIdentitySimilarityMinScore') is not None:
7364
+ self.face_identity_similarity_min_score = m.get('faceIdentitySimilarityMinScore')
7148
7365
  if m.get('frameSampleMethod') is not None:
7149
7366
  self.frame_sample_method_shrink = m.get('frameSampleMethod')
7150
7367
  if m.get('generateOptions') is not None:
@@ -7163,6 +7380,8 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
7163
7380
  self.snapshot_interval = m.get('snapshotInterval')
7164
7381
  if m.get('taskId') is not None:
7165
7382
  self.task_id = m.get('taskId')
7383
+ if m.get('textProcessTasks') is not None:
7384
+ self.text_process_tasks_shrink = m.get('textProcessTasks')
7166
7385
  if m.get('videoExtraInfo') is not None:
7167
7386
  self.video_extra_info = m.get('videoExtraInfo')
7168
7387
  if m.get('videoModelCustomPromptTemplate') is not None:
@@ -7171,6 +7390,8 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
7171
7390
  self.video_model_id = m.get('videoModelId')
7172
7391
  if m.get('videoRoles') is not None:
7173
7392
  self.video_roles_shrink = m.get('videoRoles')
7393
+ if m.get('videoShotFaceIdentityCount') is not None:
7394
+ self.video_shot_face_identity_count = m.get('videoShotFaceIdentityCount')
7174
7395
  if m.get('videoUrl') is not None:
7175
7396
  self.video_url = m.get('videoUrl')
7176
7397
  return self
@@ -7514,14 +7735,18 @@ class RunVideoAnalysisResponseBodyPayloadOutputVideoGenerateResult(TeaModel):
7514
7735
  def __init__(
7515
7736
  self,
7516
7737
  generate_finished: bool = None,
7738
+ index: int = None,
7517
7739
  model_id: str = None,
7518
7740
  model_reduce: bool = None,
7741
+ reason_text: str = None,
7519
7742
  text: str = None,
7520
7743
  usage: RunVideoAnalysisResponseBodyPayloadOutputVideoGenerateResultUsage = None,
7521
7744
  ):
7522
7745
  self.generate_finished = generate_finished
7746
+ self.index = index
7523
7747
  self.model_id = model_id
7524
7748
  self.model_reduce = model_reduce
7749
+ self.reason_text = reason_text
7525
7750
  self.text = text
7526
7751
  self.usage = usage
7527
7752
 
@@ -7537,10 +7762,14 @@ class RunVideoAnalysisResponseBodyPayloadOutputVideoGenerateResult(TeaModel):
7537
7762
  result = dict()
7538
7763
  if self.generate_finished is not None:
7539
7764
  result['generateFinished'] = self.generate_finished
7765
+ if self.index is not None:
7766
+ result['index'] = self.index
7540
7767
  if self.model_id is not None:
7541
7768
  result['modelId'] = self.model_id
7542
7769
  if self.model_reduce is not None:
7543
7770
  result['modelReduce'] = self.model_reduce
7771
+ if self.reason_text is not None:
7772
+ result['reasonText'] = self.reason_text
7544
7773
  if self.text is not None:
7545
7774
  result['text'] = self.text
7546
7775
  if self.usage is not None:
@@ -7551,10 +7780,14 @@ class RunVideoAnalysisResponseBodyPayloadOutputVideoGenerateResult(TeaModel):
7551
7780
  m = m or dict()
7552
7781
  if m.get('generateFinished') is not None:
7553
7782
  self.generate_finished = m.get('generateFinished')
7783
+ if m.get('index') is not None:
7784
+ self.index = m.get('index')
7554
7785
  if m.get('modelId') is not None:
7555
7786
  self.model_id = m.get('modelId')
7556
7787
  if m.get('modelReduce') is not None:
7557
7788
  self.model_reduce = m.get('modelReduce')
7789
+ if m.get('reasonText') is not None:
7790
+ self.reason_text = m.get('reasonText')
7558
7791
  if m.get('text') is not None:
7559
7792
  self.text = m.get('text')
7560
7793
  if m.get('usage') is not None:
@@ -7563,6 +7796,104 @@ class RunVideoAnalysisResponseBodyPayloadOutputVideoGenerateResult(TeaModel):
7563
7796
  return self
7564
7797
 
7565
7798
 
7799
class RunVideoAnalysisResponseBodyPayloadOutputVideoGenerateResultsUsage(TeaModel):
    """Token-usage statistics attached to one video-generate result entry.

    Serializes to/from the API wire keys ``inputTokens``, ``outputTokens``
    and ``totalTokens`` following the standard TeaModel to_map/from_map
    contract.
    """

    # Ordered (python attribute, wire key) pairs shared by to_map/from_map.
    _FIELDS = (
        ('input_tokens', 'inputTokens'),
        ('output_tokens', 'outputTokens'),
        ('total_tokens', 'totalTokens'),
    )

    def __init__(
        self,
        input_tokens: int = None,
        output_tokens: int = None,
        total_tokens: int = None,
    ):
        self.input_tokens = input_tokens
        self.output_tokens = output_tokens
        self.total_tokens = total_tokens

    def validate(self):
        # No constraints on plain integer counters.
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map

        result = dict()
        # Emit only fields that are actually set, preserving wire-key names.
        for attr, key in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        # Populate only keys present (and non-null) in the incoming map.
        for attr, key in self._FIELDS:
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
7836
+
7837
+
7838
class RunVideoAnalysisResponseBodyPayloadOutputVideoGenerateResults(TeaModel):
    """One element of the video-generate results list in the analysis payload.

    Scalar fields map 1:1 to their wire keys; ``usage`` nests the
    token-usage sub-model and is validated/serialized recursively.
    """

    # Ordered (python attribute, wire key) pairs for the scalar fields;
    # the nested `usage` model is handled separately.
    _SCALAR_FIELDS = (
        ('generate_finished', 'generateFinished'),
        ('index', 'index'),
        ('model_id', 'modelId'),
        ('reason_text', 'reasonText'),
        ('text', 'text'),
    )

    def __init__(
        self,
        generate_finished: bool = None,
        index: int = None,
        model_id: str = None,
        reason_text: str = None,
        text: str = None,
        usage: RunVideoAnalysisResponseBodyPayloadOutputVideoGenerateResultsUsage = None,
    ):
        self.generate_finished = generate_finished
        self.index = index
        self.model_id = model_id
        self.reason_text = reason_text
        self.text = text
        self.usage = usage

    def validate(self):
        # Only the nested usage model carries validation logic.
        if self.usage:
            self.usage.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map

        result = dict()
        for attr, key in self._SCALAR_FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[key] = value
        if self.usage is not None:
            # Recursively serialize the nested usage model.
            result['usage'] = self.usage.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr, key in self._SCALAR_FIELDS:
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        if m.get('usage') is not None:
            # Rebuild the nested usage model from its sub-map.
            temp_model = RunVideoAnalysisResponseBodyPayloadOutputVideoGenerateResultsUsage()
            self.usage = temp_model.from_map(m['usage'])
        return self
7895
+
7896
+
7566
7897
  class RunVideoAnalysisResponseBodyPayloadOutputVideoMindMappingGenerateResultUsage(TeaModel):
7567
7898
  def __init__(
7568
7899
  self,
@@ -7999,6 +8330,7 @@ class RunVideoAnalysisResponseBodyPayloadOutput(TeaModel):
7999
8330
  video_analysis_result: RunVideoAnalysisResponseBodyPayloadOutputVideoAnalysisResult = None,
8000
8331
  video_caption_result: RunVideoAnalysisResponseBodyPayloadOutputVideoCaptionResult = None,
8001
8332
  video_generate_result: RunVideoAnalysisResponseBodyPayloadOutputVideoGenerateResult = None,
8333
+ video_generate_results: List[RunVideoAnalysisResponseBodyPayloadOutputVideoGenerateResults] = None,
8002
8334
  video_mind_mapping_generate_result: RunVideoAnalysisResponseBodyPayloadOutputVideoMindMappingGenerateResult = None,
8003
8335
  video_shot_snapshot_result: RunVideoAnalysisResponseBodyPayloadOutputVideoShotSnapshotResult = None,
8004
8336
  video_title_generate_result: RunVideoAnalysisResponseBodyPayloadOutputVideoTitleGenerateResult = None,
@@ -8007,6 +8339,7 @@ class RunVideoAnalysisResponseBodyPayloadOutput(TeaModel):
8007
8339
  self.video_analysis_result = video_analysis_result
8008
8340
  self.video_caption_result = video_caption_result
8009
8341
  self.video_generate_result = video_generate_result
8342
+ self.video_generate_results = video_generate_results
8010
8343
  self.video_mind_mapping_generate_result = video_mind_mapping_generate_result
8011
8344
  self.video_shot_snapshot_result = video_shot_snapshot_result
8012
8345
  self.video_title_generate_result = video_title_generate_result
@@ -8018,6 +8351,10 @@ class RunVideoAnalysisResponseBodyPayloadOutput(TeaModel):
8018
8351
  self.video_caption_result.validate()
8019
8352
  if self.video_generate_result:
8020
8353
  self.video_generate_result.validate()
8354
+ if self.video_generate_results:
8355
+ for k in self.video_generate_results:
8356
+ if k:
8357
+ k.validate()
8021
8358
  if self.video_mind_mapping_generate_result:
8022
8359
  self.video_mind_mapping_generate_result.validate()
8023
8360
  if self.video_shot_snapshot_result:
@@ -8039,6 +8376,10 @@ class RunVideoAnalysisResponseBodyPayloadOutput(TeaModel):
8039
8376
  result['videoCaptionResult'] = self.video_caption_result.to_map()
8040
8377
  if self.video_generate_result is not None:
8041
8378
  result['videoGenerateResult'] = self.video_generate_result.to_map()
8379
+ result['videoGenerateResults'] = []
8380
+ if self.video_generate_results is not None:
8381
+ for k in self.video_generate_results:
8382
+ result['videoGenerateResults'].append(k.to_map() if k else None)
8042
8383
  if self.video_mind_mapping_generate_result is not None:
8043
8384
  result['videoMindMappingGenerateResult'] = self.video_mind_mapping_generate_result.to_map()
8044
8385
  if self.video_shot_snapshot_result is not None:
@@ -8060,6 +8401,11 @@ class RunVideoAnalysisResponseBodyPayloadOutput(TeaModel):
8060
8401
  if m.get('videoGenerateResult') is not None:
8061
8402
  temp_model = RunVideoAnalysisResponseBodyPayloadOutputVideoGenerateResult()
8062
8403
  self.video_generate_result = temp_model.from_map(m['videoGenerateResult'])
8404
+ self.video_generate_results = []
8405
+ if m.get('videoGenerateResults') is not None:
8406
+ for k in m.get('videoGenerateResults'):
8407
+ temp_model = RunVideoAnalysisResponseBodyPayloadOutputVideoGenerateResults()
8408
+ self.video_generate_results.append(temp_model.from_map(k))
8063
8409
  if m.get('videoMindMappingGenerateResult') is not None:
8064
8410
  temp_model = RunVideoAnalysisResponseBodyPayloadOutputVideoMindMappingGenerateResult()
8065
8411
  self.video_mind_mapping_generate_result = temp_model.from_map(m['videoMindMappingGenerateResult'])
@@ -8580,6 +8926,45 @@ class SubmitVideoAnalysisTaskRequestFrameSampleMethod(TeaModel):
8580
8926
  return self
8581
8927
 
8582
8928
 
8929
class SubmitVideoAnalysisTaskRequestTextProcessTasks(TeaModel):
    """One text-processing task item of a SubmitVideoAnalysisTask request.

    Carries the model id and an optional custom prompt template (inline or
    referenced by template id); serialization follows the standard
    TeaModel to_map/from_map contract.
    """

    # Ordered (python attribute, wire key) pairs shared by to_map/from_map.
    _FIELDS = (
        ('model_custom_prompt_template', 'modelCustomPromptTemplate'),
        ('model_custom_prompt_template_id', 'modelCustomPromptTemplateId'),
        ('model_id', 'modelId'),
    )

    def __init__(
        self,
        model_custom_prompt_template: str = None,
        model_custom_prompt_template_id: str = None,
        model_id: str = None,
    ):
        self.model_custom_prompt_template = model_custom_prompt_template
        self.model_custom_prompt_template_id = model_custom_prompt_template_id
        self.model_id = model_id

    def validate(self):
        # All fields are optional free-form strings; nothing to check.
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map

        result = dict()
        # Emit only fields that are actually set, preserving wire-key names.
        for attr, key in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        # Populate only keys present (and non-null) in the incoming map.
        for attr, key in self._FIELDS:
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
8966
+
8967
+
8583
8968
  class SubmitVideoAnalysisTaskRequestVideoRoles(TeaModel):
8584
8969
  def __init__(
8585
8970
  self,
@@ -8622,6 +9007,7 @@ class SubmitVideoAnalysisTaskRequestVideoRoles(TeaModel):
8622
9007
  class SubmitVideoAnalysisTaskRequest(TeaModel):
8623
9008
  def __init__(
8624
9009
  self,
9010
+ face_identity_similarity_min_score: float = None,
8625
9011
  frame_sample_method: SubmitVideoAnalysisTaskRequestFrameSampleMethod = None,
8626
9012
  generate_options: List[str] = None,
8627
9013
  language: str = None,
@@ -8629,12 +9015,15 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
8629
9015
  model_custom_prompt_template_id: str = None,
8630
9016
  model_id: str = None,
8631
9017
  snapshot_interval: float = None,
9018
+ text_process_tasks: List[SubmitVideoAnalysisTaskRequestTextProcessTasks] = None,
8632
9019
  video_extra_info: str = None,
8633
9020
  video_model_custom_prompt_template: str = None,
8634
9021
  video_model_id: str = None,
8635
9022
  video_roles: List[SubmitVideoAnalysisTaskRequestVideoRoles] = None,
9023
+ video_shot_face_identity_count: int = None,
8636
9024
  video_url: str = None,
8637
9025
  ):
9026
+ self.face_identity_similarity_min_score = face_identity_similarity_min_score
8638
9027
  self.frame_sample_method = frame_sample_method
8639
9028
  self.generate_options = generate_options
8640
9029
  self.language = language
@@ -8642,16 +9031,22 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
8642
9031
  self.model_custom_prompt_template_id = model_custom_prompt_template_id
8643
9032
  self.model_id = model_id
8644
9033
  self.snapshot_interval = snapshot_interval
9034
+ self.text_process_tasks = text_process_tasks
8645
9035
  self.video_extra_info = video_extra_info
8646
9036
  self.video_model_custom_prompt_template = video_model_custom_prompt_template
8647
9037
  self.video_model_id = video_model_id
8648
9038
  self.video_roles = video_roles
9039
+ self.video_shot_face_identity_count = video_shot_face_identity_count
8649
9040
  # This parameter is required.
8650
9041
  self.video_url = video_url
8651
9042
 
8652
9043
  def validate(self):
8653
9044
  if self.frame_sample_method:
8654
9045
  self.frame_sample_method.validate()
9046
+ if self.text_process_tasks:
9047
+ for k in self.text_process_tasks:
9048
+ if k:
9049
+ k.validate()
8655
9050
  if self.video_roles:
8656
9051
  for k in self.video_roles:
8657
9052
  if k:
@@ -8663,6 +9058,8 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
8663
9058
  return _map
8664
9059
 
8665
9060
  result = dict()
9061
+ if self.face_identity_similarity_min_score is not None:
9062
+ result['faceIdentitySimilarityMinScore'] = self.face_identity_similarity_min_score
8666
9063
  if self.frame_sample_method is not None:
8667
9064
  result['frameSampleMethod'] = self.frame_sample_method.to_map()
8668
9065
  if self.generate_options is not None:
@@ -8677,6 +9074,10 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
8677
9074
  result['modelId'] = self.model_id
8678
9075
  if self.snapshot_interval is not None:
8679
9076
  result['snapshotInterval'] = self.snapshot_interval
9077
+ result['textProcessTasks'] = []
9078
+ if self.text_process_tasks is not None:
9079
+ for k in self.text_process_tasks:
9080
+ result['textProcessTasks'].append(k.to_map() if k else None)
8680
9081
  if self.video_extra_info is not None:
8681
9082
  result['videoExtraInfo'] = self.video_extra_info
8682
9083
  if self.video_model_custom_prompt_template is not None:
@@ -8687,12 +9088,16 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
8687
9088
  if self.video_roles is not None:
8688
9089
  for k in self.video_roles:
8689
9090
  result['videoRoles'].append(k.to_map() if k else None)
9091
+ if self.video_shot_face_identity_count is not None:
9092
+ result['videoShotFaceIdentityCount'] = self.video_shot_face_identity_count
8690
9093
  if self.video_url is not None:
8691
9094
  result['videoUrl'] = self.video_url
8692
9095
  return result
8693
9096
 
8694
9097
  def from_map(self, m: dict = None):
8695
9098
  m = m or dict()
9099
+ if m.get('faceIdentitySimilarityMinScore') is not None:
9100
+ self.face_identity_similarity_min_score = m.get('faceIdentitySimilarityMinScore')
8696
9101
  if m.get('frameSampleMethod') is not None:
8697
9102
  temp_model = SubmitVideoAnalysisTaskRequestFrameSampleMethod()
8698
9103
  self.frame_sample_method = temp_model.from_map(m['frameSampleMethod'])
@@ -8708,6 +9113,11 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
8708
9113
  self.model_id = m.get('modelId')
8709
9114
  if m.get('snapshotInterval') is not None:
8710
9115
  self.snapshot_interval = m.get('snapshotInterval')
9116
+ self.text_process_tasks = []
9117
+ if m.get('textProcessTasks') is not None:
9118
+ for k in m.get('textProcessTasks'):
9119
+ temp_model = SubmitVideoAnalysisTaskRequestTextProcessTasks()
9120
+ self.text_process_tasks.append(temp_model.from_map(k))
8711
9121
  if m.get('videoExtraInfo') is not None:
8712
9122
  self.video_extra_info = m.get('videoExtraInfo')
8713
9123
  if m.get('videoModelCustomPromptTemplate') is not None:
@@ -8719,6 +9129,8 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
8719
9129
  for k in m.get('videoRoles'):
8720
9130
  temp_model = SubmitVideoAnalysisTaskRequestVideoRoles()
8721
9131
  self.video_roles.append(temp_model.from_map(k))
9132
+ if m.get('videoShotFaceIdentityCount') is not None:
9133
+ self.video_shot_face_identity_count = m.get('videoShotFaceIdentityCount')
8722
9134
  if m.get('videoUrl') is not None:
8723
9135
  self.video_url = m.get('videoUrl')
8724
9136
  return self
@@ -8727,6 +9139,7 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
8727
9139
  class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
8728
9140
  def __init__(
8729
9141
  self,
9142
+ face_identity_similarity_min_score: float = None,
8730
9143
  frame_sample_method_shrink: str = None,
8731
9144
  generate_options_shrink: str = None,
8732
9145
  language: str = None,
@@ -8734,12 +9147,15 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
8734
9147
  model_custom_prompt_template_id: str = None,
8735
9148
  model_id: str = None,
8736
9149
  snapshot_interval: float = None,
9150
+ text_process_tasks_shrink: str = None,
8737
9151
  video_extra_info: str = None,
8738
9152
  video_model_custom_prompt_template: str = None,
8739
9153
  video_model_id: str = None,
8740
9154
  video_roles_shrink: str = None,
9155
+ video_shot_face_identity_count: int = None,
8741
9156
  video_url: str = None,
8742
9157
  ):
9158
+ self.face_identity_similarity_min_score = face_identity_similarity_min_score
8743
9159
  self.frame_sample_method_shrink = frame_sample_method_shrink
8744
9160
  self.generate_options_shrink = generate_options_shrink
8745
9161
  self.language = language
@@ -8747,10 +9163,12 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
8747
9163
  self.model_custom_prompt_template_id = model_custom_prompt_template_id
8748
9164
  self.model_id = model_id
8749
9165
  self.snapshot_interval = snapshot_interval
9166
+ self.text_process_tasks_shrink = text_process_tasks_shrink
8750
9167
  self.video_extra_info = video_extra_info
8751
9168
  self.video_model_custom_prompt_template = video_model_custom_prompt_template
8752
9169
  self.video_model_id = video_model_id
8753
9170
  self.video_roles_shrink = video_roles_shrink
9171
+ self.video_shot_face_identity_count = video_shot_face_identity_count
8754
9172
  # This parameter is required.
8755
9173
  self.video_url = video_url
8756
9174
 
@@ -8763,6 +9181,8 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
8763
9181
  return _map
8764
9182
 
8765
9183
  result = dict()
9184
+ if self.face_identity_similarity_min_score is not None:
9185
+ result['faceIdentitySimilarityMinScore'] = self.face_identity_similarity_min_score
8766
9186
  if self.frame_sample_method_shrink is not None:
8767
9187
  result['frameSampleMethod'] = self.frame_sample_method_shrink
8768
9188
  if self.generate_options_shrink is not None:
@@ -8777,6 +9197,8 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
8777
9197
  result['modelId'] = self.model_id
8778
9198
  if self.snapshot_interval is not None:
8779
9199
  result['snapshotInterval'] = self.snapshot_interval
9200
+ if self.text_process_tasks_shrink is not None:
9201
+ result['textProcessTasks'] = self.text_process_tasks_shrink
8780
9202
  if self.video_extra_info is not None:
8781
9203
  result['videoExtraInfo'] = self.video_extra_info
8782
9204
  if self.video_model_custom_prompt_template is not None:
@@ -8785,12 +9207,16 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
8785
9207
  result['videoModelId'] = self.video_model_id
8786
9208
  if self.video_roles_shrink is not None:
8787
9209
  result['videoRoles'] = self.video_roles_shrink
9210
+ if self.video_shot_face_identity_count is not None:
9211
+ result['videoShotFaceIdentityCount'] = self.video_shot_face_identity_count
8788
9212
  if self.video_url is not None:
8789
9213
  result['videoUrl'] = self.video_url
8790
9214
  return result
8791
9215
 
8792
9216
  def from_map(self, m: dict = None):
8793
9217
  m = m or dict()
9218
+ if m.get('faceIdentitySimilarityMinScore') is not None:
9219
+ self.face_identity_similarity_min_score = m.get('faceIdentitySimilarityMinScore')
8794
9220
  if m.get('frameSampleMethod') is not None:
8795
9221
  self.frame_sample_method_shrink = m.get('frameSampleMethod')
8796
9222
  if m.get('generateOptions') is not None:
@@ -8805,6 +9231,8 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
8805
9231
  self.model_id = m.get('modelId')
8806
9232
  if m.get('snapshotInterval') is not None:
8807
9233
  self.snapshot_interval = m.get('snapshotInterval')
9234
+ if m.get('textProcessTasks') is not None:
9235
+ self.text_process_tasks_shrink = m.get('textProcessTasks')
8808
9236
  if m.get('videoExtraInfo') is not None:
8809
9237
  self.video_extra_info = m.get('videoExtraInfo')
8810
9238
  if m.get('videoModelCustomPromptTemplate') is not None:
@@ -8813,6 +9241,8 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
8813
9241
  self.video_model_id = m.get('videoModelId')
8814
9242
  if m.get('videoRoles') is not None:
8815
9243
  self.video_roles_shrink = m.get('videoRoles')
9244
+ if m.get('videoShotFaceIdentityCount') is not None:
9245
+ self.video_shot_face_identity_count = m.get('videoShotFaceIdentityCount')
8816
9246
  if m.get('videoUrl') is not None:
8817
9247
  self.video_url = m.get('videoUrl')
8818
9248
  return self
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: alibabacloud-quanmiaolightapp20240801
3
- Version: 2.6.0
3
+ Version: 2.6.1
4
4
  Summary: Alibaba Cloud QuanMiaoLightApp (20240801) SDK Library for Python
5
5
  Home-page: https://github.com/aliyun/alibabacloud-python-sdk
6
6
  Author: Alibaba Cloud SDK
@@ -24,7 +24,7 @@ from setuptools import setup, find_packages
24
24
  """
25
25
  setup module for alibabacloud_quanmiaolightapp20240801.
26
26
 
27
- Created on 10/03/2025
27
+ Created on 13/03/2025
28
28
 
29
29
  @author: Alibaba Cloud SDK
30
30
  """