alibabacloud-quanmiaolightapp20240801 2.10.1__tar.gz → 2.10.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (17) hide show
  1. {alibabacloud_quanmiaolightapp20240801-2.10.1 → alibabacloud_quanmiaolightapp20240801-2.10.2}/ChangeLog.md +3 -0
  2. {alibabacloud_quanmiaolightapp20240801-2.10.1 → alibabacloud_quanmiaolightapp20240801-2.10.2}/PKG-INFO +1 -1
  3. alibabacloud_quanmiaolightapp20240801-2.10.2/alibabacloud_quanmiaolightapp20240801/__init__.py +1 -0
  4. {alibabacloud_quanmiaolightapp20240801-2.10.1 → alibabacloud_quanmiaolightapp20240801-2.10.2}/alibabacloud_quanmiaolightapp20240801/client.py +8 -0
  5. {alibabacloud_quanmiaolightapp20240801-2.10.1 → alibabacloud_quanmiaolightapp20240801-2.10.2}/alibabacloud_quanmiaolightapp20240801/models.py +428 -2
  6. {alibabacloud_quanmiaolightapp20240801-2.10.1 → alibabacloud_quanmiaolightapp20240801-2.10.2}/alibabacloud_quanmiaolightapp20240801.egg-info/PKG-INFO +1 -1
  7. alibabacloud_quanmiaolightapp20240801-2.10.1/alibabacloud_quanmiaolightapp20240801/__init__.py +0 -1
  8. {alibabacloud_quanmiaolightapp20240801-2.10.1 → alibabacloud_quanmiaolightapp20240801-2.10.2}/LICENSE +0 -0
  9. {alibabacloud_quanmiaolightapp20240801-2.10.1 → alibabacloud_quanmiaolightapp20240801-2.10.2}/MANIFEST.in +0 -0
  10. {alibabacloud_quanmiaolightapp20240801-2.10.1 → alibabacloud_quanmiaolightapp20240801-2.10.2}/README-CN.md +0 -0
  11. {alibabacloud_quanmiaolightapp20240801-2.10.1 → alibabacloud_quanmiaolightapp20240801-2.10.2}/README.md +0 -0
  12. {alibabacloud_quanmiaolightapp20240801-2.10.1 → alibabacloud_quanmiaolightapp20240801-2.10.2}/alibabacloud_quanmiaolightapp20240801.egg-info/SOURCES.txt +0 -0
  13. {alibabacloud_quanmiaolightapp20240801-2.10.1 → alibabacloud_quanmiaolightapp20240801-2.10.2}/alibabacloud_quanmiaolightapp20240801.egg-info/dependency_links.txt +0 -0
  14. {alibabacloud_quanmiaolightapp20240801-2.10.1 → alibabacloud_quanmiaolightapp20240801-2.10.2}/alibabacloud_quanmiaolightapp20240801.egg-info/requires.txt +0 -0
  15. {alibabacloud_quanmiaolightapp20240801-2.10.1 → alibabacloud_quanmiaolightapp20240801-2.10.2}/alibabacloud_quanmiaolightapp20240801.egg-info/top_level.txt +0 -0
  16. {alibabacloud_quanmiaolightapp20240801-2.10.1 → alibabacloud_quanmiaolightapp20240801-2.10.2}/setup.cfg +0 -0
  17. {alibabacloud_quanmiaolightapp20240801-2.10.1 → alibabacloud_quanmiaolightapp20240801-2.10.2}/setup.py +0 -0
@@ -1,3 +1,6 @@
1
+ 2025-07-29 Version: 2.10.2
2
+ - Generated python 2024-08-01 for QuanMiaoLightApp.
3
+
1
4
  2025-07-02 Version: 2.10.0
2
5
  - Support API GetEssayCorrectionTask.
3
6
  - Support API RunEssayCorrection.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: alibabacloud_quanmiaolightapp20240801
3
- Version: 2.10.1
3
+ Version: 2.10.2
4
4
  Summary: Alibaba Cloud QuanMiaoLightApp (20240801) SDK Library for Python
5
5
  Home-page: https://github.com/aliyun/alibabacloud-python-sdk
6
6
  Author: Alibaba Cloud SDK
@@ -3278,6 +3278,8 @@ class Client(OpenApiClient):
3278
3278
  if not UtilClient.is_unset(tmp_req.video_roles):
3279
3279
  request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
3280
3280
  body = {}
3281
+ if not UtilClient.is_unset(request.auto_role_recognition_video_url):
3282
+ body['autoRoleRecognitionVideoUrl'] = request.auto_role_recognition_video_url
3281
3283
  if not UtilClient.is_unset(request.exclude_generate_options_shrink):
3282
3284
  body['excludeGenerateOptions'] = request.exclude_generate_options_shrink
3283
3285
  if not UtilClient.is_unset(request.face_identity_similarity_min_score):
@@ -3369,6 +3371,8 @@ class Client(OpenApiClient):
3369
3371
  if not UtilClient.is_unset(tmp_req.video_roles):
3370
3372
  request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
3371
3373
  body = {}
3374
+ if not UtilClient.is_unset(request.auto_role_recognition_video_url):
3375
+ body['autoRoleRecognitionVideoUrl'] = request.auto_role_recognition_video_url
3372
3376
  if not UtilClient.is_unset(request.exclude_generate_options_shrink):
3373
3377
  body['excludeGenerateOptions'] = request.exclude_generate_options_shrink
3374
3378
  if not UtilClient.is_unset(request.face_identity_similarity_min_score):
@@ -3946,6 +3950,8 @@ class Client(OpenApiClient):
3946
3950
  if not UtilClient.is_unset(tmp_req.video_roles):
3947
3951
  request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
3948
3952
  body = {}
3953
+ if not UtilClient.is_unset(request.auto_role_recognition_video_url):
3954
+ body['autoRoleRecognitionVideoUrl'] = request.auto_role_recognition_video_url
3949
3955
  if not UtilClient.is_unset(request.deduplication_id):
3950
3956
  body['deduplicationId'] = request.deduplication_id
3951
3957
  if not UtilClient.is_unset(request.exclude_generate_options_shrink):
@@ -4035,6 +4041,8 @@ class Client(OpenApiClient):
4035
4041
  if not UtilClient.is_unset(tmp_req.video_roles):
4036
4042
  request.video_roles_shrink = OpenApiUtilClient.array_to_string_with_specified_style(tmp_req.video_roles, 'videoRoles', 'json')
4037
4043
  body = {}
4044
+ if not UtilClient.is_unset(request.auto_role_recognition_video_url):
4045
+ body['autoRoleRecognitionVideoUrl'] = request.auto_role_recognition_video_url
4038
4046
  if not UtilClient.is_unset(request.deduplication_id):
4039
4047
  body['deduplicationId'] = request.deduplication_id
4040
4048
  if not UtilClient.is_unset(request.exclude_generate_options_shrink):
@@ -2752,6 +2752,145 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoMindMappingGenerateR
2752
2752
  return self
2753
2753
 
2754
2754
 
2755
class GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoRoleRecognitionResultVideoRolesTimeIntervals(TeaModel):
    """One time interval in which a recognized video role appears.

    Serialized with camelCase keys: endTime, startTime, timestamp, url.
    """

    def __init__(
        self,
        end_time: int = None,
        start_time: int = None,
        timestamp: int = None,
        url: str = None,
    ):
        # Interval bounds and snapshot info as returned by the service.
        # Time unit is not visible here -- presumably milliseconds; confirm
        # against the QuanMiaoLightApp API reference.
        self.end_time = end_time
        self.start_time = start_time
        self.timestamp = timestamp
        self.url = url

    def validate(self):
        # Scalar fields only; nothing nested to validate.
        pass

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        _map = super().to_map()
        if _map is not None:
            return _map
        pairs = (
            ('endTime', self.end_time),
            ('startTime', self.start_time),
            ('timestamp', self.timestamp),
            ('url', self.url),
        )
        return {key: value for key, value in pairs if value is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a dict with camelCase keys; return self."""
        m = m or dict()
        for key, attr in (
            ('endTime', 'end_time'),
            ('startTime', 'start_time'),
            ('timestamp', 'timestamp'),
            ('url', 'url'),
        ):
            value = m.get(key)
            if value is not None:
                setattr(self, attr, value)
        return self
2798
+
2799
+
2800
class GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoRoleRecognitionResultVideoRoles(TeaModel):
    """A single recognized role and the intervals in which it appears."""

    def __init__(
        self,
        is_auto_recognition: bool = None,
        ratio: float = None,
        role_info: str = None,
        role_name: str = None,
        time_intervals: List[GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoRoleRecognitionResultVideoRolesTimeIntervals] = None,
    ):
        self.is_auto_recognition = is_auto_recognition
        # ratio: presumably the proportion of the video this role covers --
        # confirm against the API reference.
        self.ratio = ratio
        self.role_info = role_info
        self.role_name = role_name
        self.time_intervals = time_intervals

    def validate(self):
        # Recurse into nested interval models, skipping falsy entries.
        for interval in (self.time_intervals or []):
            if interval:
                interval.validate()

    def to_map(self):
        """Serialize to a dict with camelCase keys."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for key, value in (
            ('isAutoRecognition', self.is_auto_recognition),
            ('ratio', self.ratio),
            ('roleInfo', self.role_info),
            ('roleName', self.role_name),
        ):
            if value is not None:
                result[key] = value
        # NOTE: 'timeIntervals' is always emitted (possibly empty), matching
        # the generated-SDK convention for list-valued fields.
        result['timeIntervals'] = []
        if self.time_intervals is not None:
            result['timeIntervals'] = [
                item.to_map() if item else None for item in self.time_intervals
            ]
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a camelCase dict; return self."""
        m = m or dict()
        for key, attr in (
            ('isAutoRecognition', 'is_auto_recognition'),
            ('ratio', 'ratio'),
            ('roleInfo', 'role_info'),
            ('roleName', 'role_name'),
        ):
            value = m.get(key)
            if value is not None:
                setattr(self, attr, value)
        # Nested intervals are always reset, then rebuilt when present.
        self.time_intervals = []
        if m.get('timeIntervals') is not None:
            self.time_intervals = [
                GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoRoleRecognitionResultVideoRolesTimeIntervals().from_map(item)
                for item in m.get('timeIntervals')
            ]
        return self
2857
+
2858
+
2859
class GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoRoleRecognitionResult(TeaModel):
    """Role-recognition output: the list of recognized video roles."""

    def __init__(
        self,
        video_roles: List[GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoRoleRecognitionResultVideoRoles] = None,
    ):
        self.video_roles = video_roles

    def validate(self):
        # Recurse into nested role models, skipping falsy entries.
        for role in (self.video_roles or []):
            if role:
                role.validate()

    def to_map(self):
        """Serialize to a dict; 'videoRoles' is always present (possibly empty)."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        result['videoRoles'] = []
        if self.video_roles is not None:
            result['videoRoles'] = [
                role.to_map() if role else None for role in self.video_roles
            ]
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; return self."""
        m = m or dict()
        self.video_roles = []
        if m.get('videoRoles') is not None:
            self.video_roles = [
                GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoRoleRecognitionResultVideoRoles().from_map(item)
                for item in m.get('videoRoles')
            ]
        return self
2892
+
2893
+
2755
2894
  class GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoTitleGenerateResultUsage(TeaModel):
2756
2895
  def __init__(
2757
2896
  self,
@@ -2841,6 +2980,7 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutput(TeaModel):
2841
2980
  video_generate_result: GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoGenerateResult = None,
2842
2981
  video_generate_results: List[GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoGenerateResults] = None,
2843
2982
  video_mind_mapping_generate_result: GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoMindMappingGenerateResult = None,
2983
+ video_role_recognition_result: GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoRoleRecognitionResult = None,
2844
2984
  video_title_generate_result: GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoTitleGenerateResult = None,
2845
2985
  ):
2846
2986
  self.result_json_file_url = result_json_file_url
@@ -2849,6 +2989,7 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutput(TeaModel):
2849
2989
  self.video_generate_result = video_generate_result
2850
2990
  self.video_generate_results = video_generate_results
2851
2991
  self.video_mind_mapping_generate_result = video_mind_mapping_generate_result
2992
+ self.video_role_recognition_result = video_role_recognition_result
2852
2993
  self.video_title_generate_result = video_title_generate_result
2853
2994
 
2854
2995
  def validate(self):
@@ -2864,6 +3005,8 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutput(TeaModel):
2864
3005
  k.validate()
2865
3006
  if self.video_mind_mapping_generate_result:
2866
3007
  self.video_mind_mapping_generate_result.validate()
3008
+ if self.video_role_recognition_result:
3009
+ self.video_role_recognition_result.validate()
2867
3010
  if self.video_title_generate_result:
2868
3011
  self.video_title_generate_result.validate()
2869
3012
 
@@ -2887,6 +3030,8 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutput(TeaModel):
2887
3030
  result['videoGenerateResults'].append(k.to_map() if k else None)
2888
3031
  if self.video_mind_mapping_generate_result is not None:
2889
3032
  result['videoMindMappingGenerateResult'] = self.video_mind_mapping_generate_result.to_map()
3033
+ if self.video_role_recognition_result is not None:
3034
+ result['videoRoleRecognitionResult'] = self.video_role_recognition_result.to_map()
2890
3035
  if self.video_title_generate_result is not None:
2891
3036
  result['videoTitleGenerateResult'] = self.video_title_generate_result.to_map()
2892
3037
  return result
@@ -2912,6 +3057,9 @@ class GetVideoAnalysisTaskResponseBodyDataPayloadOutput(TeaModel):
2912
3057
  if m.get('videoMindMappingGenerateResult') is not None:
2913
3058
  temp_model = GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoMindMappingGenerateResult()
2914
3059
  self.video_mind_mapping_generate_result = temp_model.from_map(m['videoMindMappingGenerateResult'])
3060
+ if m.get('videoRoleRecognitionResult') is not None:
3061
+ temp_model = GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoRoleRecognitionResult()
3062
+ self.video_role_recognition_result = temp_model.from_map(m['videoRoleRecognitionResult'])
2915
3063
  if m.get('videoTitleGenerateResult') is not None:
2916
3064
  temp_model = GetVideoAnalysisTaskResponseBodyDataPayloadOutputVideoTitleGenerateResult()
2917
3065
  self.video_title_generate_result = temp_model.from_map(m['videoTitleGenerateResult'])
@@ -9831,19 +9979,59 @@ class RunVideoAnalysisRequestVideoCaptionInfo(TeaModel):
9831
9979
  return self
9832
9980
 
9833
9981
 
9982
class RunVideoAnalysisRequestVideoRolesTimeIntervals(TeaModel):
    """Caller-supplied time interval constraining where a role appears.

    Serialized with camelCase keys: endTime, startTime.
    """

    def __init__(
        self,
        end_time: int = None,
        start_time: int = None,
    ):
        # Time unit is not visible here -- presumably milliseconds; confirm
        # against the QuanMiaoLightApp API reference.
        self.end_time = end_time
        self.start_time = start_time

    def validate(self):
        # Scalar fields only; nothing nested to validate.
        pass

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        _map = super().to_map()
        if _map is not None:
            return _map
        pairs = (
            ('endTime', self.end_time),
            ('startTime', self.start_time),
        )
        return {key: value for key, value in pairs if value is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a dict with camelCase keys; return self."""
        m = m or dict()
        for key, attr in (
            ('endTime', 'end_time'),
            ('startTime', 'start_time'),
        ):
            value = m.get(key)
            if value is not None:
                setattr(self, attr, value)
        return self
10013
+
10014
+
9834
10015
  class RunVideoAnalysisRequestVideoRoles(TeaModel):
9835
10016
  def __init__(
9836
10017
  self,
10018
+ is_auto_recognition: bool = None,
9837
10019
  role_info: str = None,
9838
10020
  role_name: str = None,
10021
+ time_intervals: List[RunVideoAnalysisRequestVideoRolesTimeIntervals] = None,
9839
10022
  urls: List[str] = None,
9840
10023
  ):
10024
+ self.is_auto_recognition = is_auto_recognition
9841
10025
  self.role_info = role_info
9842
10026
  self.role_name = role_name
10027
+ self.time_intervals = time_intervals
9843
10028
  self.urls = urls
9844
10029
 
9845
10030
  def validate(self):
9846
- pass
10031
+ if self.time_intervals:
10032
+ for k in self.time_intervals:
10033
+ if k:
10034
+ k.validate()
9847
10035
 
9848
10036
  def to_map(self):
9849
10037
  _map = super().to_map()
@@ -9851,20 +10039,33 @@ class RunVideoAnalysisRequestVideoRoles(TeaModel):
9851
10039
  return _map
9852
10040
 
9853
10041
  result = dict()
10042
+ if self.is_auto_recognition is not None:
10043
+ result['isAutoRecognition'] = self.is_auto_recognition
9854
10044
  if self.role_info is not None:
9855
10045
  result['roleInfo'] = self.role_info
9856
10046
  if self.role_name is not None:
9857
10047
  result['roleName'] = self.role_name
10048
+ result['timeIntervals'] = []
10049
+ if self.time_intervals is not None:
10050
+ for k in self.time_intervals:
10051
+ result['timeIntervals'].append(k.to_map() if k else None)
9858
10052
  if self.urls is not None:
9859
10053
  result['urls'] = self.urls
9860
10054
  return result
9861
10055
 
9862
10056
  def from_map(self, m: dict = None):
9863
10057
  m = m or dict()
10058
+ if m.get('isAutoRecognition') is not None:
10059
+ self.is_auto_recognition = m.get('isAutoRecognition')
9864
10060
  if m.get('roleInfo') is not None:
9865
10061
  self.role_info = m.get('roleInfo')
9866
10062
  if m.get('roleName') is not None:
9867
10063
  self.role_name = m.get('roleName')
10064
+ self.time_intervals = []
10065
+ if m.get('timeIntervals') is not None:
10066
+ for k in m.get('timeIntervals'):
10067
+ temp_model = RunVideoAnalysisRequestVideoRolesTimeIntervals()
10068
+ self.time_intervals.append(temp_model.from_map(k))
9868
10069
  if m.get('urls') is not None:
9869
10070
  self.urls = m.get('urls')
9870
10071
  return self
@@ -9873,6 +10074,7 @@ class RunVideoAnalysisRequestVideoRoles(TeaModel):
9873
10074
  class RunVideoAnalysisRequest(TeaModel):
9874
10075
  def __init__(
9875
10076
  self,
10077
+ auto_role_recognition_video_url: str = None,
9876
10078
  exclude_generate_options: List[str] = None,
9877
10079
  face_identity_similarity_min_score: float = None,
9878
10080
  frame_sample_method: RunVideoAnalysisRequestFrameSampleMethod = None,
@@ -9894,6 +10096,7 @@ class RunVideoAnalysisRequest(TeaModel):
9894
10096
  video_shot_face_identity_count: int = None,
9895
10097
  video_url: str = None,
9896
10098
  ):
10099
+ self.auto_role_recognition_video_url = auto_role_recognition_video_url
9897
10100
  self.exclude_generate_options = exclude_generate_options
9898
10101
  self.face_identity_similarity_min_score = face_identity_similarity_min_score
9899
10102
  self.frame_sample_method = frame_sample_method
@@ -9935,6 +10138,8 @@ class RunVideoAnalysisRequest(TeaModel):
9935
10138
  return _map
9936
10139
 
9937
10140
  result = dict()
10141
+ if self.auto_role_recognition_video_url is not None:
10142
+ result['autoRoleRecognitionVideoUrl'] = self.auto_role_recognition_video_url
9938
10143
  if self.exclude_generate_options is not None:
9939
10144
  result['excludeGenerateOptions'] = self.exclude_generate_options
9940
10145
  if self.face_identity_similarity_min_score is not None:
@@ -9983,6 +10188,8 @@ class RunVideoAnalysisRequest(TeaModel):
9983
10188
 
9984
10189
  def from_map(self, m: dict = None):
9985
10190
  m = m or dict()
10191
+ if m.get('autoRoleRecognitionVideoUrl') is not None:
10192
+ self.auto_role_recognition_video_url = m.get('autoRoleRecognitionVideoUrl')
9986
10193
  if m.get('excludeGenerateOptions') is not None:
9987
10194
  self.exclude_generate_options = m.get('excludeGenerateOptions')
9988
10195
  if m.get('faceIdentitySimilarityMinScore') is not None:
@@ -10037,6 +10244,7 @@ class RunVideoAnalysisRequest(TeaModel):
10037
10244
  class RunVideoAnalysisShrinkRequest(TeaModel):
10038
10245
  def __init__(
10039
10246
  self,
10247
+ auto_role_recognition_video_url: str = None,
10040
10248
  exclude_generate_options_shrink: str = None,
10041
10249
  face_identity_similarity_min_score: float = None,
10042
10250
  frame_sample_method_shrink: str = None,
@@ -10058,6 +10266,7 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
10058
10266
  video_shot_face_identity_count: int = None,
10059
10267
  video_url: str = None,
10060
10268
  ):
10269
+ self.auto_role_recognition_video_url = auto_role_recognition_video_url
10061
10270
  self.exclude_generate_options_shrink = exclude_generate_options_shrink
10062
10271
  self.face_identity_similarity_min_score = face_identity_similarity_min_score
10063
10272
  self.frame_sample_method_shrink = frame_sample_method_shrink
@@ -10088,6 +10297,8 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
10088
10297
  return _map
10089
10298
 
10090
10299
  result = dict()
10300
+ if self.auto_role_recognition_video_url is not None:
10301
+ result['autoRoleRecognitionVideoUrl'] = self.auto_role_recognition_video_url
10091
10302
  if self.exclude_generate_options_shrink is not None:
10092
10303
  result['excludeGenerateOptions'] = self.exclude_generate_options_shrink
10093
10304
  if self.face_identity_similarity_min_score is not None:
@@ -10132,6 +10343,8 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
10132
10343
 
10133
10344
  def from_map(self, m: dict = None):
10134
10345
  m = m or dict()
10346
+ if m.get('autoRoleRecognitionVideoUrl') is not None:
10347
+ self.auto_role_recognition_video_url = m.get('autoRoleRecognitionVideoUrl')
10135
10348
  if m.get('excludeGenerateOptions') is not None:
10136
10349
  self.exclude_generate_options_shrink = m.get('excludeGenerateOptions')
10137
10350
  if m.get('faceIdentitySimilarityMinScore') is not None:
@@ -10894,6 +11107,145 @@ class RunVideoAnalysisResponseBodyPayloadOutputVideoMindMappingGenerateResult(Te
10894
11107
  return self
10895
11108
 
10896
11109
 
11110
class RunVideoAnalysisResponseBodyPayloadOutputVideoRoleRecognitionResultVideoRolesTimeIntervals(TeaModel):
    """One time interval in which a recognized video role appears.

    Serialized with camelCase keys: endTime, startTime, timestamp, url.
    """

    def __init__(
        self,
        end_time: int = None,
        start_time: int = None,
        timestamp: int = None,
        url: str = None,
    ):
        # Interval bounds and snapshot info as returned by the service.
        # Time unit is not visible here -- presumably milliseconds; confirm
        # against the QuanMiaoLightApp API reference.
        self.end_time = end_time
        self.start_time = start_time
        self.timestamp = timestamp
        self.url = url

    def validate(self):
        # Scalar fields only; nothing nested to validate.
        pass

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        _map = super().to_map()
        if _map is not None:
            return _map
        pairs = (
            ('endTime', self.end_time),
            ('startTime', self.start_time),
            ('timestamp', self.timestamp),
            ('url', self.url),
        )
        return {key: value for key, value in pairs if value is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a dict with camelCase keys; return self."""
        m = m or dict()
        for key, attr in (
            ('endTime', 'end_time'),
            ('startTime', 'start_time'),
            ('timestamp', 'timestamp'),
            ('url', 'url'),
        ):
            value = m.get(key)
            if value is not None:
                setattr(self, attr, value)
        return self
11153
+
11154
+
11155
class RunVideoAnalysisResponseBodyPayloadOutputVideoRoleRecognitionResultVideoRoles(TeaModel):
    """A single recognized role and the intervals in which it appears."""

    def __init__(
        self,
        is_auto_recognition: bool = None,
        ratio: float = None,
        role_info: str = None,
        role_name: str = None,
        time_intervals: List[RunVideoAnalysisResponseBodyPayloadOutputVideoRoleRecognitionResultVideoRolesTimeIntervals] = None,
    ):
        self.is_auto_recognition = is_auto_recognition
        # ratio: presumably the proportion of the video this role covers --
        # confirm against the API reference.
        self.ratio = ratio
        self.role_info = role_info
        self.role_name = role_name
        self.time_intervals = time_intervals

    def validate(self):
        # Recurse into nested interval models, skipping falsy entries.
        for interval in (self.time_intervals or []):
            if interval:
                interval.validate()

    def to_map(self):
        """Serialize to a dict with camelCase keys."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for key, value in (
            ('isAutoRecognition', self.is_auto_recognition),
            ('ratio', self.ratio),
            ('roleInfo', self.role_info),
            ('roleName', self.role_name),
        ):
            if value is not None:
                result[key] = value
        # NOTE: 'timeIntervals' is always emitted (possibly empty), matching
        # the generated-SDK convention for list-valued fields.
        result['timeIntervals'] = []
        if self.time_intervals is not None:
            result['timeIntervals'] = [
                item.to_map() if item else None for item in self.time_intervals
            ]
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a camelCase dict; return self."""
        m = m or dict()
        for key, attr in (
            ('isAutoRecognition', 'is_auto_recognition'),
            ('ratio', 'ratio'),
            ('roleInfo', 'role_info'),
            ('roleName', 'role_name'),
        ):
            value = m.get(key)
            if value is not None:
                setattr(self, attr, value)
        # Nested intervals are always reset, then rebuilt when present.
        self.time_intervals = []
        if m.get('timeIntervals') is not None:
            self.time_intervals = [
                RunVideoAnalysisResponseBodyPayloadOutputVideoRoleRecognitionResultVideoRolesTimeIntervals().from_map(item)
                for item in m.get('timeIntervals')
            ]
        return self
11212
+
11213
+
11214
class RunVideoAnalysisResponseBodyPayloadOutputVideoRoleRecognitionResult(TeaModel):
    """Role-recognition output: the list of recognized video roles."""

    def __init__(
        self,
        video_roles: List[RunVideoAnalysisResponseBodyPayloadOutputVideoRoleRecognitionResultVideoRoles] = None,
    ):
        self.video_roles = video_roles

    def validate(self):
        # Recurse into nested role models, skipping falsy entries.
        for role in (self.video_roles or []):
            if role:
                role.validate()

    def to_map(self):
        """Serialize to a dict; 'videoRoles' is always present (possibly empty)."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        result['videoRoles'] = []
        if self.video_roles is not None:
            result['videoRoles'] = [
                role.to_map() if role else None for role in self.video_roles
            ]
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; return self."""
        m = m or dict()
        self.video_roles = []
        if m.get('videoRoles') is not None:
            self.video_roles = [
                RunVideoAnalysisResponseBodyPayloadOutputVideoRoleRecognitionResultVideoRoles().from_map(item)
                for item in m.get('videoRoles')
            ]
        return self
11247
+
11248
+
10897
11249
  class RunVideoAnalysisResponseBodyPayloadOutputVideoShotSnapshotResultVideoShotsVideoSnapshots(TeaModel):
10898
11250
  def __init__(
10899
11251
  self,
@@ -11116,6 +11468,7 @@ class RunVideoAnalysisResponseBodyPayloadOutput(TeaModel):
11116
11468
  video_generate_result: RunVideoAnalysisResponseBodyPayloadOutputVideoGenerateResult = None,
11117
11469
  video_generate_results: List[RunVideoAnalysisResponseBodyPayloadOutputVideoGenerateResults] = None,
11118
11470
  video_mind_mapping_generate_result: RunVideoAnalysisResponseBodyPayloadOutputVideoMindMappingGenerateResult = None,
11471
+ video_role_recognition_result: RunVideoAnalysisResponseBodyPayloadOutputVideoRoleRecognitionResult = None,
11119
11472
  video_shot_snapshot_result: RunVideoAnalysisResponseBodyPayloadOutputVideoShotSnapshotResult = None,
11120
11473
  video_title_generate_result: RunVideoAnalysisResponseBodyPayloadOutputVideoTitleGenerateResult = None,
11121
11474
  ):
@@ -11125,6 +11478,7 @@ class RunVideoAnalysisResponseBodyPayloadOutput(TeaModel):
11125
11478
  self.video_generate_result = video_generate_result
11126
11479
  self.video_generate_results = video_generate_results
11127
11480
  self.video_mind_mapping_generate_result = video_mind_mapping_generate_result
11481
+ self.video_role_recognition_result = video_role_recognition_result
11128
11482
  self.video_shot_snapshot_result = video_shot_snapshot_result
11129
11483
  self.video_title_generate_result = video_title_generate_result
11130
11484
 
@@ -11141,6 +11495,8 @@ class RunVideoAnalysisResponseBodyPayloadOutput(TeaModel):
11141
11495
  k.validate()
11142
11496
  if self.video_mind_mapping_generate_result:
11143
11497
  self.video_mind_mapping_generate_result.validate()
11498
+ if self.video_role_recognition_result:
11499
+ self.video_role_recognition_result.validate()
11144
11500
  if self.video_shot_snapshot_result:
11145
11501
  self.video_shot_snapshot_result.validate()
11146
11502
  if self.video_title_generate_result:
@@ -11166,6 +11522,8 @@ class RunVideoAnalysisResponseBodyPayloadOutput(TeaModel):
11166
11522
  result['videoGenerateResults'].append(k.to_map() if k else None)
11167
11523
  if self.video_mind_mapping_generate_result is not None:
11168
11524
  result['videoMindMappingGenerateResult'] = self.video_mind_mapping_generate_result.to_map()
11525
+ if self.video_role_recognition_result is not None:
11526
+ result['videoRoleRecognitionResult'] = self.video_role_recognition_result.to_map()
11169
11527
  if self.video_shot_snapshot_result is not None:
11170
11528
  result['videoShotSnapshotResult'] = self.video_shot_snapshot_result.to_map()
11171
11529
  if self.video_title_generate_result is not None:
@@ -11193,6 +11551,9 @@ class RunVideoAnalysisResponseBodyPayloadOutput(TeaModel):
11193
11551
  if m.get('videoMindMappingGenerateResult') is not None:
11194
11552
  temp_model = RunVideoAnalysisResponseBodyPayloadOutputVideoMindMappingGenerateResult()
11195
11553
  self.video_mind_mapping_generate_result = temp_model.from_map(m['videoMindMappingGenerateResult'])
11554
+ if m.get('videoRoleRecognitionResult') is not None:
11555
+ temp_model = RunVideoAnalysisResponseBodyPayloadOutputVideoRoleRecognitionResult()
11556
+ self.video_role_recognition_result = temp_model.from_map(m['videoRoleRecognitionResult'])
11196
11557
  if m.get('videoShotSnapshotResult') is not None:
11197
11558
  temp_model = RunVideoAnalysisResponseBodyPayloadOutputVideoShotSnapshotResult()
11198
11559
  self.video_shot_snapshot_result = temp_model.from_map(m['videoShotSnapshotResult'])
@@ -12592,19 +12953,59 @@ class SubmitVideoAnalysisTaskRequestVideoCaptionInfo(TeaModel):
12592
12953
  return self
12593
12954
 
12594
12955
 
12956
class SubmitVideoAnalysisTaskRequestVideoRolesTimeIntervals(TeaModel):
    """Caller-supplied time interval constraining where a role appears.

    Serialized with camelCase keys: endTime, startTime.
    """

    def __init__(
        self,
        end_time: int = None,
        start_time: int = None,
    ):
        # Time unit is not visible here -- presumably milliseconds; confirm
        # against the QuanMiaoLightApp API reference.
        self.end_time = end_time
        self.start_time = start_time

    def validate(self):
        # Scalar fields only; nothing nested to validate.
        pass

    def to_map(self):
        """Serialize to a dict, omitting fields that are None."""
        _map = super().to_map()
        if _map is not None:
            return _map
        pairs = (
            ('endTime', self.end_time),
            ('startTime', self.start_time),
        )
        return {key: value for key, value in pairs if value is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a dict with camelCase keys; return self."""
        m = m or dict()
        for key, attr in (
            ('endTime', 'end_time'),
            ('startTime', 'start_time'),
        ):
            value = m.get(key)
            if value is not None:
                setattr(self, attr, value)
        return self
12988
+
12595
12989
  class SubmitVideoAnalysisTaskRequestVideoRoles(TeaModel):
12596
12990
  def __init__(
12597
12991
  self,
12992
+ is_auto_recognition: bool = None,
12598
12993
  role_info: str = None,
12599
12994
  role_name: str = None,
12995
+ time_intervals: List[SubmitVideoAnalysisTaskRequestVideoRolesTimeIntervals] = None,
12600
12996
  urls: List[str] = None,
12601
12997
  ):
12998
+ self.is_auto_recognition = is_auto_recognition
12602
12999
  self.role_info = role_info
12603
13000
  self.role_name = role_name
13001
+ self.time_intervals = time_intervals
12604
13002
  self.urls = urls
12605
13003
 
12606
13004
  def validate(self):
12607
- pass
13005
+ if self.time_intervals:
13006
+ for k in self.time_intervals:
13007
+ if k:
13008
+ k.validate()
12608
13009
 
12609
13010
  def to_map(self):
12610
13011
  _map = super().to_map()
@@ -12612,20 +13013,33 @@ class SubmitVideoAnalysisTaskRequestVideoRoles(TeaModel):
12612
13013
  return _map
12613
13014
 
12614
13015
  result = dict()
13016
+ if self.is_auto_recognition is not None:
13017
+ result['isAutoRecognition'] = self.is_auto_recognition
12615
13018
  if self.role_info is not None:
12616
13019
  result['roleInfo'] = self.role_info
12617
13020
  if self.role_name is not None:
12618
13021
  result['roleName'] = self.role_name
13022
+ result['timeIntervals'] = []
13023
+ if self.time_intervals is not None:
13024
+ for k in self.time_intervals:
13025
+ result['timeIntervals'].append(k.to_map() if k else None)
12619
13026
  if self.urls is not None:
12620
13027
  result['urls'] = self.urls
12621
13028
  return result
12622
13029
 
12623
13030
  def from_map(self, m: dict = None):
12624
13031
  m = m or dict()
13032
+ if m.get('isAutoRecognition') is not None:
13033
+ self.is_auto_recognition = m.get('isAutoRecognition')
12625
13034
  if m.get('roleInfo') is not None:
12626
13035
  self.role_info = m.get('roleInfo')
12627
13036
  if m.get('roleName') is not None:
12628
13037
  self.role_name = m.get('roleName')
13038
+ self.time_intervals = []
13039
+ if m.get('timeIntervals') is not None:
13040
+ for k in m.get('timeIntervals'):
13041
+ temp_model = SubmitVideoAnalysisTaskRequestVideoRolesTimeIntervals()
13042
+ self.time_intervals.append(temp_model.from_map(k))
12629
13043
  if m.get('urls') is not None:
12630
13044
  self.urls = m.get('urls')
12631
13045
  return self
@@ -12634,6 +13048,7 @@ class SubmitVideoAnalysisTaskRequestVideoRoles(TeaModel):
12634
13048
  class SubmitVideoAnalysisTaskRequest(TeaModel):
12635
13049
  def __init__(
12636
13050
  self,
13051
+ auto_role_recognition_video_url: str = None,
12637
13052
  deduplication_id: str = None,
12638
13053
  exclude_generate_options: List[str] = None,
12639
13054
  face_identity_similarity_min_score: float = None,
@@ -12654,6 +13069,7 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
12654
13069
  video_shot_face_identity_count: int = None,
12655
13070
  video_url: str = None,
12656
13071
  ):
13072
+ self.auto_role_recognition_video_url = auto_role_recognition_video_url
12657
13073
  self.deduplication_id = deduplication_id
12658
13074
  self.exclude_generate_options = exclude_generate_options
12659
13075
  self.face_identity_similarity_min_score = face_identity_similarity_min_score
@@ -12695,6 +13111,8 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
12695
13111
  return _map
12696
13112
 
12697
13113
  result = dict()
13114
+ if self.auto_role_recognition_video_url is not None:
13115
+ result['autoRoleRecognitionVideoUrl'] = self.auto_role_recognition_video_url
12698
13116
  if self.deduplication_id is not None:
12699
13117
  result['deduplicationId'] = self.deduplication_id
12700
13118
  if self.exclude_generate_options is not None:
@@ -12741,6 +13159,8 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
12741
13159
 
12742
13160
  def from_map(self, m: dict = None):
12743
13161
  m = m or dict()
13162
+ if m.get('autoRoleRecognitionVideoUrl') is not None:
13163
+ self.auto_role_recognition_video_url = m.get('autoRoleRecognitionVideoUrl')
12744
13164
  if m.get('deduplicationId') is not None:
12745
13165
  self.deduplication_id = m.get('deduplicationId')
12746
13166
  if m.get('excludeGenerateOptions') is not None:
@@ -12793,6 +13213,7 @@ class SubmitVideoAnalysisTaskRequest(TeaModel):
12793
13213
  class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
12794
13214
  def __init__(
12795
13215
  self,
13216
+ auto_role_recognition_video_url: str = None,
12796
13217
  deduplication_id: str = None,
12797
13218
  exclude_generate_options_shrink: str = None,
12798
13219
  face_identity_similarity_min_score: float = None,
@@ -12813,6 +13234,7 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
12813
13234
  video_shot_face_identity_count: int = None,
12814
13235
  video_url: str = None,
12815
13236
  ):
13237
+ self.auto_role_recognition_video_url = auto_role_recognition_video_url
12816
13238
  self.deduplication_id = deduplication_id
12817
13239
  self.exclude_generate_options_shrink = exclude_generate_options_shrink
12818
13240
  self.face_identity_similarity_min_score = face_identity_similarity_min_score
@@ -12843,6 +13265,8 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
12843
13265
  return _map
12844
13266
 
12845
13267
  result = dict()
13268
+ if self.auto_role_recognition_video_url is not None:
13269
+ result['autoRoleRecognitionVideoUrl'] = self.auto_role_recognition_video_url
12846
13270
  if self.deduplication_id is not None:
12847
13271
  result['deduplicationId'] = self.deduplication_id
12848
13272
  if self.exclude_generate_options_shrink is not None:
@@ -12885,6 +13309,8 @@ class SubmitVideoAnalysisTaskShrinkRequest(TeaModel):
12885
13309
 
12886
13310
  def from_map(self, m: dict = None):
12887
13311
  m = m or dict()
13312
+ if m.get('autoRoleRecognitionVideoUrl') is not None:
13313
+ self.auto_role_recognition_video_url = m.get('autoRoleRecognitionVideoUrl')
12888
13314
  if m.get('deduplicationId') is not None:
12889
13315
  self.deduplication_id = m.get('deduplicationId')
12890
13316
  if m.get('excludeGenerateOptions') is not None:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: alibabacloud-quanmiaolightapp20240801
3
- Version: 2.10.1
3
+ Version: 2.10.2
4
4
  Summary: Alibaba Cloud QuanMiaoLightApp (20240801) SDK Library for Python
5
5
  Home-page: https://github.com/aliyun/alibabacloud-python-sdk
6
6
  Author: Alibaba Cloud SDK