alibabacloud-quanmiaolightapp20240801 2.13.1__py3-none-any.whl → 2.13.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148) hide show
  1. alibabacloud_quanmiaolightapp20240801/__init__.py +1 -1
  2. alibabacloud_quanmiaolightapp20240801/client.py +4452 -3757
  3. alibabacloud_quanmiaolightapp20240801/models/__init__.py +691 -0
  4. alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_request.py +33 -0
  5. alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response.py +54 -0
  6. alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response_body.py +74 -0
  7. alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_request.py +52 -0
  8. alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response.py +54 -0
  9. alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response_body.py +66 -0
  10. alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_shrink_request.py +50 -0
  11. alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_request.py +34 -0
  12. alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response.py +54 -0
  13. alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response_body.py +303 -0
  14. alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_request.py +112 -0
  15. alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response.py +54 -0
  16. alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response_body.py +104 -0
  17. alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_shrink_request.py +66 -0
  18. alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_request.py +33 -0
  19. alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response.py +54 -0
  20. alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response_body.py +374 -0
  21. alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_request.py +33 -0
  22. alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response.py +54 -0
  23. alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response_body.py +174 -0
  24. alibabacloud_quanmiaolightapp20240801/models/_get_file_content_request.py +33 -0
  25. alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response.py +54 -0
  26. alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response_body.py +104 -0
  27. alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_request.py +33 -0
  28. alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response.py +54 -0
  29. alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response_body.py +347 -0
  30. alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response.py +54 -0
  31. alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response_body.py +104 -0
  32. alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_request.py +34 -0
  33. alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response.py +54 -0
  34. alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response_body.py +1620 -0
  35. alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response.py +54 -0
  36. alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response_body.py +106 -0
  37. alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_request.py +34 -0
  38. alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response.py +54 -0
  39. alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response_body.py +494 -0
  40. alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_request.py +33 -0
  41. alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response.py +54 -0
  42. alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response_body.py +180 -0
  43. alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_request.py +50 -0
  44. alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response.py +54 -0
  45. alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response_body.py +196 -0
  46. alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_request.py +65 -0
  47. alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response.py +54 -0
  48. alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response_body.py +367 -0
  49. alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_request.py +203 -0
  50. alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response.py +54 -0
  51. alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response_body.py +331 -0
  52. alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_shrink_request.py +109 -0
  53. alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_request.py +81 -0
  54. alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response.py +54 -0
  55. alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response_body.py +241 -0
  56. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_request.py +264 -0
  57. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response.py +54 -0
  58. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response_body.py +636 -0
  59. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_shrink_request.py +121 -0
  60. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_request.py +100 -0
  61. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response.py +54 -0
  62. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response_body.py +241 -0
  63. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_shrink_request.py +52 -0
  64. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_request.py +59 -0
  65. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response.py +54 -0
  66. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response_body.py +232 -0
  67. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_shrink_request.py +57 -0
  68. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_request.py +89 -0
  69. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response.py +54 -0
  70. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response_body.py +248 -0
  71. alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_request.py +136 -0
  72. alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response.py +54 -0
  73. alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response_body.py +233 -0
  74. alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_shrink_request.py +90 -0
  75. alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_request.py +49 -0
  76. alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response.py +54 -0
  77. alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response_body.py +233 -0
  78. alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_request.py +42 -0
  79. alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response.py +54 -0
  80. alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response_body.py +248 -0
  81. alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_request.py +50 -0
  82. alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response.py +54 -0
  83. alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response_body.py +248 -0
  84. alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_request.py +82 -0
  85. alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response.py +54 -0
  86. alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response_body.py +248 -0
  87. alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_request.py +33 -0
  88. alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response.py +54 -0
  89. alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response_body.py +290 -0
  90. alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_request.py +75 -0
  91. alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response.py +54 -0
  92. alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response_body.py +248 -0
  93. alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_shrink_request.py +73 -0
  94. alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_request.py +136 -0
  95. alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response.py +54 -0
  96. alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response_body.py +233 -0
  97. alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_shrink_request.py +90 -0
  98. alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_request.py +600 -0
  99. alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response.py +54 -0
  100. alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response_body.py +1668 -0
  101. alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_shrink_request.py +209 -0
  102. alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_request.py +142 -0
  103. alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response.py +54 -0
  104. alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response_body.py +363 -0
  105. alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_shrink_request.py +140 -0
  106. alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_request.py +247 -0
  107. alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response.py +54 -0
  108. alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response_body.py +104 -0
  109. alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_shrink_request.py +113 -0
  110. alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_request.py +167 -0
  111. alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response.py +54 -0
  112. alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response_body.py +103 -0
  113. alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_shrink_request.py +81 -0
  114. alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_request.py +143 -0
  115. alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response.py +54 -0
  116. alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response_body.py +104 -0
  117. alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_shrink_request.py +97 -0
  118. alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_request.py +593 -0
  119. alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response.py +54 -0
  120. alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response_body.py +103 -0
  121. alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_shrink_request.py +202 -0
  122. alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_request.py +148 -0
  123. alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response.py +54 -0
  124. alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response_body.py +104 -0
  125. alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_shrink_request.py +146 -0
  126. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_request.py +34 -0
  127. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response.py +54 -0
  128. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response_body.py +66 -0
  129. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_request.py +43 -0
  130. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response.py +54 -0
  131. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response_body.py +119 -0
  132. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_request.py +45 -0
  133. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response.py +54 -0
  134. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response_body.py +136 -0
  135. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_shrink_request.py +43 -0
  136. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_request.py +34 -0
  137. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response.py +54 -0
  138. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response_body.py +66 -0
  139. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_request.py +43 -0
  140. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response.py +54 -0
  141. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response_body.py +120 -0
  142. {alibabacloud_quanmiaolightapp20240801-2.13.1.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/METADATA +7 -7
  143. alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info/RECORD +146 -0
  144. alibabacloud_quanmiaolightapp20240801/models.py +0 -16030
  145. alibabacloud_quanmiaolightapp20240801-2.13.1.dist-info/RECORD +0 -8
  146. {alibabacloud_quanmiaolightapp20240801-2.13.1.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/LICENSE +0 -0
  147. {alibabacloud_quanmiaolightapp20240801-2.13.1.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/WHEEL +0 -0
  148. {alibabacloud_quanmiaolightapp20240801-2.13.1.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,600 @@
1
+ # -*- coding: utf-8 -*-
2
+ # This file is auto-generated, don't edit it. Thanks.
3
+ from __future__ import annotations
4
+
5
+ from typing import List
6
+
7
+ from alibabacloud_quanmiaolightapp20240801 import models as main_models
8
+ from darabonba.model import DaraModel
9
+
10
class RunVideoAnalysisRequest(DaraModel):
    """Request model for the RunVideoAnalysis operation.

    Auto-generated darabonba data holder: snake_case Python attributes map
    to camelCase wire keys via ``to_map`` / ``from_map``. All fields are
    optional and default to ``None``.
    """

    def __init__(
        self,
        add_document_param: main_models.RunVideoAnalysisRequestAddDocumentParam = None,
        auto_role_recognition_video_url: str = None,
        exclude_generate_options: List[str] = None,
        face_identity_similarity_min_score: float = None,
        frame_sample_method: main_models.RunVideoAnalysisRequestFrameSampleMethod = None,
        generate_options: List[str] = None,
        language: str = None,
        model_custom_prompt_template: str = None,
        model_custom_prompt_template_id: str = None,
        model_id: str = None,
        original_session_id: str = None,
        snapshot_interval: float = None,
        split_interval: int = None,
        split_type: str = None,
        task_id: str = None,
        text_process_tasks: List[main_models.RunVideoAnalysisRequestTextProcessTasks] = None,
        video_caption_info: main_models.RunVideoAnalysisRequestVideoCaptionInfo = None,
        video_extra_info: str = None,
        video_model_custom_prompt_template: str = None,
        video_model_id: str = None,
        video_roles: List[main_models.RunVideoAnalysisRequestVideoRoles] = None,
        video_shot_face_identity_count: int = None,
        video_url: str = None,
    ):
        # Plain attribute assignment only — no validation or conversion here.
        self.add_document_param = add_document_param
        self.auto_role_recognition_video_url = auto_role_recognition_video_url
        self.exclude_generate_options = exclude_generate_options
        self.face_identity_similarity_min_score = face_identity_similarity_min_score
        self.frame_sample_method = frame_sample_method
        self.generate_options = generate_options
        self.language = language
        self.model_custom_prompt_template = model_custom_prompt_template
        self.model_custom_prompt_template_id = model_custom_prompt_template_id
        self.model_id = model_id
        self.original_session_id = original_session_id
        self.snapshot_interval = snapshot_interval
        self.split_interval = split_interval
        self.split_type = split_type
        self.task_id = task_id
        self.text_process_tasks = text_process_tasks
        self.video_caption_info = video_caption_info
        self.video_extra_info = video_extra_info
        self.video_model_custom_prompt_template = video_model_custom_prompt_template
        self.video_model_id = video_model_id
        self.video_roles = video_roles
        self.video_shot_face_identity_count = video_shot_face_identity_count
        self.video_url = video_url

    def validate(self):
        """Recursively validate every nested model that is set (truthy)."""
        if self.add_document_param:
            self.add_document_param.validate()
        if self.frame_sample_method:
            self.frame_sample_method.validate()
        if self.text_process_tasks:
            for v1 in self.text_process_tasks:
                if v1:
                    v1.validate()
        if self.video_caption_info:
            self.video_caption_info.validate()
        if self.video_roles:
            for v1 in self.video_roles:
                if v1:
                    v1.validate()

    def to_map(self):
        """Serialize to a camelCase-keyed dict.

        Scalar/nested keys are omitted when ``None``; note that
        ``textProcessTasks`` and ``videoRoles`` are ALWAYS present (as empty
        lists when unset) — that asymmetry is part of the generated wire
        format, so don't "fix" it.
        """
        result = dict()
        # Start from the parent serialization when it provides one.
        _map = super().to_map()
        if _map is not None:
            result = _map
        if self.add_document_param is not None:
            result['addDocumentParam'] = self.add_document_param.to_map()

        if self.auto_role_recognition_video_url is not None:
            result['autoRoleRecognitionVideoUrl'] = self.auto_role_recognition_video_url

        if self.exclude_generate_options is not None:
            result['excludeGenerateOptions'] = self.exclude_generate_options

        if self.face_identity_similarity_min_score is not None:
            result['faceIdentitySimilarityMinScore'] = self.face_identity_similarity_min_score

        if self.frame_sample_method is not None:
            result['frameSampleMethod'] = self.frame_sample_method.to_map()

        if self.generate_options is not None:
            result['generateOptions'] = self.generate_options

        if self.language is not None:
            result['language'] = self.language

        if self.model_custom_prompt_template is not None:
            result['modelCustomPromptTemplate'] = self.model_custom_prompt_template

        if self.model_custom_prompt_template_id is not None:
            result['modelCustomPromptTemplateId'] = self.model_custom_prompt_template_id

        if self.model_id is not None:
            result['modelId'] = self.model_id

        if self.original_session_id is not None:
            result['originalSessionId'] = self.original_session_id

        if self.snapshot_interval is not None:
            result['snapshotInterval'] = self.snapshot_interval

        if self.split_interval is not None:
            result['splitInterval'] = self.split_interval

        if self.split_type is not None:
            result['splitType'] = self.split_type

        if self.task_id is not None:
            result['taskId'] = self.task_id

        # List key is emitted unconditionally (empty when unset).
        result['textProcessTasks'] = []
        if self.text_process_tasks is not None:
            for k1 in self.text_process_tasks:
                result['textProcessTasks'].append(k1.to_map() if k1 else None)

        if self.video_caption_info is not None:
            result['videoCaptionInfo'] = self.video_caption_info.to_map()

        if self.video_extra_info is not None:
            result['videoExtraInfo'] = self.video_extra_info

        if self.video_model_custom_prompt_template is not None:
            result['videoModelCustomPromptTemplate'] = self.video_model_custom_prompt_template

        if self.video_model_id is not None:
            result['videoModelId'] = self.video_model_id

        # List key is emitted unconditionally (empty when unset).
        result['videoRoles'] = []
        if self.video_roles is not None:
            for k1 in self.video_roles:
                result['videoRoles'].append(k1.to_map() if k1 else None)

        if self.video_shot_face_identity_count is not None:
            result['videoShotFaceIdentityCount'] = self.video_shot_face_identity_count

        if self.video_url is not None:
            result['videoUrl'] = self.video_url

        return result

    def from_map(self, m: dict = None):
        """Populate fields from a camelCase-keyed dict; returns ``self``.

        Nested models are rebuilt via their own ``from_map``. The two list
        attributes are unconditionally reset to ``[]`` before filling, so a
        missing key yields an empty list rather than ``None``.
        """
        m = m or dict()
        if m.get('addDocumentParam') is not None:
            temp_model = main_models.RunVideoAnalysisRequestAddDocumentParam()
            self.add_document_param = temp_model.from_map(m.get('addDocumentParam'))

        if m.get('autoRoleRecognitionVideoUrl') is not None:
            self.auto_role_recognition_video_url = m.get('autoRoleRecognitionVideoUrl')

        if m.get('excludeGenerateOptions') is not None:
            self.exclude_generate_options = m.get('excludeGenerateOptions')

        if m.get('faceIdentitySimilarityMinScore') is not None:
            self.face_identity_similarity_min_score = m.get('faceIdentitySimilarityMinScore')

        if m.get('frameSampleMethod') is not None:
            temp_model = main_models.RunVideoAnalysisRequestFrameSampleMethod()
            self.frame_sample_method = temp_model.from_map(m.get('frameSampleMethod'))

        if m.get('generateOptions') is not None:
            self.generate_options = m.get('generateOptions')

        if m.get('language') is not None:
            self.language = m.get('language')

        if m.get('modelCustomPromptTemplate') is not None:
            self.model_custom_prompt_template = m.get('modelCustomPromptTemplate')

        if m.get('modelCustomPromptTemplateId') is not None:
            self.model_custom_prompt_template_id = m.get('modelCustomPromptTemplateId')

        if m.get('modelId') is not None:
            self.model_id = m.get('modelId')

        if m.get('originalSessionId') is not None:
            self.original_session_id = m.get('originalSessionId')

        if m.get('snapshotInterval') is not None:
            self.snapshot_interval = m.get('snapshotInterval')

        if m.get('splitInterval') is not None:
            self.split_interval = m.get('splitInterval')

        if m.get('splitType') is not None:
            self.split_type = m.get('splitType')

        if m.get('taskId') is not None:
            self.task_id = m.get('taskId')

        self.text_process_tasks = []
        if m.get('textProcessTasks') is not None:
            for k1 in m.get('textProcessTasks'):
                temp_model = main_models.RunVideoAnalysisRequestTextProcessTasks()
                self.text_process_tasks.append(temp_model.from_map(k1))

        if m.get('videoCaptionInfo') is not None:
            temp_model = main_models.RunVideoAnalysisRequestVideoCaptionInfo()
            self.video_caption_info = temp_model.from_map(m.get('videoCaptionInfo'))

        if m.get('videoExtraInfo') is not None:
            self.video_extra_info = m.get('videoExtraInfo')

        if m.get('videoModelCustomPromptTemplate') is not None:
            self.video_model_custom_prompt_template = m.get('videoModelCustomPromptTemplate')

        if m.get('videoModelId') is not None:
            self.video_model_id = m.get('videoModelId')

        self.video_roles = []
        if m.get('videoRoles') is not None:
            for k1 in m.get('videoRoles'):
                temp_model = main_models.RunVideoAnalysisRequestVideoRoles()
                self.video_roles.append(temp_model.from_map(k1))

        if m.get('videoShotFaceIdentityCount') is not None:
            self.video_shot_face_identity_count = m.get('videoShotFaceIdentityCount')

        if m.get('videoUrl') is not None:
            self.video_url = m.get('videoUrl')

        return self
238
+
239
class RunVideoAnalysisRequestVideoRoles(DaraModel):
    """One role entry for video analysis: name, free-text info, reference
    URLs and optional appearance time windows.

    camelCase wire keys are produced/consumed by ``to_map``/``from_map``;
    the ``timeIntervals`` key is always present on serialization (empty
    list when unset).
    """

    def __init__(
        self,
        is_auto_recognition: bool = None,
        role_info: str = None,
        role_name: str = None,
        time_intervals: List[main_models.RunVideoAnalysisRequestVideoRolesTimeIntervals] = None,
        urls: List[str] = None,
    ):
        self.is_auto_recognition = is_auto_recognition
        self.role_info = role_info
        self.role_name = role_name
        self.time_intervals = time_intervals
        self.urls = urls

    def validate(self):
        """Validate each nested time-interval model that is set."""
        for interval in (self.time_intervals or []):
            if interval:
                interval.validate()

    def to_map(self):
        """Serialize to a camelCase dict; scalars omitted when ``None``."""
        result = super().to_map()
        if result is None:
            result = dict()
        for key, value in (
            ('isAutoRecognition', self.is_auto_recognition),
            ('roleInfo', self.role_info),
            ('roleName', self.role_name),
        ):
            if value is not None:
                result[key] = value
        # The wire format always carries this list key, even when unset.
        result['timeIntervals'] = [
            item.to_map() if item else None
            for item in (self.time_intervals or [])
        ]
        if self.urls is not None:
            result['urls'] = self.urls
        return result

    def from_map(self, m: dict = None):
        """Populate from a camelCase dict; returns ``self`` for chaining."""
        m = m or dict()
        for key, attr in (
            ('isAutoRecognition', 'is_auto_recognition'),
            ('roleInfo', 'role_info'),
            ('roleName', 'role_name'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        # Always reset: a missing key yields [] rather than None.
        self.time_intervals = []
        for raw in (m.get('timeIntervals') or []):
            self.time_intervals.append(
                main_models.RunVideoAnalysisRequestVideoRolesTimeIntervals().from_map(raw)
            )
        if m.get('urls') is not None:
            self.urls = m.get('urls')
        return self
305
+
306
class RunVideoAnalysisRequestVideoRolesTimeIntervals(DaraModel):
    """Start/end pair marking one appearance window of a video role."""

    def __init__(
        self,
        end_time: int = None,
        start_time: int = None,
    ):
        self.end_time = end_time
        self.start_time = start_time

    def validate(self):
        """Nothing to validate — scalar fields only."""
        pass

    def to_map(self):
        """Serialize to a camelCase dict, skipping unset fields."""
        result = super().to_map()
        if result is None:
            result = dict()
        for key, value in (
            ('endTime', self.end_time),
            ('startTime', self.start_time),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a camelCase dict; returns ``self``."""
        m = m or dict()
        for key, attr in (
            ('endTime', 'end_time'),
            ('startTime', 'start_time'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
340
+
341
class RunVideoAnalysisRequestVideoCaptionInfo(DaraModel):
    """Caption input for video analysis: either a caption file URL or an
    inline list of caption entries (or both).

    On serialization the ``videoCaptions`` key is always present, as an
    empty list when unset.
    """

    def __init__(
        self,
        video_caption_file_url: str = None,
        video_captions: List[main_models.RunVideoAnalysisRequestVideoCaptionInfoVideoCaptions] = None,
    ):
        self.video_caption_file_url = video_caption_file_url
        self.video_captions = video_captions

    def validate(self):
        """Validate each nested caption model that is set."""
        for caption in (self.video_captions or []):
            if caption:
                caption.validate()

    def to_map(self):
        """Serialize to a camelCase dict."""
        result = super().to_map()
        if result is None:
            result = dict()
        if self.video_caption_file_url is not None:
            result['videoCaptionFileUrl'] = self.video_caption_file_url
        # List key is emitted unconditionally (empty when unset).
        result['videoCaptions'] = [
            item.to_map() if item else None
            for item in (self.video_captions or [])
        ]
        return result

    def from_map(self, m: dict = None):
        """Populate from a camelCase dict; returns ``self``."""
        m = m or dict()
        if m.get('videoCaptionFileUrl') is not None:
            self.video_caption_file_url = m.get('videoCaptionFileUrl')
        # Always reset: a missing key yields [] rather than None.
        self.video_captions = []
        for raw in (m.get('videoCaptions') or []):
            self.video_captions.append(
                main_models.RunVideoAnalysisRequestVideoCaptionInfoVideoCaptions().from_map(raw)
            )
        return self
383
+
384
class RunVideoAnalysisRequestVideoCaptionInfoVideoCaptions(DaraModel):
    """A single caption line: text with start/end times and an optional
    speaker label."""

    def __init__(
        self,
        end_time: int = None,
        speaker: str = None,
        start_time: int = None,
        text: str = None,
    ):
        self.end_time = end_time
        self.speaker = speaker
        self.start_time = start_time
        self.text = text

    def validate(self):
        """Nothing to validate — scalar fields only."""
        pass

    def to_map(self):
        """Serialize to a camelCase dict, skipping unset fields."""
        result = super().to_map()
        if result is None:
            result = dict()
        for key, value in (
            ('endTime', self.end_time),
            ('speaker', self.speaker),
            ('startTime', self.start_time),
            ('text', self.text),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a camelCase dict; returns ``self``."""
        m = m or dict()
        for key, attr in (
            ('endTime', 'end_time'),
            ('speaker', 'speaker'),
            ('startTime', 'start_time'),
            ('text', 'text'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
434
+
435
class RunVideoAnalysisRequestTextProcessTasks(DaraModel):
    """One text-processing task: model id plus an optional custom prompt
    template (inline or by id)."""

    def __init__(
        self,
        model_custom_prompt_template: str = None,
        model_custom_prompt_template_id: str = None,
        model_id: str = None,
    ):
        self.model_custom_prompt_template = model_custom_prompt_template
        self.model_custom_prompt_template_id = model_custom_prompt_template_id
        self.model_id = model_id

    def validate(self):
        """Nothing to validate — scalar fields only."""
        pass

    def to_map(self):
        """Serialize to a camelCase dict, skipping unset fields."""
        result = super().to_map()
        if result is None:
            result = dict()
        for key, value in (
            ('modelCustomPromptTemplate', self.model_custom_prompt_template),
            ('modelCustomPromptTemplateId', self.model_custom_prompt_template_id),
            ('modelId', self.model_id),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a camelCase dict; returns ``self``."""
        m = m or dict()
        for key, attr in (
            ('modelCustomPromptTemplate', 'model_custom_prompt_template'),
            ('modelCustomPromptTemplateId', 'model_custom_prompt_template_id'),
            ('modelId', 'model_id'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
477
+
478
class RunVideoAnalysisRequestFrameSampleMethod(DaraModel):
    """Frame-sampling configuration: method name, interval and pixel
    setting. Semantics of each value are defined server-side."""

    def __init__(
        self,
        interval: float = None,
        method_name: str = None,
        pixel: int = None,
    ):
        self.interval = interval
        self.method_name = method_name
        self.pixel = pixel

    def validate(self):
        """Nothing to validate — scalar fields only."""
        pass

    def to_map(self):
        """Serialize to a camelCase dict, skipping unset fields."""
        result = super().to_map()
        if result is None:
            result = dict()
        for key, value in (
            ('interval', self.interval),
            ('methodName', self.method_name),
            ('pixel', self.pixel),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a camelCase dict; returns ``self``."""
        m = m or dict()
        for key, attr in (
            ('interval', 'interval'),
            ('methodName', 'method_name'),
            ('pixel', 'pixel'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
520
+
521
class RunVideoAnalysisRequestAddDocumentParam(DaraModel):
    """Parameters for attaching the analysis result to a dataset: target
    dataset (id or name) plus a nested document descriptor."""

    def __init__(
        self,
        dataset_id: int = None,
        dataset_name: str = None,
        document: main_models.RunVideoAnalysisRequestAddDocumentParamDocument = None,
    ):
        self.dataset_id = dataset_id
        self.dataset_name = dataset_name
        self.document = document

    def validate(self):
        """Validate the nested document model when set."""
        doc = self.document
        if doc:
            doc.validate()

    def to_map(self):
        """Serialize to a camelCase dict, skipping unset fields."""
        result = super().to_map()
        if result is None:
            result = dict()
        for key, value in (
            ('datasetId', self.dataset_id),
            ('datasetName', self.dataset_name),
        ):
            if value is not None:
                result[key] = value
        if self.document is not None:
            result['document'] = self.document.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate from a camelCase dict; returns ``self``."""
        m = m or dict()
        for key, attr in (
            ('datasetId', 'dataset_id'),
            ('datasetName', 'dataset_name'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        raw_doc = m.get('document')
        if raw_doc is not None:
            self.document = (
                main_models.RunVideoAnalysisRequestAddDocumentParamDocument().from_map(raw_doc)
            )
        return self
565
+
566
class RunVideoAnalysisRequestAddDocumentParamDocument(DaraModel):
    """Document descriptor: an id and a title."""

    def __init__(
        self,
        doc_id: str = None,
        title: str = None,
    ):
        self.doc_id = doc_id
        self.title = title

    def validate(self):
        """Nothing to validate — scalar fields only."""
        pass

    def to_map(self):
        """Serialize to a camelCase dict, skipping unset fields."""
        result = super().to_map()
        if result is None:
            result = dict()
        for key, value in (
            ('docId', self.doc_id),
            ('title', self.title),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a camelCase dict; returns ``self``."""
        m = m or dict()
        for key, attr in (
            ('docId', 'doc_id'),
            ('title', 'title'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
600
+
@@ -0,0 +1,54 @@
1
+ # -*- coding: utf-8 -*-
2
+ # This file is auto-generated, don't edit it. Thanks.
3
+ from __future__ import annotations
4
+
5
+ from typing import Dict
6
+
7
+ from alibabacloud_quanmiaolightapp20240801 import models as main_models
8
+ from darabonba.model import DaraModel
9
+
10
class RunVideoAnalysisResponse(DaraModel):
    """Response envelope for RunVideoAnalysis: HTTP headers, status code
    and the typed response body."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: main_models.RunVideoAnalysisResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        """Validate the nested body model when set."""
        if self.body:
            self.body.validate()

    def to_map(self):
        """Serialize to a dict, skipping unset fields."""
        result = super().to_map()
        if result is None:
            result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.status_code is not None:
            result['statusCode'] = self.status_code
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate from a dict; rebuilds the body model. Returns ``self``."""
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        raw_body = m.get('body')
        if raw_body is not None:
            self.body = main_models.RunVideoAnalysisResponseBody().from_map(raw_body)
        return self
+