alibabacloud-quanmiaolightapp20240801 2.13.1__py3-none-any.whl → 2.13.3__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in the supported public registries. It is provided for informational purposes only.
Files changed (148)
  1. alibabacloud_quanmiaolightapp20240801/__init__.py +1 -1
  2. alibabacloud_quanmiaolightapp20240801/client.py +4452 -3757
  3. alibabacloud_quanmiaolightapp20240801/models/__init__.py +691 -0
  4. alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_request.py +33 -0
  5. alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response.py +54 -0
  6. alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response_body.py +74 -0
  7. alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_request.py +52 -0
  8. alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response.py +54 -0
  9. alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response_body.py +66 -0
  10. alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_shrink_request.py +50 -0
  11. alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_request.py +34 -0
  12. alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response.py +54 -0
  13. alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response_body.py +303 -0
  14. alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_request.py +112 -0
  15. alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response.py +54 -0
  16. alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response_body.py +104 -0
  17. alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_shrink_request.py +66 -0
  18. alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_request.py +33 -0
  19. alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response.py +54 -0
  20. alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response_body.py +374 -0
  21. alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_request.py +33 -0
  22. alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response.py +54 -0
  23. alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response_body.py +174 -0
  24. alibabacloud_quanmiaolightapp20240801/models/_get_file_content_request.py +33 -0
  25. alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response.py +54 -0
  26. alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response_body.py +104 -0
  27. alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_request.py +33 -0
  28. alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response.py +54 -0
  29. alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response_body.py +347 -0
  30. alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response.py +54 -0
  31. alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response_body.py +104 -0
  32. alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_request.py +34 -0
  33. alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response.py +54 -0
  34. alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response_body.py +1620 -0
  35. alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response.py +54 -0
  36. alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response_body.py +106 -0
  37. alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_request.py +34 -0
  38. alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response.py +54 -0
  39. alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response_body.py +494 -0
  40. alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_request.py +33 -0
  41. alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response.py +54 -0
  42. alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response_body.py +180 -0
  43. alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_request.py +50 -0
  44. alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response.py +54 -0
  45. alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response_body.py +196 -0
  46. alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_request.py +65 -0
  47. alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response.py +54 -0
  48. alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response_body.py +367 -0
  49. alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_request.py +203 -0
  50. alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response.py +54 -0
  51. alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response_body.py +331 -0
  52. alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_shrink_request.py +109 -0
  53. alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_request.py +81 -0
  54. alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response.py +54 -0
  55. alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response_body.py +241 -0
  56. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_request.py +264 -0
  57. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response.py +54 -0
  58. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response_body.py +636 -0
  59. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_shrink_request.py +121 -0
  60. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_request.py +100 -0
  61. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response.py +54 -0
  62. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response_body.py +241 -0
  63. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_shrink_request.py +52 -0
  64. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_request.py +59 -0
  65. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response.py +54 -0
  66. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response_body.py +232 -0
  67. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_shrink_request.py +57 -0
  68. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_request.py +89 -0
  69. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response.py +54 -0
  70. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response_body.py +248 -0
  71. alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_request.py +136 -0
  72. alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response.py +54 -0
  73. alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response_body.py +233 -0
  74. alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_shrink_request.py +90 -0
  75. alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_request.py +49 -0
  76. alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response.py +54 -0
  77. alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response_body.py +233 -0
  78. alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_request.py +42 -0
  79. alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response.py +54 -0
  80. alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response_body.py +248 -0
  81. alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_request.py +50 -0
  82. alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response.py +54 -0
  83. alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response_body.py +248 -0
  84. alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_request.py +82 -0
  85. alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response.py +54 -0
  86. alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response_body.py +248 -0
  87. alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_request.py +33 -0
  88. alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response.py +54 -0
  89. alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response_body.py +290 -0
  90. alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_request.py +75 -0
  91. alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response.py +54 -0
  92. alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response_body.py +248 -0
  93. alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_shrink_request.py +73 -0
  94. alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_request.py +136 -0
  95. alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response.py +54 -0
  96. alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response_body.py +233 -0
  97. alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_shrink_request.py +90 -0
  98. alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_request.py +600 -0
  99. alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response.py +54 -0
  100. alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response_body.py +1668 -0
  101. alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_shrink_request.py +209 -0
  102. alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_request.py +142 -0
  103. alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response.py +54 -0
  104. alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response_body.py +363 -0
  105. alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_shrink_request.py +140 -0
  106. alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_request.py +247 -0
  107. alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response.py +54 -0
  108. alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response_body.py +104 -0
  109. alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_shrink_request.py +113 -0
  110. alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_request.py +167 -0
  111. alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response.py +54 -0
  112. alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response_body.py +103 -0
  113. alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_shrink_request.py +81 -0
  114. alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_request.py +143 -0
  115. alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response.py +54 -0
  116. alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response_body.py +104 -0
  117. alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_shrink_request.py +97 -0
  118. alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_request.py +593 -0
  119. alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response.py +54 -0
  120. alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response_body.py +103 -0
  121. alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_shrink_request.py +202 -0
  122. alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_request.py +148 -0
  123. alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response.py +54 -0
  124. alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response_body.py +104 -0
  125. alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_shrink_request.py +146 -0
  126. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_request.py +34 -0
  127. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response.py +54 -0
  128. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response_body.py +66 -0
  129. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_request.py +43 -0
  130. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response.py +54 -0
  131. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response_body.py +119 -0
  132. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_request.py +45 -0
  133. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response.py +54 -0
  134. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response_body.py +136 -0
  135. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_shrink_request.py +43 -0
  136. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_request.py +34 -0
  137. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response.py +54 -0
  138. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response_body.py +66 -0
  139. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_request.py +43 -0
  140. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response.py +54 -0
  141. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response_body.py +120 -0
  142. {alibabacloud_quanmiaolightapp20240801-2.13.1.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/METADATA +7 -7
  143. alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info/RECORD +146 -0
  144. alibabacloud_quanmiaolightapp20240801/models.py +0 -16030
  145. alibabacloud_quanmiaolightapp20240801-2.13.1.dist-info/RECORD +0 -8
  146. {alibabacloud_quanmiaolightapp20240801-2.13.1.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/LICENSE +0 -0
  147. {alibabacloud_quanmiaolightapp20240801-2.13.1.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/WHEEL +0 -0
  148. {alibabacloud_quanmiaolightapp20240801-2.13.1.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/top_level.txt +0 -0
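Note on the layout change visible in the list above: 2.13.3 removes the single generated models.py (item 144, -16030 lines) and replaces it with one module per model under models/, re-exported through models/__init__.py (item 3, +691 lines). A quick sanity sketch, assuming the 2.13.3 wheel is installed and the re-exports cover every generated class:

# Sketch only (assumes models/__init__.py re-exports the generated classes):
from alibabacloud_quanmiaolightapp20240801 import models as main_models
from alibabacloud_quanmiaolightapp20240801.models import RunVideoDetectShotShrinkRequest

# Both names refer to the same class object; the old `models as main_models`
# import style keeps working even though models.py is now a package.
assert main_models.RunVideoDetectShotShrinkRequest is RunVideoDetectShotShrinkRequest
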
alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response_body.py
@@ -0,0 +1,363 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from typing import List, Dict
+
+from alibabacloud_quanmiaolightapp20240801 import models as main_models
+from darabonba.model import DaraModel
+
+class RunVideoDetectShotResponseBody(DaraModel):
+    def __init__(
+        self,
+        header: main_models.RunVideoDetectShotResponseBodyHeader = None,
+        payload: main_models.RunVideoDetectShotResponseBodyPayload = None,
+        request_id: str = None,
+    ):
+        self.header = header
+        self.payload = payload
+        # Id of the request
+        self.request_id = request_id
+
+    def validate(self):
+        if self.header:
+            self.header.validate()
+        if self.payload:
+            self.payload.validate()
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.header is not None:
+            result['header'] = self.header.to_map()
+
+        if self.payload is not None:
+            result['payload'] = self.payload.to_map()
+
+        if self.request_id is not None:
+            result['requestId'] = self.request_id
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('header') is not None:
+            temp_model = main_models.RunVideoDetectShotResponseBodyHeader()
+            self.header = temp_model.from_map(m.get('header'))
+
+        if m.get('payload') is not None:
+            temp_model = main_models.RunVideoDetectShotResponseBodyPayload()
+            self.payload = temp_model.from_map(m.get('payload'))
+
+        if m.get('requestId') is not None:
+            self.request_id = m.get('requestId')
+
+        return self
+
+class RunVideoDetectShotResponseBodyPayload(DaraModel):
+    def __init__(
+        self,
+        output: main_models.RunVideoDetectShotResponseBodyPayloadOutput = None,
+        usage: main_models.RunVideoDetectShotResponseBodyPayloadUsage = None,
+    ):
+        self.output = output
+        self.usage = usage
+
+    def validate(self):
+        if self.output:
+            self.output.validate()
+        if self.usage:
+            self.usage.validate()
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.output is not None:
+            result['output'] = self.output.to_map()
+
+        if self.usage is not None:
+            result['usage'] = self.usage.to_map()
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('output') is not None:
+            temp_model = main_models.RunVideoDetectShotResponseBodyPayloadOutput()
+            self.output = temp_model.from_map(m.get('output'))
+
+        if m.get('usage') is not None:
+            temp_model = main_models.RunVideoDetectShotResponseBodyPayloadUsage()
+            self.usage = temp_model.from_map(m.get('usage'))
+
+        return self
+
+class RunVideoDetectShotResponseBodyPayloadUsage(DaraModel):
+    def __init__(
+        self,
+        input_tokens: int = None,
+        output_tokens: int = None,
+        total_tokens: int = None,
+    ):
+        self.input_tokens = input_tokens
+        self.output_tokens = output_tokens
+        self.total_tokens = total_tokens
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.input_tokens is not None:
+            result['inputTokens'] = self.input_tokens
+
+        if self.output_tokens is not None:
+            result['outputTokens'] = self.output_tokens
+
+        if self.total_tokens is not None:
+            result['totalTokens'] = self.total_tokens
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('inputTokens') is not None:
+            self.input_tokens = m.get('inputTokens')
+
+        if m.get('outputTokens') is not None:
+            self.output_tokens = m.get('outputTokens')
+
+        if m.get('totalTokens') is not None:
+            self.total_tokens = m.get('totalTokens')
+
+        return self
+
+class RunVideoDetectShotResponseBodyPayloadOutput(DaraModel):
+    def __init__(
+        self,
+        video_split_result: main_models.RunVideoDetectShotResponseBodyPayloadOutputVideoSplitResult = None,
+    ):
+        self.video_split_result = video_split_result
+
+    def validate(self):
+        if self.video_split_result:
+            self.video_split_result.validate()
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.video_split_result is not None:
+            result['videoSplitResult'] = self.video_split_result.to_map()
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('videoSplitResult') is not None:
+            temp_model = main_models.RunVideoDetectShotResponseBodyPayloadOutputVideoSplitResult()
+            self.video_split_result = temp_model.from_map(m.get('videoSplitResult'))
+
+        return self
+
+class RunVideoDetectShotResponseBodyPayloadOutputVideoSplitResult(DaraModel):
+    def __init__(
+        self,
+        reason_text: str = None,
+        text: str = None,
+        video_parts: List[Dict[str, str]] = None,
+        video_recognition_result: List[main_models.RunVideoDetectShotResponseBodyPayloadOutputVideoSplitResultVideoRecognitionResult] = None,
+    ):
+        self.reason_text = reason_text
+        self.text = text
+        self.video_parts = video_parts
+        self.video_recognition_result = video_recognition_result
+
+    def validate(self):
+        if self.video_recognition_result:
+            for v1 in self.video_recognition_result:
+                if v1:
+                    v1.validate()
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.reason_text is not None:
+            result['reasonText'] = self.reason_text
+
+        if self.text is not None:
+            result['text'] = self.text
+
+        if self.video_parts is not None:
+            result['videoParts'] = self.video_parts
+
+        result['videoRecognitionResult'] = []
+        if self.video_recognition_result is not None:
+            for k1 in self.video_recognition_result:
+                result['videoRecognitionResult'].append(k1.to_map() if k1 else None)
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('reasonText') is not None:
+            self.reason_text = m.get('reasonText')
+
+        if m.get('text') is not None:
+            self.text = m.get('text')
+
+        if m.get('videoParts') is not None:
+            self.video_parts = m.get('videoParts')
+
+        self.video_recognition_result = []
+        if m.get('videoRecognitionResult') is not None:
+            for k1 in m.get('videoRecognitionResult'):
+                temp_model = main_models.RunVideoDetectShotResponseBodyPayloadOutputVideoSplitResultVideoRecognitionResult()
+                self.video_recognition_result.append(temp_model.from_map(k1))
+
+        return self
+
+class RunVideoDetectShotResponseBodyPayloadOutputVideoSplitResultVideoRecognitionResult(DaraModel):
+    def __init__(
+        self,
+        asr: str = None,
+        end_time: int = None,
+        ocr: str = None,
+        start_time: int = None,
+        vl: str = None,
+    ):
+        self.asr = asr
+        self.end_time = end_time
+        self.ocr = ocr
+        self.start_time = start_time
+        self.vl = vl
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.asr is not None:
+            result['asr'] = self.asr
+
+        if self.end_time is not None:
+            result['endTime'] = self.end_time
+
+        if self.ocr is not None:
+            result['ocr'] = self.ocr
+
+        if self.start_time is not None:
+            result['startTime'] = self.start_time
+
+        if self.vl is not None:
+            result['vl'] = self.vl
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('asr') is not None:
+            self.asr = m.get('asr')
+
+        if m.get('endTime') is not None:
+            self.end_time = m.get('endTime')
+
+        if m.get('ocr') is not None:
+            self.ocr = m.get('ocr')
+
+        if m.get('startTime') is not None:
+            self.start_time = m.get('startTime')
+
+        if m.get('vl') is not None:
+            self.vl = m.get('vl')
+
+        return self
+
+class RunVideoDetectShotResponseBodyHeader(DaraModel):
+    def __init__(
+        self,
+        error_code: str = None,
+        error_message: str = None,
+        event: str = None,
+        event_info: str = None,
+        session_id: str = None,
+        task_id: str = None,
+        trace_id: str = None,
+    ):
+        self.error_code = error_code
+        self.error_message = error_message
+        self.event = event
+        self.event_info = event_info
+        self.session_id = session_id
+        self.task_id = task_id
+        self.trace_id = trace_id
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.error_code is not None:
+            result['errorCode'] = self.error_code
+
+        if self.error_message is not None:
+            result['errorMessage'] = self.error_message
+
+        if self.event is not None:
+            result['event'] = self.event
+
+        if self.event_info is not None:
+            result['eventInfo'] = self.event_info
+
+        if self.session_id is not None:
+            result['sessionId'] = self.session_id
+
+        if self.task_id is not None:
+            result['taskId'] = self.task_id
+
+        if self.trace_id is not None:
+            result['traceId'] = self.trace_id
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('errorCode') is not None:
+            self.error_code = m.get('errorCode')
+
+        if m.get('errorMessage') is not None:
+            self.error_message = m.get('errorMessage')
+
+        if m.get('event') is not None:
+            self.event = m.get('event')
+
+        if m.get('eventInfo') is not None:
+            self.event_info = m.get('eventInfo')
+
+        if m.get('sessionId') is not None:
+            self.session_id = m.get('sessionId')
+
+        if m.get('taskId') is not None:
+            self.task_id = m.get('taskId')
+
+        if m.get('traceId') is not None:
+            self.trace_id = m.get('traceId')
+
+        return self
+
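The per-model module above follows the generated DaraModel pattern: from_map() walks a camelCase wire payload into snake_case attributes and nested model instances, and to_map() reverses it. A minimal round-trip sketch, using made-up field values (not taken from the API), illustrates the mapping:

# Usage sketch only; requires alibabacloud-quanmiaolightapp20240801 >= 2.13.3.
# All values below are hypothetical and exist just to exercise from_map/to_map.
from alibabacloud_quanmiaolightapp20240801 import models as main_models

raw = {
    'requestId': 'demo-request-id',
    'header': {'event': 'task-finished', 'taskId': 'demo-task-id'},
    'payload': {
        'usage': {'inputTokens': 100, 'outputTokens': 20, 'totalTokens': 120},
        'output': {
            'videoSplitResult': {
                'text': 'shot list summary',
                'videoRecognitionResult': [
                    {'asr': 'spoken text', 'ocr': 'on-screen text',
                     'vl': 'visual description', 'startTime': 0, 'endTime': 5000},
                ],
            },
        },
    },
}

body = main_models.RunVideoDetectShotResponseBody().from_map(raw)
print(body.payload.usage.total_tokens)                                        # 120
print(body.payload.output.video_split_result.video_recognition_result[0].asr) # 'spoken text'
# to_map() serializes the set fields back under their camelCase wire keys.
assert body.to_map()['payload']['usage']['totalTokens'] == 120
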
alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_shrink_request.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from darabonba.model import DaraModel
+
+class RunVideoDetectShotShrinkRequest(DaraModel):
+    def __init__(
+        self,
+        intelli_simp_prompt: str = None,
+        intelli_simp_prompt_template_id: str = None,
+        language: str = None,
+        model_custom_prompt_template_id: str = None,
+        model_id: str = None,
+        model_vl_custom_prompt_template_id: str = None,
+        options_shrink: str = None,
+        original_session_id: str = None,
+        pre_model_id: str = None,
+        prompt: str = None,
+        recognition_options_shrink: str = None,
+        task_id: str = None,
+        video_url: str = None,
+        vl_prompt: str = None,
+    ):
+        self.intelli_simp_prompt = intelli_simp_prompt
+        self.intelli_simp_prompt_template_id = intelli_simp_prompt_template_id
+        self.language = language
+        self.model_custom_prompt_template_id = model_custom_prompt_template_id
+        self.model_id = model_id
+        self.model_vl_custom_prompt_template_id = model_vl_custom_prompt_template_id
+        # This parameter is required.
+        self.options_shrink = options_shrink
+        self.original_session_id = original_session_id
+        self.pre_model_id = pre_model_id
+        self.prompt = prompt
+        # This parameter is required.
+        self.recognition_options_shrink = recognition_options_shrink
+        self.task_id = task_id
+        # This parameter is required.
+        self.video_url = video_url
+        self.vl_prompt = vl_prompt
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.intelli_simp_prompt is not None:
+            result['intelliSimpPrompt'] = self.intelli_simp_prompt
+
+        if self.intelli_simp_prompt_template_id is not None:
+            result['intelliSimpPromptTemplateId'] = self.intelli_simp_prompt_template_id
+
+        if self.language is not None:
+            result['language'] = self.language
+
+        if self.model_custom_prompt_template_id is not None:
+            result['modelCustomPromptTemplateId'] = self.model_custom_prompt_template_id
+
+        if self.model_id is not None:
+            result['modelId'] = self.model_id
+
+        if self.model_vl_custom_prompt_template_id is not None:
+            result['modelVlCustomPromptTemplateId'] = self.model_vl_custom_prompt_template_id
+
+        if self.options_shrink is not None:
+            result['options'] = self.options_shrink
+
+        if self.original_session_id is not None:
+            result['originalSessionId'] = self.original_session_id
+
+        if self.pre_model_id is not None:
+            result['preModelId'] = self.pre_model_id
+
+        if self.prompt is not None:
+            result['prompt'] = self.prompt
+
+        if self.recognition_options_shrink is not None:
+            result['recognitionOptions'] = self.recognition_options_shrink
+
+        if self.task_id is not None:
+            result['taskId'] = self.task_id
+
+        if self.video_url is not None:
+            result['videoUrl'] = self.video_url
+
+        if self.vl_prompt is not None:
+            result['vlPrompt'] = self.vl_prompt
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('intelliSimpPrompt') is not None:
+            self.intelli_simp_prompt = m.get('intelliSimpPrompt')
+
+        if m.get('intelliSimpPromptTemplateId') is not None:
+            self.intelli_simp_prompt_template_id = m.get('intelliSimpPromptTemplateId')
+
+        if m.get('language') is not None:
+            self.language = m.get('language')
+
+        if m.get('modelCustomPromptTemplateId') is not None:
+            self.model_custom_prompt_template_id = m.get('modelCustomPromptTemplateId')
+
+        if m.get('modelId') is not None:
+            self.model_id = m.get('modelId')
+
+        if m.get('modelVlCustomPromptTemplateId') is not None:
+            self.model_vl_custom_prompt_template_id = m.get('modelVlCustomPromptTemplateId')
+
+        if m.get('options') is not None:
+            self.options_shrink = m.get('options')
+
+        if m.get('originalSessionId') is not None:
+            self.original_session_id = m.get('originalSessionId')
+
+        if m.get('preModelId') is not None:
+            self.pre_model_id = m.get('preModelId')
+
+        if m.get('prompt') is not None:
+            self.prompt = m.get('prompt')
+
+        if m.get('recognitionOptions') is not None:
+            self.recognition_options_shrink = m.get('recognitionOptions')
+
+        if m.get('taskId') is not None:
+            self.task_id = m.get('taskId')
+
+        if m.get('videoUrl') is not None:
+            self.video_url = m.get('videoUrl')
+
+        if m.get('vlPrompt') is not None:
+            self.vl_prompt = m.get('vlPrompt')
+
+        return self
+
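The shrink request above shows how the *_shrink string fields are serialized under their plain wire keys (options_shrink -> options, recognition_options_shrink -> recognitionOptions). In this SDK family the shrink variant typically carries a JSON-serialized form of the corresponding complex fields on the non-shrink request; that convention is assumed in the sketch below, and every value is hypothetical.

# Illustrative sketch only (hypothetical values; JSON-string contents are assumed,
# not taken from the API reference).
import json

from alibabacloud_quanmiaolightapp20240801 import models as main_models

req = main_models.RunVideoDetectShotShrinkRequest(
    video_url='https://example.com/demo.mp4',             # required
    options_shrink=json.dumps(['shotDetect']),             # required; assumed JSON list
    recognition_options_shrink=json.dumps({'asr': True}),  # required; assumed JSON object
    model_id='demo-model-id',                              # hypothetical model id
)

wire = req.to_map()
print(wire['videoUrl'])            # 'https://example.com/demo.mp4'
print(wire['options'])             # options_shrink is sent under the plain 'options' key
print(wire['recognitionOptions'])  # recognition_options_shrink -> 'recognitionOptions'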