alibabacloud-quanmiaolightapp20240801 2.13.2__py3-none-any.whl → 2.13.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148) hide show
  1. alibabacloud_quanmiaolightapp20240801/__init__.py +1 -1
  2. alibabacloud_quanmiaolightapp20240801/client.py +4448 -3769
  3. alibabacloud_quanmiaolightapp20240801/models/__init__.py +691 -0
  4. alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_request.py +33 -0
  5. alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response.py +54 -0
  6. alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response_body.py +74 -0
  7. alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_request.py +52 -0
  8. alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response.py +54 -0
  9. alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response_body.py +66 -0
  10. alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_shrink_request.py +50 -0
  11. alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_request.py +34 -0
  12. alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response.py +54 -0
  13. alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response_body.py +303 -0
  14. alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_request.py +112 -0
  15. alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response.py +54 -0
  16. alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response_body.py +104 -0
  17. alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_shrink_request.py +66 -0
  18. alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_request.py +33 -0
  19. alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response.py +54 -0
  20. alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response_body.py +374 -0
  21. alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_request.py +33 -0
  22. alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response.py +54 -0
  23. alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response_body.py +174 -0
  24. alibabacloud_quanmiaolightapp20240801/models/_get_file_content_request.py +33 -0
  25. alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response.py +54 -0
  26. alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response_body.py +104 -0
  27. alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_request.py +33 -0
  28. alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response.py +54 -0
  29. alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response_body.py +347 -0
  30. alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response.py +54 -0
  31. alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response_body.py +104 -0
  32. alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_request.py +34 -0
  33. alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response.py +54 -0
  34. alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response_body.py +1620 -0
  35. alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response.py +54 -0
  36. alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response_body.py +106 -0
  37. alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_request.py +34 -0
  38. alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response.py +54 -0
  39. alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response_body.py +494 -0
  40. alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_request.py +33 -0
  41. alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response.py +54 -0
  42. alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response_body.py +180 -0
  43. alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_request.py +50 -0
  44. alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response.py +54 -0
  45. alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response_body.py +196 -0
  46. alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_request.py +65 -0
  47. alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response.py +54 -0
  48. alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response_body.py +367 -0
  49. alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_request.py +203 -0
  50. alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response.py +54 -0
  51. alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response_body.py +331 -0
  52. alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_shrink_request.py +109 -0
  53. alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_request.py +81 -0
  54. alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response.py +54 -0
  55. alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response_body.py +241 -0
  56. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_request.py +264 -0
  57. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response.py +54 -0
  58. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response_body.py +636 -0
  59. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_shrink_request.py +121 -0
  60. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_request.py +100 -0
  61. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response.py +54 -0
  62. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response_body.py +241 -0
  63. alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_shrink_request.py +52 -0
  64. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_request.py +59 -0
  65. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response.py +54 -0
  66. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response_body.py +232 -0
  67. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_shrink_request.py +57 -0
  68. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_request.py +89 -0
  69. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response.py +54 -0
  70. alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response_body.py +248 -0
  71. alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_request.py +136 -0
  72. alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response.py +54 -0
  73. alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response_body.py +233 -0
  74. alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_shrink_request.py +90 -0
  75. alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_request.py +49 -0
  76. alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response.py +54 -0
  77. alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response_body.py +233 -0
  78. alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_request.py +42 -0
  79. alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response.py +54 -0
  80. alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response_body.py +248 -0
  81. alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_request.py +50 -0
  82. alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response.py +54 -0
  83. alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response_body.py +248 -0
  84. alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_request.py +82 -0
  85. alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response.py +54 -0
  86. alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response_body.py +248 -0
  87. alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_request.py +33 -0
  88. alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response.py +54 -0
  89. alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response_body.py +290 -0
  90. alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_request.py +75 -0
  91. alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response.py +54 -0
  92. alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response_body.py +248 -0
  93. alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_shrink_request.py +73 -0
  94. alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_request.py +136 -0
  95. alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response.py +54 -0
  96. alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response_body.py +233 -0
  97. alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_shrink_request.py +90 -0
  98. alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_request.py +600 -0
  99. alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response.py +54 -0
  100. alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response_body.py +1668 -0
  101. alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_shrink_request.py +209 -0
  102. alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_request.py +142 -0
  103. alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response.py +54 -0
  104. alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response_body.py +363 -0
  105. alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_shrink_request.py +140 -0
  106. alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_request.py +247 -0
  107. alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response.py +54 -0
  108. alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response_body.py +104 -0
  109. alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_shrink_request.py +113 -0
  110. alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_request.py +167 -0
  111. alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response.py +54 -0
  112. alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response_body.py +103 -0
  113. alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_shrink_request.py +81 -0
  114. alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_request.py +143 -0
  115. alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response.py +54 -0
  116. alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response_body.py +104 -0
  117. alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_shrink_request.py +97 -0
  118. alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_request.py +593 -0
  119. alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response.py +54 -0
  120. alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response_body.py +103 -0
  121. alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_shrink_request.py +202 -0
  122. alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_request.py +148 -0
  123. alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response.py +54 -0
  124. alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response_body.py +104 -0
  125. alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_shrink_request.py +146 -0
  126. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_request.py +34 -0
  127. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response.py +54 -0
  128. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response_body.py +66 -0
  129. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_request.py +43 -0
  130. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response.py +54 -0
  131. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response_body.py +119 -0
  132. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_request.py +45 -0
  133. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response.py +54 -0
  134. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response_body.py +136 -0
  135. alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_shrink_request.py +43 -0
  136. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_request.py +34 -0
  137. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response.py +54 -0
  138. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response_body.py +66 -0
  139. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_request.py +43 -0
  140. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response.py +54 -0
  141. alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response_body.py +120 -0
  142. {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/METADATA +7 -7
  143. alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info/RECORD +146 -0
  144. alibabacloud_quanmiaolightapp20240801/models.py +0 -16578
  145. alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info/RECORD +0 -8
  146. {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/LICENSE +0 -0
  147. {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/WHEEL +0 -0
  148. {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,593 @@
1
+ # -*- coding: utf-8 -*-
2
+ # This file is auto-generated, don't edit it. Thanks.
3
+ from __future__ import annotations
4
+
5
+ from typing import List
6
+
7
+ from alibabacloud_quanmiaolightapp20240801 import models as main_models
8
+ from darabonba.model import DaraModel
9
+
10
class SubmitVideoAnalysisTaskRequest(DaraModel):
    """Request model for the SubmitVideoAnalysisTask API.

    ``to_map`` serializes the model to the camelCase-keyed dict sent on the
    wire; ``from_map`` rebuilds the model from such a dict.  Per the
    generated contract, only ``video_url`` is required.
    """

    def __init__(
        self,
        add_document_param: main_models.SubmitVideoAnalysisTaskRequestAddDocumentParam = None,
        auto_role_recognition_video_url: str = None,
        deduplication_id: str = None,
        exclude_generate_options: List[str] = None,
        face_identity_similarity_min_score: float = None,
        frame_sample_method: main_models.SubmitVideoAnalysisTaskRequestFrameSampleMethod = None,
        generate_options: List[str] = None,
        language: str = None,
        model_custom_prompt_template: str = None,
        model_custom_prompt_template_id: str = None,
        model_id: str = None,
        snapshot_interval: float = None,
        split_interval: int = None,
        split_type: str = None,
        text_process_tasks: List[main_models.SubmitVideoAnalysisTaskRequestTextProcessTasks] = None,
        video_caption_info: main_models.SubmitVideoAnalysisTaskRequestVideoCaptionInfo = None,
        video_extra_info: str = None,
        video_model_custom_prompt_template: str = None,
        video_model_id: str = None,
        video_roles: List[main_models.SubmitVideoAnalysisTaskRequestVideoRoles] = None,
        video_shot_face_identity_count: int = None,
        video_url: str = None,
    ):
        self.add_document_param = add_document_param
        self.auto_role_recognition_video_url = auto_role_recognition_video_url
        self.deduplication_id = deduplication_id
        self.exclude_generate_options = exclude_generate_options
        self.face_identity_similarity_min_score = face_identity_similarity_min_score
        self.frame_sample_method = frame_sample_method
        self.generate_options = generate_options
        self.language = language
        self.model_custom_prompt_template = model_custom_prompt_template
        self.model_custom_prompt_template_id = model_custom_prompt_template_id
        self.model_id = model_id
        self.snapshot_interval = snapshot_interval
        self.split_interval = split_interval
        self.split_type = split_type
        self.text_process_tasks = text_process_tasks
        self.video_caption_info = video_caption_info
        self.video_extra_info = video_extra_info
        self.video_model_custom_prompt_template = video_model_custom_prompt_template
        self.video_model_id = video_model_id
        self.video_roles = video_roles
        self.video_shot_face_identity_count = video_shot_face_identity_count
        # This parameter is required.
        self.video_url = video_url

    def validate(self):
        """Recursively validate nested model fields, in declaration order."""
        if self.add_document_param:
            self.add_document_param.validate()
        if self.frame_sample_method:
            self.frame_sample_method.validate()
        if self.text_process_tasks:
            for entry in self.text_process_tasks:
                if entry:
                    entry.validate()
        if self.video_caption_info:
            self.video_caption_info.validate()
        if self.video_roles:
            for entry in self.video_roles:
                if entry:
                    entry.validate()

    def to_map(self):
        """Serialize to a wire dict; list-valued keys are always present."""
        parent = super().to_map()
        result = parent if parent is not None else {}
        if self.add_document_param is not None:
            result['addDocumentParam'] = self.add_document_param.to_map()
        for key, value in (
            ('autoRoleRecognitionVideoUrl', self.auto_role_recognition_video_url),
            ('deduplicationId', self.deduplication_id),
            ('excludeGenerateOptions', self.exclude_generate_options),
            ('faceIdentitySimilarityMinScore', self.face_identity_similarity_min_score),
        ):
            if value is not None:
                result[key] = value
        if self.frame_sample_method is not None:
            result['frameSampleMethod'] = self.frame_sample_method.to_map()
        for key, value in (
            ('generateOptions', self.generate_options),
            ('language', self.language),
            ('modelCustomPromptTemplate', self.model_custom_prompt_template),
            ('modelCustomPromptTemplateId', self.model_custom_prompt_template_id),
            ('modelId', self.model_id),
            ('snapshotInterval', self.snapshot_interval),
            ('splitInterval', self.split_interval),
            ('splitType', self.split_type),
        ):
            if value is not None:
                result[key] = value
        # List keys are emitted unconditionally (empty list when unset),
        # matching the generated wire format.
        result['textProcessTasks'] = []
        if self.text_process_tasks is not None:
            for entry in self.text_process_tasks:
                result['textProcessTasks'].append(entry.to_map() if entry else None)
        if self.video_caption_info is not None:
            result['videoCaptionInfo'] = self.video_caption_info.to_map()
        for key, value in (
            ('videoExtraInfo', self.video_extra_info),
            ('videoModelCustomPromptTemplate', self.video_model_custom_prompt_template),
            ('videoModelId', self.video_model_id),
        ):
            if value is not None:
                result[key] = value
        result['videoRoles'] = []
        if self.video_roles is not None:
            for entry in self.video_roles:
                result['videoRoles'].append(entry.to_map() if entry else None)
        for key, value in (
            ('videoShotFaceIdentityCount', self.video_shot_face_identity_count),
            ('videoUrl', self.video_url),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate from a wire dict; only non-None entries are applied."""
        m = m or {}
        if m.get('addDocumentParam') is not None:
            self.add_document_param = (
                main_models.SubmitVideoAnalysisTaskRequestAddDocumentParam().from_map(m.get('addDocumentParam'))
            )
        for key, attr in (
            ('autoRoleRecognitionVideoUrl', 'auto_role_recognition_video_url'),
            ('deduplicationId', 'deduplication_id'),
            ('excludeGenerateOptions', 'exclude_generate_options'),
            ('faceIdentitySimilarityMinScore', 'face_identity_similarity_min_score'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        if m.get('frameSampleMethod') is not None:
            self.frame_sample_method = (
                main_models.SubmitVideoAnalysisTaskRequestFrameSampleMethod().from_map(m.get('frameSampleMethod'))
            )
        for key, attr in (
            ('generateOptions', 'generate_options'),
            ('language', 'language'),
            ('modelCustomPromptTemplate', 'model_custom_prompt_template'),
            ('modelCustomPromptTemplateId', 'model_custom_prompt_template_id'),
            ('modelId', 'model_id'),
            ('snapshotInterval', 'snapshot_interval'),
            ('splitInterval', 'split_interval'),
            ('splitType', 'split_type'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        # List attributes are reset unconditionally, mirroring to_map.
        self.text_process_tasks = []
        if m.get('textProcessTasks') is not None:
            for entry in m.get('textProcessTasks'):
                self.text_process_tasks.append(
                    main_models.SubmitVideoAnalysisTaskRequestTextProcessTasks().from_map(entry)
                )
        if m.get('videoCaptionInfo') is not None:
            self.video_caption_info = (
                main_models.SubmitVideoAnalysisTaskRequestVideoCaptionInfo().from_map(m.get('videoCaptionInfo'))
            )
        for key, attr in (
            ('videoExtraInfo', 'video_extra_info'),
            ('videoModelCustomPromptTemplate', 'video_model_custom_prompt_template'),
            ('videoModelId', 'video_model_id'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        self.video_roles = []
        if m.get('videoRoles') is not None:
            for entry in m.get('videoRoles'):
                self.video_roles.append(
                    main_models.SubmitVideoAnalysisTaskRequestVideoRoles().from_map(entry)
                )
        for key, attr in (
            ('videoShotFaceIdentityCount', 'video_shot_face_identity_count'),
            ('videoUrl', 'video_url'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
232
class SubmitVideoAnalysisTaskRequestVideoRoles(DaraModel):
    """One role entry of the request's ``videoRoles`` list."""

    def __init__(
        self,
        is_auto_recognition: bool = None,
        role_info: str = None,
        role_name: str = None,
        time_intervals: List[main_models.SubmitVideoAnalysisTaskRequestVideoRolesTimeIntervals] = None,
        urls: List[str] = None,
    ):
        self.is_auto_recognition = is_auto_recognition
        self.role_info = role_info
        self.role_name = role_name
        self.time_intervals = time_intervals
        self.urls = urls

    def validate(self):
        """Validate each non-empty nested time interval."""
        if self.time_intervals:
            for entry in self.time_intervals:
                if entry:
                    entry.validate()

    def to_map(self):
        parent = super().to_map()
        result = parent if parent is not None else {}
        for key, value in (
            ('isAutoRecognition', self.is_auto_recognition),
            ('roleInfo', self.role_info),
            ('roleName', self.role_name),
        ):
            if value is not None:
                result[key] = value
        # 'timeIntervals' is always emitted, even when the attribute is None.
        result['timeIntervals'] = []
        if self.time_intervals is not None:
            for entry in self.time_intervals:
                result['timeIntervals'].append(entry.to_map() if entry else None)
        if self.urls is not None:
            result['urls'] = self.urls
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key, attr in (
            ('isAutoRecognition', 'is_auto_recognition'),
            ('roleInfo', 'role_info'),
            ('roleName', 'role_name'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        self.time_intervals = []
        if m.get('timeIntervals') is not None:
            for entry in m.get('timeIntervals'):
                self.time_intervals.append(
                    main_models.SubmitVideoAnalysisTaskRequestVideoRolesTimeIntervals().from_map(entry)
                )
        if m.get('urls') is not None:
            self.urls = m.get('urls')
        return self
299
class SubmitVideoAnalysisTaskRequestVideoRolesTimeIntervals(DaraModel):
    """A start/end pair of a role's time interval (units defined by the API)."""

    def __init__(
        self,
        end_time: int = None,
        start_time: int = None,
    ):
        self.end_time = end_time
        self.start_time = start_time

    def validate(self):
        """No nested models; nothing to validate."""
        pass

    def to_map(self):
        parent = super().to_map()
        result = parent if parent is not None else {}
        for key, value in (('endTime', self.end_time), ('startTime', self.start_time)):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key, attr in (('endTime', 'end_time'), ('startTime', 'start_time')):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
334
class SubmitVideoAnalysisTaskRequestVideoCaptionInfo(DaraModel):
    """Caption input: a caption file URL and/or inline caption entries."""

    def __init__(
        self,
        video_caption_file_url: str = None,
        video_captions: List[main_models.SubmitVideoAnalysisTaskRequestVideoCaptionInfoVideoCaptions] = None,
    ):
        self.video_caption_file_url = video_caption_file_url
        self.video_captions = video_captions

    def validate(self):
        """Validate each non-empty nested caption entry."""
        if self.video_captions:
            for entry in self.video_captions:
                if entry:
                    entry.validate()

    def to_map(self):
        parent = super().to_map()
        result = parent if parent is not None else {}
        if self.video_caption_file_url is not None:
            result['videoCaptionFileUrl'] = self.video_caption_file_url
        # 'videoCaptions' is always emitted, even when the attribute is None.
        result['videoCaptions'] = []
        if self.video_captions is not None:
            for entry in self.video_captions:
                result['videoCaptions'].append(entry.to_map() if entry else None)
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        if m.get('videoCaptionFileUrl') is not None:
            self.video_caption_file_url = m.get('videoCaptionFileUrl')
        self.video_captions = []
        if m.get('videoCaptions') is not None:
            for entry in m.get('videoCaptions'):
                self.video_captions.append(
                    main_models.SubmitVideoAnalysisTaskRequestVideoCaptionInfoVideoCaptions().from_map(entry)
                )
        return self
377
class SubmitVideoAnalysisTaskRequestVideoCaptionInfoVideoCaptions(DaraModel):
    """One inline caption line: text, optional speaker, and its time span."""

    def __init__(
        self,
        end_time: int = None,
        speaker: str = None,
        start_time: int = None,
        text: str = None,
    ):
        self.end_time = end_time
        self.speaker = speaker
        self.start_time = start_time
        self.text = text

    def validate(self):
        """No nested models; nothing to validate."""
        pass

    def to_map(self):
        parent = super().to_map()
        result = parent if parent is not None else {}
        for key, value in (
            ('endTime', self.end_time),
            ('speaker', self.speaker),
            ('startTime', self.start_time),
            ('text', self.text),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key, attr in (
            ('endTime', 'end_time'),
            ('speaker', 'speaker'),
            ('startTime', 'start_time'),
            ('text', 'text'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
428
class SubmitVideoAnalysisTaskRequestTextProcessTasks(DaraModel):
    """One text-processing task: model id plus an optional prompt template."""

    def __init__(
        self,
        model_custom_prompt_template: str = None,
        model_custom_prompt_template_id: str = None,
        model_id: str = None,
    ):
        self.model_custom_prompt_template = model_custom_prompt_template
        self.model_custom_prompt_template_id = model_custom_prompt_template_id
        self.model_id = model_id

    def validate(self):
        """No nested models; nothing to validate."""
        pass

    def to_map(self):
        parent = super().to_map()
        result = parent if parent is not None else {}
        for key, value in (
            ('modelCustomPromptTemplate', self.model_custom_prompt_template),
            ('modelCustomPromptTemplateId', self.model_custom_prompt_template_id),
            ('modelId', self.model_id),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key, attr in (
            ('modelCustomPromptTemplate', 'model_custom_prompt_template'),
            ('modelCustomPromptTemplateId', 'model_custom_prompt_template_id'),
            ('modelId', 'model_id'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
471
class SubmitVideoAnalysisTaskRequestFrameSampleMethod(DaraModel):
    """Frame-sampling settings: method name, interval, and pixel value."""

    def __init__(
        self,
        interval: float = None,
        method_name: str = None,
        pixel: int = None,
    ):
        self.interval = interval
        self.method_name = method_name
        self.pixel = pixel

    def validate(self):
        """No nested models; nothing to validate."""
        pass

    def to_map(self):
        parent = super().to_map()
        result = parent if parent is not None else {}
        for key, value in (
            ('interval', self.interval),
            ('methodName', self.method_name),
            ('pixel', self.pixel),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key, attr in (
            ('interval', 'interval'),
            ('methodName', 'method_name'),
            ('pixel', 'pixel'),
        ):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
514
class SubmitVideoAnalysisTaskRequestAddDocumentParam(DaraModel):
    """Dataset target (id or name) plus the document descriptor to add."""

    def __init__(
        self,
        dataset_id: int = None,
        dataset_name: str = None,
        document: main_models.SubmitVideoAnalysisTaskRequestAddDocumentParamDocument = None,
    ):
        self.dataset_id = dataset_id
        self.dataset_name = dataset_name
        self.document = document

    def validate(self):
        """Validate the nested document, when present."""
        if self.document:
            self.document.validate()

    def to_map(self):
        parent = super().to_map()
        result = parent if parent is not None else {}
        for key, value in (('datasetId', self.dataset_id), ('datasetName', self.dataset_name)):
            if value is not None:
                result[key] = value
        if self.document is not None:
            result['document'] = self.document.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key, attr in (('datasetId', 'dataset_id'), ('datasetName', 'dataset_name')):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        if m.get('document') is not None:
            self.document = (
                main_models.SubmitVideoAnalysisTaskRequestAddDocumentParamDocument().from_map(m.get('document'))
            )
        return self
559
class SubmitVideoAnalysisTaskRequestAddDocumentParamDocument(DaraModel):
    """Document descriptor: its id and title."""

    def __init__(
        self,
        doc_id: str = None,
        title: str = None,
    ):
        self.doc_id = doc_id
        self.title = title

    def validate(self):
        """No nested models; nothing to validate."""
        pass

    def to_map(self):
        parent = super().to_map()
        result = parent if parent is not None else {}
        for key, value in (('docId', self.doc_id), ('title', self.title)):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        for key, attr in (('docId', 'doc_id'), ('title', 'title')):
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
@@ -0,0 +1,54 @@
1
+ # -*- coding: utf-8 -*-
2
+ # This file is auto-generated, don't edit it. Thanks.
3
+ from __future__ import annotations
4
+
5
+ from typing import Dict
6
+
7
+ from alibabacloud_quanmiaolightapp20240801 import models as main_models
8
+ from darabonba.model import DaraModel
9
+
10
class SubmitVideoAnalysisTaskResponse(DaraModel):
    """HTTP envelope for SubmitVideoAnalysisTask: headers, status, parsed body."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: main_models.SubmitVideoAnalysisTaskResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        """Validate the parsed body, when present."""
        if self.body:
            self.body.validate()

    def to_map(self):
        parent = super().to_map()
        result = parent if parent is not None else {}
        if self.headers is not None:
            result['headers'] = self.headers
        if self.status_code is not None:
            result['statusCode'] = self.status_code
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or {}
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = main_models.SubmitVideoAnalysisTaskResponseBody().from_map(m.get('body'))
        return self