alibabacloud-quanmiaolightapp20240801 2.13.2__py3-none-any.whl → 2.13.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alibabacloud_quanmiaolightapp20240801/__init__.py +1 -1
- alibabacloud_quanmiaolightapp20240801/client.py +4448 -3769
- alibabacloud_quanmiaolightapp20240801/models/__init__.py +693 -0
- alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response_body.py +74 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_request.py +52 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response_body.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_shrink_request.py +50 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response_body.py +303 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_request.py +112 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_shrink_request.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response_body.py +374 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response_body.py +195 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_file_content_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response_body.py +347 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response_body.py +1620 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response_body.py +106 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response_body.py +494 -0
- alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response_body.py +180 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_request.py +50 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response_body.py +196 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_request.py +65 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response_body.py +367 -0
- alibabacloud_quanmiaolightapp20240801/models/_model_usage.py +49 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_request.py +203 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response_body.py +331 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_shrink_request.py +109 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_request.py +81 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response_body.py +241 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_request.py +264 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response_body.py +636 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_shrink_request.py +121 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_request.py +100 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response_body.py +241 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_shrink_request.py +52 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_request.py +59 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response_body.py +232 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_shrink_request.py +57 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_request.py +89 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_request.py +136 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response_body.py +233 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_shrink_request.py +90 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_request.py +49 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response_body.py +233 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_request.py +42 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_request.py +50 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_request.py +82 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response_body.py +290 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_request.py +75 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_shrink_request.py +73 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_request.py +136 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response_body.py +233 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_shrink_request.py +90 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_request.py +600 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response_body.py +1668 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_shrink_request.py +209 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_request.py +142 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response_body.py +363 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_shrink_request.py +140 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_request.py +247 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_shrink_request.py +113 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_request.py +167 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response_body.py +103 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_shrink_request.py +81 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_request.py +143 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_shrink_request.py +97 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_request.py +593 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response_body.py +103 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_shrink_request.py +202 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_request.py +148 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_shrink_request.py +146 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response_body.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_request.py +43 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response_body.py +119 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_request.py +45 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response_body.py +136 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_shrink_request.py +43 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response_body.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_request.py +43 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response_body.py +120 -0
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.4.dist-info}/METADATA +7 -7
- alibabacloud_quanmiaolightapp20240801-2.13.4.dist-info/RECORD +147 -0
- alibabacloud_quanmiaolightapp20240801/models.py +0 -16578
- alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info/RECORD +0 -8
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.4.dist-info}/LICENSE +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.4.dist-info}/WHEEL +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.4.dist-info}/top_level.txt +0 -0
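Version 2.13.4 drops the monolithic alibabacloud_quanmiaolightapp20240801/models.py (-16578 lines) in favour of one module per model under models/, re-exported through models/__init__.py (+693 lines); the generated code itself relies on those re-exports via "from alibabacloud_quanmiaolightapp20240801 import models as main_models". A minimal, hedged sketch of round-tripping one of the new models (class names and map keys are taken from the generated code shown below; it assumes 2.13.4 is installed):

# Hedged sketch: serialize and re-hydrate one of the new per-file models.
from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

request = quanmiao_models.UpdateVideoAnalysisConfigRequest(async_concurrency=2)
payload = request.to_map()   # {'asyncConcurrency': 2}
restored = quanmiao_models.UpdateVideoAnalysisConfigRequest().from_map(payload)
assert restored.async_concurrency == 2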
alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_shrink_request.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from darabonba.model import DaraModel
+
+class SubmitVideoDetectShotTaskShrinkRequest(DaraModel):
+    def __init__(
+        self,
+        deduplication_id: str = None,
+        intelli_simp_prompt: str = None,
+        intelli_simp_prompt_template_id: str = None,
+        language: str = None,
+        model_custom_prompt_template_id: str = None,
+        model_id: str = None,
+        model_vl_custom_prompt_template_id: str = None,
+        options_shrink: str = None,
+        original_session_id: str = None,
+        pre_model_id: str = None,
+        prompt: str = None,
+        recognition_options_shrink: str = None,
+        task_id: str = None,
+        video_url: str = None,
+        vl_prompt: str = None,
+    ):
+        self.deduplication_id = deduplication_id
+        self.intelli_simp_prompt = intelli_simp_prompt
+        self.intelli_simp_prompt_template_id = intelli_simp_prompt_template_id
+        self.language = language
+        self.model_custom_prompt_template_id = model_custom_prompt_template_id
+        self.model_id = model_id
+        self.model_vl_custom_prompt_template_id = model_vl_custom_prompt_template_id
+        self.options_shrink = options_shrink
+        self.original_session_id = original_session_id
+        self.pre_model_id = pre_model_id
+        self.prompt = prompt
+        self.recognition_options_shrink = recognition_options_shrink
+        self.task_id = task_id
+        # This parameter is required.
+        self.video_url = video_url
+        self.vl_prompt = vl_prompt
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.deduplication_id is not None:
+            result['deduplicationId'] = self.deduplication_id
+
+        if self.intelli_simp_prompt is not None:
+            result['intelliSimpPrompt'] = self.intelli_simp_prompt
+
+        if self.intelli_simp_prompt_template_id is not None:
+            result['intelliSimpPromptTemplateId'] = self.intelli_simp_prompt_template_id
+
+        if self.language is not None:
+            result['language'] = self.language
+
+        if self.model_custom_prompt_template_id is not None:
+            result['modelCustomPromptTemplateId'] = self.model_custom_prompt_template_id
+
+        if self.model_id is not None:
+            result['modelId'] = self.model_id
+
+        if self.model_vl_custom_prompt_template_id is not None:
+            result['modelVlCustomPromptTemplateId'] = self.model_vl_custom_prompt_template_id
+
+        if self.options_shrink is not None:
+            result['options'] = self.options_shrink
+
+        if self.original_session_id is not None:
+            result['originalSessionId'] = self.original_session_id
+
+        if self.pre_model_id is not None:
+            result['preModelId'] = self.pre_model_id
+
+        if self.prompt is not None:
+            result['prompt'] = self.prompt
+
+        if self.recognition_options_shrink is not None:
+            result['recognitionOptions'] = self.recognition_options_shrink
+
+        if self.task_id is not None:
+            result['taskId'] = self.task_id
+
+        if self.video_url is not None:
+            result['videoUrl'] = self.video_url
+
+        if self.vl_prompt is not None:
+            result['vlPrompt'] = self.vl_prompt
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('deduplicationId') is not None:
+            self.deduplication_id = m.get('deduplicationId')
+
+        if m.get('intelliSimpPrompt') is not None:
+            self.intelli_simp_prompt = m.get('intelliSimpPrompt')
+
+        if m.get('intelliSimpPromptTemplateId') is not None:
+            self.intelli_simp_prompt_template_id = m.get('intelliSimpPromptTemplateId')
+
+        if m.get('language') is not None:
+            self.language = m.get('language')
+
+        if m.get('modelCustomPromptTemplateId') is not None:
+            self.model_custom_prompt_template_id = m.get('modelCustomPromptTemplateId')
+
+        if m.get('modelId') is not None:
+            self.model_id = m.get('modelId')
+
+        if m.get('modelVlCustomPromptTemplateId') is not None:
+            self.model_vl_custom_prompt_template_id = m.get('modelVlCustomPromptTemplateId')
+
+        if m.get('options') is not None:
+            self.options_shrink = m.get('options')
+
+        if m.get('originalSessionId') is not None:
+            self.original_session_id = m.get('originalSessionId')
+
+        if m.get('preModelId') is not None:
+            self.pre_model_id = m.get('preModelId')
+
+        if m.get('prompt') is not None:
+            self.prompt = m.get('prompt')
+
+        if m.get('recognitionOptions') is not None:
+            self.recognition_options_shrink = m.get('recognitionOptions')
+
+        if m.get('taskId') is not None:
+            self.task_id = m.get('taskId')
+
+        if m.get('videoUrl') is not None:
+            self.video_url = m.get('videoUrl')
+
+        if m.get('vlPrompt') is not None:
+            self.vl_prompt = m.get('vlPrompt')
+
+        return self
+
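A hedged usage sketch for the shrink request above. In this SDK family the *ShrinkRequest is normally built by the client after flattening complex members (here options and recognitionOptions) into JSON strings, so hand-constructing it only illustrates the attribute-to-key mapping; the options payload below is hypothetical, and videoUrl is the one required parameter:

# Hedged sketch, not from the diff: field names follow the generated class above.
from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

shrink = quanmiao_models.SubmitVideoDetectShotTaskShrinkRequest(
    video_url='https://example.com/demo.mp4',   # required
    options_shrink='{"exampleOption": true}',   # hypothetical JSON string sent as 'options'
)
print(shrink.to_map())
# {'options': '{"exampleOption": true}', 'videoUrl': 'https://example.com/demo.mp4'}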
alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_request.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from darabonba.model import DaraModel
+
+class UpdateVideoAnalysisConfigRequest(DaraModel):
+    def __init__(
+        self,
+        async_concurrency: int = None,
+    ):
+        # This parameter is required.
+        self.async_concurrency = async_concurrency
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.async_concurrency is not None:
+            result['asyncConcurrency'] = self.async_concurrency
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('asyncConcurrency') is not None:
+            self.async_concurrency = m.get('asyncConcurrency')
+
+        return self
+
alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from typing import Dict
+
+from alibabacloud_quanmiaolightapp20240801 import models as main_models
+from darabonba.model import DaraModel
+
+class UpdateVideoAnalysisConfigResponse(DaraModel):
+    def __init__(
+        self,
+        headers: Dict[str, str] = None,
+        status_code: int = None,
+        body: main_models.UpdateVideoAnalysisConfigResponseBody = None,
+    ):
+        self.headers = headers
+        self.status_code = status_code
+        self.body = body
+
+    def validate(self):
+        if self.body:
+            self.body.validate()
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.headers is not None:
+            result['headers'] = self.headers
+
+        if self.status_code is not None:
+            result['statusCode'] = self.status_code
+
+        if self.body is not None:
+            result['body'] = self.body.to_map()
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('headers') is not None:
+            self.headers = m.get('headers')
+
+        if m.get('statusCode') is not None:
+            self.status_code = m.get('statusCode')
+
+        if m.get('body') is not None:
+            temp_model = main_models.UpdateVideoAnalysisConfigResponseBody()
+            self.body = temp_model.from_map(m.get('body'))
+
+        return self
+
alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response_body.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from darabonba.model import DaraModel
+
+class UpdateVideoAnalysisConfigResponseBody(DaraModel):
+    def __init__(
+        self,
+        code: str = None,
+        http_status_code: int = None,
+        message: str = None,
+        request_id: str = None,
+        success: bool = None,
+    ):
+        self.code = code
+        self.http_status_code = http_status_code
+        self.message = message
+        # Id of the request
+        self.request_id = request_id
+        self.success = success
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.code is not None:
+            result['code'] = self.code
+
+        if self.http_status_code is not None:
+            result['httpStatusCode'] = self.http_status_code
+
+        if self.message is not None:
+            result['message'] = self.message
+
+        if self.request_id is not None:
+            result['requestId'] = self.request_id
+
+        if self.success is not None:
+            result['success'] = self.success
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('code') is not None:
+            self.code = m.get('code')
+
+        if m.get('httpStatusCode') is not None:
+            self.http_status_code = m.get('httpStatusCode')
+
+        if m.get('message') is not None:
+            self.message = m.get('message')
+
+        if m.get('requestId') is not None:
+            self.request_id = m.get('requestId')
+
+        if m.get('success') is not None:
+            self.success = m.get('success')
+
+        return self
+
alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_request.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from darabonba.model import DaraModel
+
+class UpdateVideoAnalysisTaskRequest(DaraModel):
+    def __init__(
+        self,
+        task_id: str = None,
+        task_status: str = None,
+    ):
+        # This parameter is required.
+        self.task_id = task_id
+        # This parameter is required.
+        self.task_status = task_status
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.task_id is not None:
+            result['taskId'] = self.task_id
+
+        if self.task_status is not None:
+            result['taskStatus'] = self.task_status
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('taskId') is not None:
+            self.task_id = m.get('taskId')
+
+        if m.get('taskStatus') is not None:
+            self.task_status = m.get('taskStatus')
+
+        return self
+
alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from typing import Dict
+
+from alibabacloud_quanmiaolightapp20240801 import models as main_models
+from darabonba.model import DaraModel
+
+class UpdateVideoAnalysisTaskResponse(DaraModel):
+    def __init__(
+        self,
+        headers: Dict[str, str] = None,
+        status_code: int = None,
+        body: main_models.UpdateVideoAnalysisTaskResponseBody = None,
+    ):
+        self.headers = headers
+        self.status_code = status_code
+        self.body = body
+
+    def validate(self):
+        if self.body:
+            self.body.validate()
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.headers is not None:
+            result['headers'] = self.headers
+
+        if self.status_code is not None:
+            result['statusCode'] = self.status_code
+
+        if self.body is not None:
+            result['body'] = self.body.to_map()
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('headers') is not None:
+            self.headers = m.get('headers')
+
+        if m.get('statusCode') is not None:
+            self.status_code = m.get('statusCode')
+
+        if m.get('body') is not None:
+            temp_model = main_models.UpdateVideoAnalysisTaskResponseBody()
+            self.body = temp_model.from_map(m.get('body'))
+
+        return self
+
alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response_body.py
@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from alibabacloud_quanmiaolightapp20240801 import models as main_models
+from darabonba.model import DaraModel
+
+class UpdateVideoAnalysisTaskResponseBody(DaraModel):
+    def __init__(
+        self,
+        code: str = None,
+        data: main_models.UpdateVideoAnalysisTaskResponseBodyData = None,
+        http_status_code: int = None,
+        message: str = None,
+        request_id: str = None,
+        success: bool = None,
+    ):
+        self.code = code
+        self.data = data
+        self.http_status_code = http_status_code
+        self.message = message
+        self.request_id = request_id
+        self.success = success
+
+    def validate(self):
+        if self.data:
+            self.data.validate()
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.code is not None:
+            result['code'] = self.code
+
+        if self.data is not None:
+            result['data'] = self.data.to_map()
+
+        if self.http_status_code is not None:
+            result['httpStatusCode'] = self.http_status_code
+
+        if self.message is not None:
+            result['message'] = self.message
+
+        if self.request_id is not None:
+            result['requestId'] = self.request_id
+
+        if self.success is not None:
+            result['success'] = self.success
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('code') is not None:
+            self.code = m.get('code')
+
+        if m.get('data') is not None:
+            temp_model = main_models.UpdateVideoAnalysisTaskResponseBodyData()
+            self.data = temp_model.from_map(m.get('data'))
+
+        if m.get('httpStatusCode') is not None:
+            self.http_status_code = m.get('httpStatusCode')
+
+        if m.get('message') is not None:
+            self.message = m.get('message')
+
+        if m.get('requestId') is not None:
+            self.request_id = m.get('requestId')
+
+        if m.get('success') is not None:
+            self.success = m.get('success')
+
+        return self
+
+class UpdateVideoAnalysisTaskResponseBodyData(DaraModel):
+    def __init__(
+        self,
+        task_error_message: str = None,
+        task_id: str = None,
+        task_status: str = None,
+    ):
+        self.task_error_message = task_error_message
+        self.task_id = task_id
+        self.task_status = task_status
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.task_error_message is not None:
+            result['taskErrorMessage'] = self.task_error_message
+
+        if self.task_id is not None:
+            result['taskId'] = self.task_id
+
+        if self.task_status is not None:
+            result['taskStatus'] = self.task_status
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('taskErrorMessage') is not None:
+            self.task_error_message = m.get('taskErrorMessage')
+
+        if m.get('taskId') is not None:
+            self.task_id = m.get('taskId')
+
+        if m.get('taskStatus') is not None:
+            self.task_status = m.get('taskStatus')
+
+        return self
+
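A hedged sketch of hydrating the new response-body model from a plain dict; the keys follow the generated from_map above, and the values are invented:

from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

raw = {
    'requestId': '1A2B3C4D-EXAMPLE',
    'success': True,
    'data': {'taskId': 'task-123', 'taskStatus': 'SUCCESS'},
}
body = quanmiao_models.UpdateVideoAnalysisTaskResponseBody().from_map(raw)
print(body.data.task_status)   # 'SUCCESS'
print(body.success)            # True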
alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_request.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from typing import List
+
+from darabonba.model import DaraModel
+
+class UpdateVideoAnalysisTasksRequest(DaraModel):
+    def __init__(
+        self,
+        task_ids: List[str] = None,
+        task_status: str = None,
+    ):
+        # This parameter is required.
+        self.task_ids = task_ids
+        # This parameter is required.
+        self.task_status = task_status
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.task_ids is not None:
+            result['taskIds'] = self.task_ids
+
+        if self.task_status is not None:
+            result['taskStatus'] = self.task_status
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('taskIds') is not None:
+            self.task_ids = m.get('taskIds')
+
+        if m.get('taskStatus') is not None:
+            self.task_status = m.get('taskStatus')
+
+        return self
+
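The batch variant above complements the single-task UpdateVideoAnalysisTaskRequest and takes a list of task ids; a shrink counterpart (_update_video_analysis_tasks_shrink_request.py) is also added in this release. A hedged sketch with a made-up status value:

from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

batch = quanmiao_models.UpdateVideoAnalysisTasksRequest(
    task_ids=['task-123', 'task-456'],   # required
    task_status='stop',                  # hypothetical status value
)
print(batch.to_map())   # {'taskIds': ['task-123', 'task-456'], 'taskStatus': 'stop'}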
alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from typing import Dict
+
+from alibabacloud_quanmiaolightapp20240801 import models as main_models
+from darabonba.model import DaraModel
+
+class UpdateVideoAnalysisTasksResponse(DaraModel):
+    def __init__(
+        self,
+        headers: Dict[str, str] = None,
+        status_code: int = None,
+        body: main_models.UpdateVideoAnalysisTasksResponseBody = None,
+    ):
+        self.headers = headers
+        self.status_code = status_code
+        self.body = body
+
+    def validate(self):
+        if self.body:
+            self.body.validate()
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.headers is not None:
+            result['headers'] = self.headers
+
+        if self.status_code is not None:
+            result['statusCode'] = self.status_code
+
+        if self.body is not None:
+            result['body'] = self.body.to_map()
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('headers') is not None:
+            self.headers = m.get('headers')
+
+        if m.get('statusCode') is not None:
+            self.status_code = m.get('statusCode')
+
+        if m.get('body') is not None:
+            temp_model = main_models.UpdateVideoAnalysisTasksResponseBody()
+            self.body = temp_model.from_map(m.get('body'))
+
+        return self
+