alibabacloud-quanmiaolightapp20240801 2.13.2__py3-none-any.whl → 2.13.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alibabacloud_quanmiaolightapp20240801/__init__.py +1 -1
- alibabacloud_quanmiaolightapp20240801/client.py +4448 -3769
- alibabacloud_quanmiaolightapp20240801/models/__init__.py +691 -0
- alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response_body.py +74 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_request.py +52 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response_body.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_shrink_request.py +50 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response_body.py +303 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_request.py +112 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_shrink_request.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response_body.py +374 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response_body.py +174 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_file_content_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response_body.py +347 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response_body.py +1620 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response_body.py +106 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response_body.py +494 -0
- alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response_body.py +180 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_request.py +50 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response_body.py +196 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_request.py +65 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response_body.py +367 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_request.py +203 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response_body.py +331 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_shrink_request.py +109 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_request.py +81 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response_body.py +241 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_request.py +264 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response_body.py +636 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_shrink_request.py +121 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_request.py +100 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response_body.py +241 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_shrink_request.py +52 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_request.py +59 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response_body.py +232 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_shrink_request.py +57 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_request.py +89 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_request.py +136 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response_body.py +233 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_shrink_request.py +90 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_request.py +49 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response_body.py +233 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_request.py +42 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_request.py +50 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_request.py +82 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response_body.py +290 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_request.py +75 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_shrink_request.py +73 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_request.py +136 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response_body.py +233 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_shrink_request.py +90 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_request.py +600 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response_body.py +1668 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_shrink_request.py +209 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_request.py +142 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response_body.py +363 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_shrink_request.py +140 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_request.py +247 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_shrink_request.py +113 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_request.py +167 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response_body.py +103 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_shrink_request.py +81 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_request.py +143 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_shrink_request.py +97 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_request.py +593 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response_body.py +103 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_shrink_request.py +202 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_request.py +148 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_shrink_request.py +146 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response_body.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_request.py +43 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response_body.py +119 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_request.py +45 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response_body.py +136 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_shrink_request.py +43 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response_body.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_request.py +43 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response_body.py +120 -0
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/METADATA +7 -7
- alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info/RECORD +146 -0
- alibabacloud_quanmiaolightapp20240801/models.py +0 -16578
- alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info/RECORD +0 -8
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/LICENSE +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/WHEEL +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# This file is auto-generated, don't edit it. Thanks.
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from alibabacloud_quanmiaolightapp20240801 import models as main_models
|
|
6
|
+
from darabonba.model import DaraModel
|
|
7
|
+
|
|
8
|
+
class SubmitVideoAnalysisTaskResponseBody(DaraModel):
    """Response body of SubmitVideoAnalysisTask.

    Carries the service-level result fields (code, message, request id,
    HTTP status, success flag) plus a nested ``data`` payload holding the
    created task id.
    """

    def __init__(
        self,
        code: str = None,
        data: main_models.SubmitVideoAnalysisTaskResponseBodyData = None,
        http_status_code: int = None,
        message: str = None,
        request_id: str = None,
        success: bool = None,
    ):
        # Service result code.
        self.code = code
        # Nested payload (task id) — validated/serialized recursively.
        self.data = data
        self.http_status_code = http_status_code
        self.message = message
        self.request_id = request_id
        self.success = success

    def validate(self):
        """Validate the nested data model when it is set."""
        if self.data:
            self.data.validate()

    def to_map(self):
        """Serialize this model to a plain dict, omitting unset fields."""
        mapped = super().to_map()
        if mapped is None:
            mapped = dict()
        if self.code is not None:
            mapped['code'] = self.code
        # The nested model serializes itself; keep key order stable.
        if self.data is not None:
            mapped['data'] = self.data.to_map()
        for key, value in (
            ('httpStatusCode', self.http_status_code),
            ('message', self.message),
            ('requestId', self.request_id),
            ('success', self.success),
        ):
            if value is not None:
                mapped[key] = value
        return mapped

    def from_map(self, m: dict = None):
        """Populate this model from a plain dict and return self."""
        source = m or dict()
        if source.get('code') is not None:
            self.code = source.get('code')
        if source.get('data') is not None:
            self.data = main_models.SubmitVideoAnalysisTaskResponseBodyData().from_map(
                source.get('data')
            )
        for key, attr in (
            ('httpStatusCode', 'http_status_code'),
            ('message', 'message'),
            ('requestId', 'request_id'),
            ('success', 'success'),
        ):
            if source.get(key) is not None:
                setattr(self, attr, source.get(key))
        return self
|
77
|
+
class SubmitVideoAnalysisTaskResponseBodyData(DaraModel):
    """Payload of a SubmitVideoAnalysisTask response: the created task id."""

    def __init__(
        self,
        task_id: str = None,
    ):
        # Identifier of the submitted video-analysis task.
        self.task_id = task_id

    def validate(self):
        """No nested models; nothing to validate."""
        pass

    def to_map(self):
        """Serialize to a plain dict, omitting the id when unset."""
        mapped = super().to_map()
        if mapped is None:
            mapped = dict()
        if self.task_id is not None:
            mapped['taskId'] = self.task_id
        return mapped

    def from_map(self, m: dict = None):
        """Populate from a plain dict and return self."""
        source = m or dict()
        incoming = source.get('taskId')
        if incoming is not None:
            self.task_id = incoming
        return self
|
@@ -0,0 +1,202 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# This file is auto-generated, don't edit it. Thanks.
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from darabonba.model import DaraModel
|
|
6
|
+
|
|
7
|
+
class SubmitVideoAnalysisTaskShrinkRequest(DaraModel):
    """Shrunken request for SubmitVideoAnalysisTask.

    In "shrink" requests, complex fields have already been flattened to
    JSON strings (the ``*_shrink`` attributes); every field therefore maps
    one-to-one to a wire key. All fields are optional except ``video_url``.
    """

    def __init__(
        self,
        add_document_param_shrink: str = None,
        auto_role_recognition_video_url: str = None,
        deduplication_id: str = None,
        exclude_generate_options_shrink: str = None,
        face_identity_similarity_min_score: float = None,
        frame_sample_method_shrink: str = None,
        generate_options_shrink: str = None,
        language: str = None,
        model_custom_prompt_template: str = None,
        model_custom_prompt_template_id: str = None,
        model_id: str = None,
        snapshot_interval: float = None,
        split_interval: int = None,
        split_type: str = None,
        text_process_tasks_shrink: str = None,
        video_caption_info_shrink: str = None,
        video_extra_info: str = None,
        video_model_custom_prompt_template: str = None,
        video_model_id: str = None,
        video_roles_shrink: str = None,
        video_shot_face_identity_count: int = None,
        video_url: str = None,
    ):
        self.add_document_param_shrink = add_document_param_shrink
        self.auto_role_recognition_video_url = auto_role_recognition_video_url
        self.deduplication_id = deduplication_id
        self.exclude_generate_options_shrink = exclude_generate_options_shrink
        self.face_identity_similarity_min_score = face_identity_similarity_min_score
        self.frame_sample_method_shrink = frame_sample_method_shrink
        self.generate_options_shrink = generate_options_shrink
        self.language = language
        self.model_custom_prompt_template = model_custom_prompt_template
        self.model_custom_prompt_template_id = model_custom_prompt_template_id
        self.model_id = model_id
        self.snapshot_interval = snapshot_interval
        self.split_interval = split_interval
        self.split_type = split_type
        self.text_process_tasks_shrink = text_process_tasks_shrink
        self.video_caption_info_shrink = video_caption_info_shrink
        self.video_extra_info = video_extra_info
        self.video_model_custom_prompt_template = video_model_custom_prompt_template
        self.video_model_id = video_model_id
        self.video_roles_shrink = video_roles_shrink
        self.video_shot_face_identity_count = video_shot_face_identity_count
        # This parameter is required.
        self.video_url = video_url

    def validate(self):
        """No nested models; nothing to validate."""
        pass

    def _field_items(self):
        # (wire key, attribute name) pairs, kept in wire order.
        return (
            ('addDocumentParam', 'add_document_param_shrink'),
            ('autoRoleRecognitionVideoUrl', 'auto_role_recognition_video_url'),
            ('deduplicationId', 'deduplication_id'),
            ('excludeGenerateOptions', 'exclude_generate_options_shrink'),
            ('faceIdentitySimilarityMinScore', 'face_identity_similarity_min_score'),
            ('frameSampleMethod', 'frame_sample_method_shrink'),
            ('generateOptions', 'generate_options_shrink'),
            ('language', 'language'),
            ('modelCustomPromptTemplate', 'model_custom_prompt_template'),
            ('modelCustomPromptTemplateId', 'model_custom_prompt_template_id'),
            ('modelId', 'model_id'),
            ('snapshotInterval', 'snapshot_interval'),
            ('splitInterval', 'split_interval'),
            ('splitType', 'split_type'),
            ('textProcessTasks', 'text_process_tasks_shrink'),
            ('videoCaptionInfo', 'video_caption_info_shrink'),
            ('videoExtraInfo', 'video_extra_info'),
            ('videoModelCustomPromptTemplate', 'video_model_custom_prompt_template'),
            ('videoModelId', 'video_model_id'),
            ('videoRoles', 'video_roles_shrink'),
            ('videoShotFaceIdentityCount', 'video_shot_face_identity_count'),
            ('videoUrl', 'video_url'),
        )

    def to_map(self):
        """Serialize this model to a plain dict, omitting unset fields."""
        mapped = super().to_map()
        if mapped is None:
            mapped = dict()
        for key, attr in self._field_items():
            value = getattr(self, attr)
            if value is not None:
                mapped[key] = value
        return mapped

    def from_map(self, m: dict = None):
        """Populate this model from a plain dict and return self."""
        source = m or dict()
        for key, attr in self._field_items():
            if source.get(key) is not None:
                setattr(self, attr, source.get(key))
        return self
|
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# This file is auto-generated, don't edit it. Thanks.
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import List
|
|
6
|
+
|
|
7
|
+
from darabonba.model import DaraModel
|
|
8
|
+
|
|
9
|
+
class SubmitVideoDetectShotTaskRequest(DaraModel):
    """Request for SubmitVideoDetectShotTask.

    All fields map one-to-one to wire keys; every field is optional except
    ``video_url``. ``options`` and ``recognition_options`` are plain string
    lists passed through unchanged.
    """

    def __init__(
        self,
        deduplication_id: str = None,
        intelli_simp_prompt: str = None,
        intelli_simp_prompt_template_id: str = None,
        language: str = None,
        model_custom_prompt_template_id: str = None,
        model_id: str = None,
        model_vl_custom_prompt_template_id: str = None,
        options: List[str] = None,
        original_session_id: str = None,
        pre_model_id: str = None,
        prompt: str = None,
        recognition_options: List[str] = None,
        task_id: str = None,
        video_url: str = None,
        vl_prompt: str = None,
    ):
        self.deduplication_id = deduplication_id
        self.intelli_simp_prompt = intelli_simp_prompt
        self.intelli_simp_prompt_template_id = intelli_simp_prompt_template_id
        self.language = language
        self.model_custom_prompt_template_id = model_custom_prompt_template_id
        self.model_id = model_id
        self.model_vl_custom_prompt_template_id = model_vl_custom_prompt_template_id
        self.options = options
        self.original_session_id = original_session_id
        self.pre_model_id = pre_model_id
        self.prompt = prompt
        self.recognition_options = recognition_options
        self.task_id = task_id
        # This parameter is required.
        self.video_url = video_url
        self.vl_prompt = vl_prompt

    def validate(self):
        """No nested models; nothing to validate."""
        pass

    def _field_items(self):
        # (wire key, attribute name) pairs, kept in wire order.
        return (
            ('deduplicationId', 'deduplication_id'),
            ('intelliSimpPrompt', 'intelli_simp_prompt'),
            ('intelliSimpPromptTemplateId', 'intelli_simp_prompt_template_id'),
            ('language', 'language'),
            ('modelCustomPromptTemplateId', 'model_custom_prompt_template_id'),
            ('modelId', 'model_id'),
            ('modelVlCustomPromptTemplateId', 'model_vl_custom_prompt_template_id'),
            ('options', 'options'),
            ('originalSessionId', 'original_session_id'),
            ('preModelId', 'pre_model_id'),
            ('prompt', 'prompt'),
            ('recognitionOptions', 'recognition_options'),
            ('taskId', 'task_id'),
            ('videoUrl', 'video_url'),
            ('vlPrompt', 'vl_prompt'),
        )

    def to_map(self):
        """Serialize this model to a plain dict, omitting unset fields."""
        mapped = super().to_map()
        if mapped is None:
            mapped = dict()
        for key, attr in self._field_items():
            value = getattr(self, attr)
            if value is not None:
                mapped[key] = value
        return mapped

    def from_map(self, m: dict = None):
        """Populate this model from a plain dict and return self."""
        source = m or dict()
        for key, attr in self._field_items():
            if source.get(key) is not None:
                setattr(self, attr, source.get(key))
        return self
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# This file is auto-generated, don't edit it. Thanks.
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Dict
|
|
6
|
+
|
|
7
|
+
from alibabacloud_quanmiaolightapp20240801 import models as main_models
|
|
8
|
+
from darabonba.model import DaraModel
|
|
9
|
+
|
|
10
|
+
class SubmitVideoDetectShotTaskResponse(DaraModel):
    """Envelope for the SubmitVideoDetectShotTask API response.

    Bundles the raw HTTP headers and status code with the parsed
    response body model.
    """

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: main_models.SubmitVideoDetectShotTaskResponseBody = None,
    ):
        # Raw HTTP response headers.
        self.headers = headers
        # HTTP status code of the response.
        self.status_code = status_code
        # Parsed response payload.
        self.body = body

    def validate(self):
        # Only the nested body model carries validation logic.
        if self.body:
            self.body.validate()

    def to_map(self):
        """Serialize this model to a wire-format dict, skipping None fields."""
        base = super().to_map()
        result = base if base is not None else dict()
        for key, value in (('headers', self.headers), ('statusCode', self.status_code)):
            if value is not None:
                result[key] = value
        if self.body is not None:
            # Nested model serializes itself.
            result['body'] = self.body.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a wire-format dict; returns self."""
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            # Delegate body parsing to the nested model.
            self.body = main_models.SubmitVideoDetectShotTaskResponseBody().from_map(m.get('body'))
        return self
|
|
54
|
+
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# This file is auto-generated, don't edit it. Thanks.
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from alibabacloud_quanmiaolightapp20240801 import models as main_models
|
|
6
|
+
from darabonba.model import DaraModel
|
|
7
|
+
|
|
8
|
+
class SubmitVideoDetectShotTaskResponseBody(DaraModel):
    """Body of the SubmitVideoDetectShotTask API response.

    Carries the service result envelope (code / message / success flags)
    plus the task payload in ``data``.
    """

    def __init__(
        self,
        code: str = None,
        data: main_models.SubmitVideoDetectShotTaskResponseBodyData = None,
        http_status_code: int = None,
        message: str = None,
        request_id: str = None,
        success: bool = None,
    ):
        # Service-level result code.
        self.code = code
        # Task payload (contains the submitted task id).
        self.data = data
        # HTTP status code echoed in the body.
        self.http_status_code = http_status_code
        # Human-readable result message.
        self.message = message
        # Id of the request
        self.request_id = request_id
        # Whether the call succeeded.
        self.success = success

    def validate(self):
        # Only the nested data model carries validation logic.
        if self.data:
            self.data.validate()

    def to_map(self):
        """Serialize this model to a wire-format dict, skipping None fields."""
        base = super().to_map()
        result = base if base is not None else dict()
        scalars = (
            ('code', self.code),
            ('httpStatusCode', self.http_status_code),
            ('message', self.message),
            ('requestId', self.request_id),
            ('success', self.success),
        )
        for key, value in scalars:
            if value is not None:
                result[key] = value
        if self.data is not None:
            # Nested model serializes itself.
            result['data'] = self.data.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a wire-format dict; returns self."""
        m = m or dict()
        if m.get('code') is not None:
            self.code = m.get('code')
        if m.get('data') is not None:
            # Delegate payload parsing to the nested model.
            self.data = main_models.SubmitVideoDetectShotTaskResponseBodyData().from_map(m.get('data'))
        if m.get('httpStatusCode') is not None:
            self.http_status_code = m.get('httpStatusCode')
        if m.get('message') is not None:
            self.message = m.get('message')
        if m.get('requestId') is not None:
            self.request_id = m.get('requestId')
        if m.get('success') is not None:
            self.success = m.get('success')
        return self
|
|
77
|
+
|
|
78
|
+
class SubmitVideoDetectShotTaskResponseBodyData(DaraModel):
    """Payload of a successful SubmitVideoDetectShotTask call."""

    def __init__(
        self,
        task_id: str = None,
    ):
        # Identifier of the submitted asynchronous task.
        self.task_id = task_id

    def validate(self):
        # No nested models, nothing to check.
        pass

    def to_map(self):
        """Serialize this model to a wire-format dict, skipping None fields."""
        base = super().to_map()
        result = base if base is not None else dict()
        if self.task_id is not None:
            result['taskId'] = self.task_id
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a wire-format dict; returns self."""
        source = m if m else dict()
        if source.get('taskId') is not None:
            self.task_id = source.get('taskId')
        return self
|
|
104
|
+
|