alibabacloud_quanmiaolightapp20240801-2.13.2-py3-none-any.whl → alibabacloud_quanmiaolightapp20240801-2.13.4-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- alibabacloud_quanmiaolightapp20240801/__init__.py +1 -1
- alibabacloud_quanmiaolightapp20240801/client.py +4448 -3769
- alibabacloud_quanmiaolightapp20240801/models/__init__.py +693 -0
- alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response_body.py +74 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_request.py +52 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response_body.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_shrink_request.py +50 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response_body.py +303 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_request.py +112 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_shrink_request.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response_body.py +374 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response_body.py +195 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_file_content_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response_body.py +347 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response_body.py +1620 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response_body.py +106 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response_body.py +494 -0
- alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response_body.py +180 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_request.py +50 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response_body.py +196 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_request.py +65 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response_body.py +367 -0
- alibabacloud_quanmiaolightapp20240801/models/_model_usage.py +49 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_request.py +203 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response_body.py +331 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_shrink_request.py +109 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_request.py +81 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response_body.py +241 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_request.py +264 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response_body.py +636 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_shrink_request.py +121 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_request.py +100 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response_body.py +241 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_shrink_request.py +52 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_request.py +59 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response_body.py +232 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_shrink_request.py +57 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_request.py +89 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_request.py +136 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response_body.py +233 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_shrink_request.py +90 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_request.py +49 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response_body.py +233 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_request.py +42 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_request.py +50 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_request.py +82 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response_body.py +290 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_request.py +75 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_shrink_request.py +73 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_request.py +136 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response_body.py +233 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_shrink_request.py +90 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_request.py +600 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response_body.py +1668 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_shrink_request.py +209 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_request.py +142 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response_body.py +363 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_shrink_request.py +140 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_request.py +247 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_shrink_request.py +113 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_request.py +167 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response_body.py +103 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_shrink_request.py +81 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_request.py +143 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_shrink_request.py +97 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_request.py +593 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response_body.py +103 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_shrink_request.py +202 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_request.py +148 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_shrink_request.py +146 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response_body.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_request.py +43 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response_body.py +119 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_request.py +45 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response_body.py +136 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_shrink_request.py +43 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response_body.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_request.py +43 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response_body.py +120 -0
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.4.dist-info}/METADATA +7 -7
- alibabacloud_quanmiaolightapp20240801-2.13.4.dist-info/RECORD +147 -0
- alibabacloud_quanmiaolightapp20240801/models.py +0 -16578
- alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info/RECORD +0 -8
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.4.dist-info}/LICENSE +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.4.dist-info}/WHEEL +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.4.dist-info}/top_level.txt +0 -0
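
The headline change is structural: the monolithic models.py (16,578 lines removed) is split into one module per model under models/, re-exported through a new 693-line models/__init__.py. The generated modules themselves resolve classes via `from alibabacloud_quanmiaolightapp20240801 import models as main_models`, which implies the flat namespace survives the split. A minimal sketch of that assumption (not verified against the wheel contents):

# Minimal sketch, assuming models/__init__.py re-exports each model class
# under its previous flat name (implied by the generated modules, which
# import `models as main_models` and resolve classes off that namespace).
from alibabacloud_quanmiaolightapp20240801 import models as main_models
from alibabacloud_quanmiaolightapp20240801.models import RunVideoAnalysisShrinkRequest

# Both paths should resolve to the same class object if the re-export holds.
assert main_models.RunVideoAnalysisShrinkRequest is RunVideoAnalysisShrinkRequest

Three of the new per-model modules are reproduced below.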

alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_shrink_request.py
@@ -0,0 +1,209 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from darabonba.model import DaraModel
+
+class RunVideoAnalysisShrinkRequest(DaraModel):
+    def __init__(
+        self,
+        add_document_param_shrink: str = None,
+        auto_role_recognition_video_url: str = None,
+        exclude_generate_options_shrink: str = None,
+        face_identity_similarity_min_score: float = None,
+        frame_sample_method_shrink: str = None,
+        generate_options_shrink: str = None,
+        language: str = None,
+        model_custom_prompt_template: str = None,
+        model_custom_prompt_template_id: str = None,
+        model_id: str = None,
+        original_session_id: str = None,
+        snapshot_interval: float = None,
+        split_interval: int = None,
+        split_type: str = None,
+        task_id: str = None,
+        text_process_tasks_shrink: str = None,
+        video_caption_info_shrink: str = None,
+        video_extra_info: str = None,
+        video_model_custom_prompt_template: str = None,
+        video_model_id: str = None,
+        video_roles_shrink: str = None,
+        video_shot_face_identity_count: int = None,
+        video_url: str = None,
+    ):
+        self.add_document_param_shrink = add_document_param_shrink
+        self.auto_role_recognition_video_url = auto_role_recognition_video_url
+        self.exclude_generate_options_shrink = exclude_generate_options_shrink
+        self.face_identity_similarity_min_score = face_identity_similarity_min_score
+        self.frame_sample_method_shrink = frame_sample_method_shrink
+        self.generate_options_shrink = generate_options_shrink
+        self.language = language
+        self.model_custom_prompt_template = model_custom_prompt_template
+        self.model_custom_prompt_template_id = model_custom_prompt_template_id
+        self.model_id = model_id
+        self.original_session_id = original_session_id
+        self.snapshot_interval = snapshot_interval
+        self.split_interval = split_interval
+        self.split_type = split_type
+        self.task_id = task_id
+        self.text_process_tasks_shrink = text_process_tasks_shrink
+        self.video_caption_info_shrink = video_caption_info_shrink
+        self.video_extra_info = video_extra_info
+        self.video_model_custom_prompt_template = video_model_custom_prompt_template
+        self.video_model_id = video_model_id
+        self.video_roles_shrink = video_roles_shrink
+        self.video_shot_face_identity_count = video_shot_face_identity_count
+        self.video_url = video_url
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.add_document_param_shrink is not None:
+            result['addDocumentParam'] = self.add_document_param_shrink
+
+        if self.auto_role_recognition_video_url is not None:
+            result['autoRoleRecognitionVideoUrl'] = self.auto_role_recognition_video_url
+
+        if self.exclude_generate_options_shrink is not None:
+            result['excludeGenerateOptions'] = self.exclude_generate_options_shrink
+
+        if self.face_identity_similarity_min_score is not None:
+            result['faceIdentitySimilarityMinScore'] = self.face_identity_similarity_min_score
+
+        if self.frame_sample_method_shrink is not None:
+            result['frameSampleMethod'] = self.frame_sample_method_shrink
+
+        if self.generate_options_shrink is not None:
+            result['generateOptions'] = self.generate_options_shrink
+
+        if self.language is not None:
+            result['language'] = self.language
+
+        if self.model_custom_prompt_template is not None:
+            result['modelCustomPromptTemplate'] = self.model_custom_prompt_template
+
+        if self.model_custom_prompt_template_id is not None:
+            result['modelCustomPromptTemplateId'] = self.model_custom_prompt_template_id
+
+        if self.model_id is not None:
+            result['modelId'] = self.model_id
+
+        if self.original_session_id is not None:
+            result['originalSessionId'] = self.original_session_id
+
+        if self.snapshot_interval is not None:
+            result['snapshotInterval'] = self.snapshot_interval
+
+        if self.split_interval is not None:
+            result['splitInterval'] = self.split_interval
+
+        if self.split_type is not None:
+            result['splitType'] = self.split_type
+
+        if self.task_id is not None:
+            result['taskId'] = self.task_id
+
+        if self.text_process_tasks_shrink is not None:
+            result['textProcessTasks'] = self.text_process_tasks_shrink
+
+        if self.video_caption_info_shrink is not None:
+            result['videoCaptionInfo'] = self.video_caption_info_shrink
+
+        if self.video_extra_info is not None:
+            result['videoExtraInfo'] = self.video_extra_info
+
+        if self.video_model_custom_prompt_template is not None:
+            result['videoModelCustomPromptTemplate'] = self.video_model_custom_prompt_template
+
+        if self.video_model_id is not None:
+            result['videoModelId'] = self.video_model_id
+
+        if self.video_roles_shrink is not None:
+            result['videoRoles'] = self.video_roles_shrink
+
+        if self.video_shot_face_identity_count is not None:
+            result['videoShotFaceIdentityCount'] = self.video_shot_face_identity_count
+
+        if self.video_url is not None:
+            result['videoUrl'] = self.video_url
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('addDocumentParam') is not None:
+            self.add_document_param_shrink = m.get('addDocumentParam')
+
+        if m.get('autoRoleRecognitionVideoUrl') is not None:
+            self.auto_role_recognition_video_url = m.get('autoRoleRecognitionVideoUrl')
+
+        if m.get('excludeGenerateOptions') is not None:
+            self.exclude_generate_options_shrink = m.get('excludeGenerateOptions')
+
+        if m.get('faceIdentitySimilarityMinScore') is not None:
+            self.face_identity_similarity_min_score = m.get('faceIdentitySimilarityMinScore')
+
+        if m.get('frameSampleMethod') is not None:
+            self.frame_sample_method_shrink = m.get('frameSampleMethod')
+
+        if m.get('generateOptions') is not None:
+            self.generate_options_shrink = m.get('generateOptions')
+
+        if m.get('language') is not None:
+            self.language = m.get('language')
+
+        if m.get('modelCustomPromptTemplate') is not None:
+            self.model_custom_prompt_template = m.get('modelCustomPromptTemplate')
+
+        if m.get('modelCustomPromptTemplateId') is not None:
+            self.model_custom_prompt_template_id = m.get('modelCustomPromptTemplateId')
+
+        if m.get('modelId') is not None:
+            self.model_id = m.get('modelId')
+
+        if m.get('originalSessionId') is not None:
+            self.original_session_id = m.get('originalSessionId')
+
+        if m.get('snapshotInterval') is not None:
+            self.snapshot_interval = m.get('snapshotInterval')
+
+        if m.get('splitInterval') is not None:
+            self.split_interval = m.get('splitInterval')
+
+        if m.get('splitType') is not None:
+            self.split_type = m.get('splitType')
+
+        if m.get('taskId') is not None:
+            self.task_id = m.get('taskId')
+
+        if m.get('textProcessTasks') is not None:
+            self.text_process_tasks_shrink = m.get('textProcessTasks')
+
+        if m.get('videoCaptionInfo') is not None:
+            self.video_caption_info_shrink = m.get('videoCaptionInfo')
+
+        if m.get('videoExtraInfo') is not None:
+            self.video_extra_info = m.get('videoExtraInfo')
+
+        if m.get('videoModelCustomPromptTemplate') is not None:
+            self.video_model_custom_prompt_template = m.get('videoModelCustomPromptTemplate')
+
+        if m.get('videoModelId') is not None:
+            self.video_model_id = m.get('videoModelId')
+
+        if m.get('videoRoles') is not None:
+            self.video_roles_shrink = m.get('videoRoles')
+
+        if m.get('videoShotFaceIdentityCount') is not None:
+            self.video_shot_face_identity_count = m.get('videoShotFaceIdentityCount')
+
+        if m.get('videoUrl') is not None:
+            self.video_url = m.get('videoUrl')
+
+        return self
+
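The Shrink variant above is the wire-side form of RunVideoAnalysisRequest: every complex field (lists, nested objects) is flattened to a JSON string before being mapped to its camelCase key. A minimal round-trip sketch, assuming the 2.13.4 wheel and its darabonba dependency are installed (the generateOptions value is an illustrative placeholder, not a documented option):

import json

from alibabacloud_quanmiaolightapp20240801 import models as main_models

request = main_models.RunVideoAnalysisShrinkRequest(
    video_url='https://example.com/input.mp4',
    snapshot_interval=2.0,
    # Shrink fields carry pre-serialized JSON strings, not Python lists.
    generate_options_shrink=json.dumps(['videoGenerateSummary']),
)

wire = request.to_map()  # emits the camelCase wire-level keys
assert wire['generateOptions'] == '["videoGenerateSummary"]'
restored = main_models.RunVideoAnalysisShrinkRequest().from_map(wire)
assert restored.video_url == request.video_url  # lossless round trip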

alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_request.py
@@ -0,0 +1,142 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from typing import List
+
+from darabonba.model import DaraModel
+
+class RunVideoDetectShotRequest(DaraModel):
+    def __init__(
+        self,
+        intelli_simp_prompt: str = None,
+        intelli_simp_prompt_template_id: str = None,
+        language: str = None,
+        model_custom_prompt_template_id: str = None,
+        model_id: str = None,
+        model_vl_custom_prompt_template_id: str = None,
+        options: List[str] = None,
+        original_session_id: str = None,
+        pre_model_id: str = None,
+        prompt: str = None,
+        recognition_options: List[str] = None,
+        task_id: str = None,
+        video_url: str = None,
+        vl_prompt: str = None,
+    ):
+        self.intelli_simp_prompt = intelli_simp_prompt
+        self.intelli_simp_prompt_template_id = intelli_simp_prompt_template_id
+        self.language = language
+        self.model_custom_prompt_template_id = model_custom_prompt_template_id
+        self.model_id = model_id
+        self.model_vl_custom_prompt_template_id = model_vl_custom_prompt_template_id
+        # This parameter is required.
+        self.options = options
+        self.original_session_id = original_session_id
+        self.pre_model_id = pre_model_id
+        self.prompt = prompt
+        # This parameter is required.
+        self.recognition_options = recognition_options
+        self.task_id = task_id
+        # This parameter is required.
+        self.video_url = video_url
+        self.vl_prompt = vl_prompt
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.intelli_simp_prompt is not None:
+            result['intelliSimpPrompt'] = self.intelli_simp_prompt
+
+        if self.intelli_simp_prompt_template_id is not None:
+            result['intelliSimpPromptTemplateId'] = self.intelli_simp_prompt_template_id
+
+        if self.language is not None:
+            result['language'] = self.language
+
+        if self.model_custom_prompt_template_id is not None:
+            result['modelCustomPromptTemplateId'] = self.model_custom_prompt_template_id
+
+        if self.model_id is not None:
+            result['modelId'] = self.model_id
+
+        if self.model_vl_custom_prompt_template_id is not None:
+            result['modelVlCustomPromptTemplateId'] = self.model_vl_custom_prompt_template_id
+
+        if self.options is not None:
+            result['options'] = self.options
+
+        if self.original_session_id is not None:
+            result['originalSessionId'] = self.original_session_id
+
+        if self.pre_model_id is not None:
+            result['preModelId'] = self.pre_model_id
+
+        if self.prompt is not None:
+            result['prompt'] = self.prompt
+
+        if self.recognition_options is not None:
+            result['recognitionOptions'] = self.recognition_options
+
+        if self.task_id is not None:
+            result['taskId'] = self.task_id
+
+        if self.video_url is not None:
+            result['videoUrl'] = self.video_url
+
+        if self.vl_prompt is not None:
+            result['vlPrompt'] = self.vl_prompt
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('intelliSimpPrompt') is not None:
+            self.intelli_simp_prompt = m.get('intelliSimpPrompt')
+
+        if m.get('intelliSimpPromptTemplateId') is not None:
+            self.intelli_simp_prompt_template_id = m.get('intelliSimpPromptTemplateId')
+
+        if m.get('language') is not None:
+            self.language = m.get('language')
+
+        if m.get('modelCustomPromptTemplateId') is not None:
+            self.model_custom_prompt_template_id = m.get('modelCustomPromptTemplateId')
+
+        if m.get('modelId') is not None:
+            self.model_id = m.get('modelId')
+
+        if m.get('modelVlCustomPromptTemplateId') is not None:
+            self.model_vl_custom_prompt_template_id = m.get('modelVlCustomPromptTemplateId')
+
+        if m.get('options') is not None:
+            self.options = m.get('options')
+
+        if m.get('originalSessionId') is not None:
+            self.original_session_id = m.get('originalSessionId')
+
+        if m.get('preModelId') is not None:
+            self.pre_model_id = m.get('preModelId')
+
+        if m.get('prompt') is not None:
+            self.prompt = m.get('prompt')
+
+        if m.get('recognitionOptions') is not None:
+            self.recognition_options = m.get('recognitionOptions')
+
+        if m.get('taskId') is not None:
+            self.task_id = m.get('taskId')
+
+        if m.get('videoUrl') is not None:
+            self.video_url = m.get('videoUrl')
+
+        if m.get('vlPrompt') is not None:
+            self.vl_prompt = m.get('vlPrompt')
+
+        return self
+
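Note that options, recognitionOptions, and videoUrl are marked required only in comments; validate() is a no-op, so a missing field is not caught client-side. A construction sketch (the option values are hypothetical placeholders, not documented names):

from alibabacloud_quanmiaolightapp20240801 import models as main_models

request = main_models.RunVideoDetectShotRequest(
    video_url='https://example.com/input.mp4',
    options=['detectShot'],       # hypothetical value; consult the API docs
    recognition_options=['ocr'],  # hypothetical value; consult the API docs
)
request.validate()  # no-op: passes even if the required fields were unset
# List fields pass through to_map() unchanged; there is no shrink step here.
assert request.to_map()['recognitionOptions'] == ['ocr']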

alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from typing import Dict
+
+from alibabacloud_quanmiaolightapp20240801 import models as main_models
+from darabonba.model import DaraModel
+
+class RunVideoDetectShotResponse(DaraModel):
+    def __init__(
+        self,
+        headers: Dict[str, str] = None,
+        status_code: int = None,
+        body: main_models.RunVideoDetectShotResponseBody = None,
+    ):
+        self.headers = headers
+        self.status_code = status_code
+        self.body = body
+
+    def validate(self):
+        if self.body:
+            self.body.validate()
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.headers is not None:
+            result['headers'] = self.headers
+
+        if self.status_code is not None:
+            result['statusCode'] = self.status_code
+
+        if self.body is not None:
+            result['body'] = self.body.to_map()
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('headers') is not None:
+            self.headers = m.get('headers')
+
+        if m.get('statusCode') is not None:
+            self.status_code = m.get('statusCode')
+
+        if m.get('body') is not None:
+            temp_model = main_models.RunVideoDetectShotResponseBody()
+            self.body = temp_model.from_map(m.get('body'))
+
+        return self
+
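The response wrapper is the one place where validation and mapping recurse: validate() delegates to the body, and from_map() rehydrates the 'body' dict through RunVideoDetectShotResponseBody. A deserialization sketch (the body payload is a stand-in; the real field set lives in _run_video_detect_shot_response_body.py):

from alibabacloud_quanmiaolightapp20240801 import models as main_models

raw = {
    'headers': {'x-acs-request-id': 'demo-request-id'},
    'statusCode': 200,
    'body': {},  # stand-in; real payloads carry the detect-shot results
}
response = main_models.RunVideoDetectShotResponse().from_map(raw)
assert response.status_code == 200
# The nested dict was rehydrated into a typed model, not left as a dict.
assert isinstance(response.body, main_models.RunVideoDetectShotResponseBody)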