alibabacloud-quanmiaolightapp20240801 2.13.2__py3-none-any.whl → 2.13.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alibabacloud_quanmiaolightapp20240801/__init__.py +1 -1
- alibabacloud_quanmiaolightapp20240801/client.py +4448 -3769
- alibabacloud_quanmiaolightapp20240801/models/__init__.py +691 -0
- alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response_body.py +74 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_request.py +52 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response_body.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_shrink_request.py +50 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response_body.py +303 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_request.py +112 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_shrink_request.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response_body.py +374 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response_body.py +174 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_file_content_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response_body.py +347 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response_body.py +1620 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response_body.py +106 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response_body.py +494 -0
- alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response_body.py +180 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_request.py +50 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response_body.py +196 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_request.py +65 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response_body.py +367 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_request.py +203 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response_body.py +331 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_shrink_request.py +109 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_request.py +81 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response_body.py +241 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_request.py +264 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response_body.py +636 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_shrink_request.py +121 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_request.py +100 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response_body.py +241 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_shrink_request.py +52 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_request.py +59 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response_body.py +232 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_shrink_request.py +57 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_request.py +89 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_request.py +136 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response_body.py +233 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_shrink_request.py +90 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_request.py +49 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response_body.py +233 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_request.py +42 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_request.py +50 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_request.py +82 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response_body.py +290 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_request.py +75 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_shrink_request.py +73 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_request.py +136 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response_body.py +233 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_shrink_request.py +90 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_request.py +600 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response_body.py +1668 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_shrink_request.py +209 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_request.py +142 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response_body.py +363 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_shrink_request.py +140 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_request.py +247 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_shrink_request.py +113 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_request.py +167 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response_body.py +103 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_shrink_request.py +81 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_request.py +143 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_shrink_request.py +97 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_request.py +593 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response_body.py +103 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_shrink_request.py +202 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_request.py +148 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_shrink_request.py +146 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response_body.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_request.py +43 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response_body.py +119 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_request.py +45 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response_body.py +136 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_shrink_request.py +43 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response_body.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_request.py +43 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response_body.py +120 -0
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/METADATA +7 -7
- alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info/RECORD +146 -0
- alibabacloud_quanmiaolightapp20240801/models.py +0 -16578
- alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info/RECORD +0 -8
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/LICENSE +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/WHEEL +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.3.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,494 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# This file is auto-generated, don't edit it. Thanks.
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import List, Dict
|
|
6
|
+
|
|
7
|
+
from alibabacloud_quanmiaolightapp20240801 import models as main_models
|
|
8
|
+
from darabonba.model import DaraModel
|
|
9
|
+
|
|
10
|
+
class GetVideoDetectShotTaskResponseBody(DaraModel):
    """Response body of the GetVideoDetectShotTask API.

    Carries the generic envelope fields (code, message, requestId, success,
    httpStatusCode) plus the task result in ``data``.
    """

    def __init__(
        self,
        code: str = None,
        data: main_models.GetVideoDetectShotTaskResponseBodyData = None,
        http_status_code: int = None,
        message: str = None,
        request_id: str = None,
        success: bool = None,
    ):
        self.code = code
        self.data = data
        self.http_status_code = http_status_code
        self.message = message
        # Id of the request
        self.request_id = request_id
        self.success = success

    def validate(self):
        # Only the nested model needs recursive validation.
        if self.data:
            self.data.validate()

    def to_map(self):
        result = super().to_map()
        if result is None:
            result = dict()
        # Emit only fields that are set; nested model serializes itself.
        if self.code is not None:
            result['code'] = self.code
        if self.data is not None:
            result['data'] = self.data.to_map()
        for wire_key, value in (
            ('httpStatusCode', self.http_status_code),
            ('message', self.message),
            ('requestId', self.request_id),
            ('success', self.success),
        ):
            if value is not None:
                result[wire_key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('code') is not None:
            self.code = m.get('code')
        if m.get('data') is not None:
            self.data = main_models.GetVideoDetectShotTaskResponseBodyData().from_map(m.get('data'))
        for wire_key, attr in (
            ('httpStatusCode', 'http_status_code'),
            ('message', 'message'),
            ('requestId', 'request_id'),
            ('success', 'success'),
        ):
            if m.get(wire_key) is not None:
                setattr(self, attr, m.get(wire_key))
        return self
|
|
79
|
+
|
|
80
|
+
class GetVideoDetectShotTaskResponseBodyData(DaraModel):
    """Task-level result of GetVideoDetectShotTask.

    Bundles the task identity/status fields with three nested models:
    ``header``, ``payload`` and ``task_run_info``.
    """

    def __init__(
        self,
        error_message: str = None,
        header: main_models.GetVideoDetectShotTaskResponseBodyDataHeader = None,
        payload: main_models.GetVideoDetectShotTaskResponseBodyDataPayload = None,
        task_id: str = None,
        task_run_info: main_models.GetVideoDetectShotTaskResponseBodyDataTaskRunInfo = None,
        task_status: str = None,
    ):
        self.error_message = error_message
        self.header = header
        self.payload = payload
        self.task_id = task_id
        self.task_run_info = task_run_info
        self.task_status = task_status

    def validate(self):
        # Recurse into each nested model that is present.
        for nested in (self.header, self.payload, self.task_run_info):
            if nested:
                nested.validate()

    def to_map(self):
        result = super().to_map()
        if result is None:
            result = dict()
        if self.error_message is not None:
            result['errorMessage'] = self.error_message
        if self.header is not None:
            result['header'] = self.header.to_map()
        if self.payload is not None:
            result['payload'] = self.payload.to_map()
        if self.task_id is not None:
            result['taskId'] = self.task_id
        if self.task_run_info is not None:
            result['taskRunInfo'] = self.task_run_info.to_map()
        if self.task_status is not None:
            result['taskStatus'] = self.task_status
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('errorMessage') is not None:
            self.error_message = m.get('errorMessage')
        if m.get('header') is not None:
            self.header = main_models.GetVideoDetectShotTaskResponseBodyDataHeader().from_map(m.get('header'))
        if m.get('payload') is not None:
            self.payload = main_models.GetVideoDetectShotTaskResponseBodyDataPayload().from_map(m.get('payload'))
        if m.get('taskId') is not None:
            self.task_id = m.get('taskId')
        if m.get('taskRunInfo') is not None:
            self.task_run_info = main_models.GetVideoDetectShotTaskResponseBodyDataTaskRunInfo().from_map(m.get('taskRunInfo'))
        if m.get('taskStatus') is not None:
            self.task_status = m.get('taskStatus')
        return self
|
|
154
|
+
|
|
155
|
+
class GetVideoDetectShotTaskResponseBodyDataTaskRunInfo(DaraModel):
    """Run metadata for a detect-shot task: billing flag and response time."""

    def __init__(
        self,
        concurrent_charge_enable: bool = None,
        response_time: int = None,
    ):
        self.concurrent_charge_enable = concurrent_charge_enable
        self.response_time = response_time

    def validate(self):
        # All fields are scalars; nothing to recurse into.
        pass

    def to_map(self):
        result = super().to_map()
        if result is None:
            result = dict()
        # Serialize only non-None scalar fields, preserving wire-key order.
        for wire_key, value in (
            ('concurrentChargeEnable', self.concurrent_charge_enable),
            ('responseTime', self.response_time),
        ):
            if value is not None:
                result[wire_key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire_key, attr in (
            ('concurrentChargeEnable', 'concurrent_charge_enable'),
            ('responseTime', 'response_time'),
        ):
            if m.get(wire_key) is not None:
                setattr(self, attr, m.get(wire_key))
        return self
|
|
189
|
+
|
|
190
|
+
class GetVideoDetectShotTaskResponseBodyDataPayload(DaraModel):
    """Payload of a detect-shot task result: ``output`` plus token ``usage``."""

    def __init__(
        self,
        output: main_models.GetVideoDetectShotTaskResponseBodyDataPayloadOutput = None,
        usage: main_models.GetVideoDetectShotTaskResponseBodyDataPayloadUsage = None,
    ):
        self.output = output
        self.usage = usage

    def validate(self):
        # Both fields are nested models; validate whichever is present.
        for nested in (self.output, self.usage):
            if nested:
                nested.validate()

    def to_map(self):
        result = super().to_map()
        if result is None:
            result = dict()
        if self.output is not None:
            result['output'] = self.output.to_map()
        if self.usage is not None:
            result['usage'] = self.usage.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('output') is not None:
            self.output = main_models.GetVideoDetectShotTaskResponseBodyDataPayloadOutput().from_map(m.get('output'))
        if m.get('usage') is not None:
            self.usage = main_models.GetVideoDetectShotTaskResponseBodyDataPayloadUsage().from_map(m.get('usage'))
        return self
|
|
229
|
+
|
|
230
|
+
class GetVideoDetectShotTaskResponseBodyDataPayloadUsage(DaraModel):
    """Token accounting for the task: input, output and total token counts."""

    def __init__(
        self,
        input_tokens: int = None,
        output_tokens: int = None,
        total_tokens: int = None,
    ):
        self.input_tokens = input_tokens
        self.output_tokens = output_tokens
        self.total_tokens = total_tokens

    def validate(self):
        # Scalar-only model; no nested validation required.
        pass

    def to_map(self):
        result = super().to_map()
        if result is None:
            result = dict()
        for wire_key, value in (
            ('inputTokens', self.input_tokens),
            ('outputTokens', self.output_tokens),
            ('totalTokens', self.total_tokens),
        ):
            if value is not None:
                result[wire_key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire_key, attr in (
            ('inputTokens', 'input_tokens'),
            ('outputTokens', 'output_tokens'),
            ('totalTokens', 'total_tokens'),
        ):
            if m.get(wire_key) is not None:
                setattr(self, attr, m.get(wire_key))
        return self
|
|
272
|
+
|
|
273
|
+
class GetVideoDetectShotTaskResponseBodyDataPayloadOutput(DaraModel):
    """Output wrapper holding the video split result of a detect-shot task."""

    def __init__(
        self,
        video_split_result: main_models.GetVideoDetectShotTaskResponseBodyDataPayloadOutputVideoSplitResult = None,
    ):
        self.video_split_result = video_split_result

    def validate(self):
        if self.video_split_result:
            self.video_split_result.validate()

    def to_map(self):
        result = super().to_map()
        if result is None:
            result = dict()
        if self.video_split_result is not None:
            result['videoSplitResult'] = self.video_split_result.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        raw = m.get('videoSplitResult')
        if raw is not None:
            self.video_split_result = (
                main_models.GetVideoDetectShotTaskResponseBodyDataPayloadOutputVideoSplitResult().from_map(raw)
            )
        return self
|
|
301
|
+
|
|
302
|
+
class GetVideoDetectShotTaskResponseBodyDataPayloadOutputVideoSplitResult(DaraModel):
    """Split result of a detect-shot task.

    Holds free-text fields, the raw ``video_parts`` dicts and a list of
    per-segment recognition-result models.
    """

    def __init__(
        self,
        reason_text: str = None,
        text: str = None,
        video_parts: List[Dict[str, str]] = None,
        video_recognition_result: List[main_models.GetVideoDetectShotTaskResponseBodyDataPayloadOutputVideoSplitResultVideoRecognitionResult] = None,
    ):
        self.reason_text = reason_text
        self.text = text
        self.video_parts = video_parts
        self.video_recognition_result = video_recognition_result

    def validate(self):
        # Only the recognition-result entries are nested models.
        for entry in self.video_recognition_result or ():
            if entry:
                entry.validate()

    def to_map(self):
        result = super().to_map()
        if result is None:
            result = dict()
        if self.reason_text is not None:
            result['reasonText'] = self.reason_text
        if self.text is not None:
            result['text'] = self.text
        if self.video_parts is not None:
            result['videoParts'] = self.video_parts
        # NOTE: 'videoRecognitionResult' is always emitted, even when the
        # attribute is None (matching the generated wire format).
        serialized = []
        if self.video_recognition_result is not None:
            serialized = [item.to_map() if item else None for item in self.video_recognition_result]
        result['videoRecognitionResult'] = serialized
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('reasonText') is not None:
            self.reason_text = m.get('reasonText')
        if m.get('text') is not None:
            self.text = m.get('text')
        if m.get('videoParts') is not None:
            self.video_parts = m.get('videoParts')
        # Always reset to a fresh list before repopulating.
        self.video_recognition_result = []
        raw_items = m.get('videoRecognitionResult')
        if raw_items is not None:
            for raw in raw_items:
                model = main_models.GetVideoDetectShotTaskResponseBodyDataPayloadOutputVideoSplitResultVideoRecognitionResult()
                self.video_recognition_result.append(model.from_map(raw))
        return self
|
|
360
|
+
|
|
361
|
+
class GetVideoDetectShotTaskResponseBodyDataPayloadOutputVideoSplitResultVideoRecognitionResult(DaraModel):
    """Per-segment recognition result: asr/ocr/vl text plus start/end times."""

    def __init__(
        self,
        asr: str = None,
        end_time: int = None,
        ocr: str = None,
        start_time: int = None,
        vl: str = None,
    ):
        self.asr = asr
        self.end_time = end_time
        self.ocr = ocr
        self.start_time = start_time
        self.vl = vl

    def validate(self):
        # Scalar-only model; nothing to validate recursively.
        pass

    def to_map(self):
        result = super().to_map()
        if result is None:
            result = dict()
        for wire_key, value in (
            ('asr', self.asr),
            ('endTime', self.end_time),
            ('ocr', self.ocr),
            ('startTime', self.start_time),
            ('vl', self.vl),
        ):
            if value is not None:
                result[wire_key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire_key, attr in (
            ('asr', 'asr'),
            ('endTime', 'end_time'),
            ('ocr', 'ocr'),
            ('startTime', 'start_time'),
            ('vl', 'vl'),
        ):
            if m.get(wire_key) is not None:
                setattr(self, attr, m.get(wire_key))
        return self
|
|
419
|
+
|
|
420
|
+
class GetVideoDetectShotTaskResponseBodyDataHeader(DaraModel):
    """Header block of a detect-shot task result.

    All fields are plain strings: error info, event data and the
    session/task/trace identifiers.
    """

    def __init__(
        self,
        error_code: str = None,
        error_message: str = None,
        event: str = None,
        event_info: str = None,
        session_id: str = None,
        task_id: str = None,
        trace_id: str = None,
    ):
        self.error_code = error_code
        self.error_message = error_message
        self.event = event
        self.event_info = event_info
        self.session_id = session_id
        self.task_id = task_id
        self.trace_id = trace_id

    def validate(self):
        # Scalar-only model; nothing to validate recursively.
        pass

    def to_map(self):
        result = super().to_map()
        if result is None:
            result = dict()
        for wire_key, value in (
            ('errorCode', self.error_code),
            ('errorMessage', self.error_message),
            ('event', self.event),
            ('eventInfo', self.event_info),
            ('sessionId', self.session_id),
            ('taskId', self.task_id),
            ('traceId', self.trace_id),
        ):
            if value is not None:
                result[wire_key] = value
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        for wire_key, attr in (
            ('errorCode', 'error_code'),
            ('errorMessage', 'error_message'),
            ('event', 'event'),
            ('eventInfo', 'event_info'),
            ('sessionId', 'session_id'),
            ('taskId', 'task_id'),
            ('traceId', 'trace_id'),
        ):
            if m.get(wire_key) is not None:
                setattr(self, attr, m.get(wire_key))
        return self
|
|
494
|
+
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# This file is auto-generated, don't edit it. Thanks.
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from darabonba.model import DaraModel
|
|
6
|
+
|
|
7
|
+
class HotNewsRecommendRequest(DaraModel):
    """Request model for HotNewsRecommend; carries a single ``prompt`` string."""

    def __init__(
        self,
        prompt: str = None,
    ):
        self.prompt = prompt

    def validate(self):
        # Single scalar field; no validation needed.
        pass

    def to_map(self):
        result = super().to_map()
        if result is None:
            result = dict()
        if self.prompt is not None:
            result['prompt'] = self.prompt
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        prompt = m.get('prompt')
        if prompt is not None:
            self.prompt = prompt
        return self
|
|
33
|
+
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# This file is auto-generated, don't edit it. Thanks.
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Dict
|
|
6
|
+
|
|
7
|
+
from alibabacloud_quanmiaolightapp20240801 import models as main_models
|
|
8
|
+
from darabonba.model import DaraModel
|
|
9
|
+
|
|
10
|
+
class HotNewsRecommendResponse(DaraModel):
    """HTTP-level wrapper for HotNewsRecommend: headers, status code and body."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        status_code: int = None,
        body: main_models.HotNewsRecommendResponseBody = None,
    ):
        self.headers = headers
        self.status_code = status_code
        self.body = body

    def validate(self):
        # Only the body is a nested model.
        if self.body:
            self.body.validate()

    def to_map(self):
        result = super().to_map()
        if result is None:
            result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.status_code is not None:
            result['statusCode'] = self.status_code
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('statusCode') is not None:
            self.status_code = m.get('statusCode')
        if m.get('body') is not None:
            self.body = main_models.HotNewsRecommendResponseBody().from_map(m.get('body'))
        return self
|
|
54
|
+
|