alibabacloud-quanmiaolightapp20240801 2.13.2__py3-none-any.whl → 2.13.4__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in the supported public registries. It is provided for informational purposes only.
- alibabacloud_quanmiaolightapp20240801/__init__.py +1 -1
- alibabacloud_quanmiaolightapp20240801/client.py +4448 -3769
- alibabacloud_quanmiaolightapp20240801/models/__init__.py +693 -0
- alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_cancel_async_task_response_body.py +74 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_request.py +52 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_response_body.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_export_analysis_tag_detail_by_task_id_shrink_request.py +50 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_broadcast_news_response_body.py +303 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_request.py +112 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_generate_output_format_shrink_request.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_enterprise_voc_analysis_task_response_body.py +374 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_essay_correction_task_response_body.py +195 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_file_content_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_file_content_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_tag_mining_analysis_task_response_body.py +347 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_config_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_analysis_task_response_body.py +1620 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_config_response_body.py +106 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_get_video_detect_shot_task_response_body.py +494 -0
- alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_hot_news_recommend_response_body.py +180 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_request.py +50 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_analysis_tag_detail_by_task_id_response_body.py +196 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_request.py +65 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response_body.py +367 -0
- alibabacloud_quanmiaolightapp20240801/models/_model_usage.py +49 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_request.py +203 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response_body.py +331 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_shrink_request.py +109 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_request.py +81 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_essay_correction_response_body.py +241 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_request.py +264 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_response_body.py +636 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_chat_shrink_request.py +121 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_request.py +100 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_response_body.py +241 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_hot_topic_summary_shrink_request.py +52 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_request.py +59 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_response_body.py +232 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_extract_shrink_request.py +57 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_request.py +89 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_marketing_information_writing_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_request.py +136 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_response_body.py +233 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_network_content_audit_shrink_request.py +90 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_request.py +49 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_ocr_parse_response_body.py +233 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_request.py +42 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_chat_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_request.py +50 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_continue_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_request.py +82 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_planning_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_request.py +33 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_script_refine_response_body.py +290 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_request.py +75 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_response_body.py +248 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_style_writing_shrink_request.py +73 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_request.py +136 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_response_body.py +233 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_tag_mining_analysis_shrink_request.py +90 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_request.py +600 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_response_body.py +1668 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_analysis_shrink_request.py +209 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_request.py +142 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_response_body.py +363 -0
- alibabacloud_quanmiaolightapp20240801/models/_run_video_detect_shot_shrink_request.py +140 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_request.py +247 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_enterprise_voc_analysis_task_shrink_request.py +113 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_request.py +167 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_response_body.py +103 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_essay_correction_task_shrink_request.py +81 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_request.py +143 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_tag_mining_analysis_task_shrink_request.py +97 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_request.py +593 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_response_body.py +103 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_analysis_task_shrink_request.py +202 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_request.py +148 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_response_body.py +104 -0
- alibabacloud_quanmiaolightapp20240801/models/_submit_video_detect_shot_task_shrink_request.py +146 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_config_response_body.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_request.py +43 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_task_response_body.py +119 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_request.py +45 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_response_body.py +136 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_analysis_tasks_shrink_request.py +43 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_request.py +34 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_config_response_body.py +66 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_request.py +43 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response.py +54 -0
- alibabacloud_quanmiaolightapp20240801/models/_update_video_detect_shot_task_response_body.py +120 -0
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.4.dist-info}/METADATA +7 -7
- alibabacloud_quanmiaolightapp20240801-2.13.4.dist-info/RECORD +147 -0
- alibabacloud_quanmiaolightapp20240801/models.py +0 -16578
- alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info/RECORD +0 -8
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.4.dist-info}/LICENSE +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.4.dist-info}/WHEEL +0 -0
- {alibabacloud_quanmiaolightapp20240801-2.13.2.dist-info → alibabacloud_quanmiaolightapp20240801-2.13.4.dist-info}/top_level.txt +0 -0
alibabacloud_quanmiaolightapp20240801/models/_list_hot_topic_summaries_response_body.py (new file)
@@ -0,0 +1,367 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from typing import List
+
+from alibabacloud_quanmiaolightapp20240801 import models as main_models
+from darabonba.model import DaraModel
+
+class ListHotTopicSummariesResponseBody(DaraModel):
+    def __init__(
+        self,
+        code: str = None,
+        data: List[main_models.ListHotTopicSummariesResponseBodyData] = None,
+        http_status_code: int = None,
+        max_results: int = None,
+        message: str = None,
+        next_token: str = None,
+        request_id: str = None,
+        success: bool = None,
+        total_count: int = None,
+    ):
+        self.code = code
+        self.data = data
+        self.http_status_code = http_status_code
+        self.max_results = max_results
+        self.message = message
+        self.next_token = next_token
+        self.request_id = request_id
+        self.success = success
+        self.total_count = total_count
+
+    def validate(self):
+        if self.data:
+            for v1 in self.data:
+                if v1:
+                    v1.validate()
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.code is not None:
+            result['code'] = self.code
+
+        result['data'] = []
+        if self.data is not None:
+            for k1 in self.data:
+                result['data'].append(k1.to_map() if k1 else None)
+
+        if self.http_status_code is not None:
+            result['httpStatusCode'] = self.http_status_code
+
+        if self.max_results is not None:
+            result['maxResults'] = self.max_results
+
+        if self.message is not None:
+            result['message'] = self.message
+
+        if self.next_token is not None:
+            result['nextToken'] = self.next_token
+
+        if self.request_id is not None:
+            result['requestId'] = self.request_id
+
+        if self.success is not None:
+            result['success'] = self.success
+
+        if self.total_count is not None:
+            result['totalCount'] = self.total_count
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('code') is not None:
+            self.code = m.get('code')
+
+        self.data = []
+        if m.get('data') is not None:
+            for k1 in m.get('data'):
+                temp_model = main_models.ListHotTopicSummariesResponseBodyData()
+                self.data.append(temp_model.from_map(k1))
+
+        if m.get('httpStatusCode') is not None:
+            self.http_status_code = m.get('httpStatusCode')
+
+        if m.get('maxResults') is not None:
+            self.max_results = m.get('maxResults')
+
+        if m.get('message') is not None:
+            self.message = m.get('message')
+
+        if m.get('nextToken') is not None:
+            self.next_token = m.get('nextToken')
+
+        if m.get('requestId') is not None:
+            self.request_id = m.get('requestId')
+
+        if m.get('success') is not None:
+            self.success = m.get('success')
+
+        if m.get('totalCount') is not None:
+            self.total_count = m.get('totalCount')
+
+        return self
+
+class ListHotTopicSummariesResponseBodyData(DaraModel):
+    def __init__(
+        self,
+        category: str = None,
+        hot_topic: str = None,
+        hot_topic_version: str = None,
+        hot_value: float = None,
+        id: str = None,
+        news: List[main_models.ListHotTopicSummariesResponseBodyDataNews] = None,
+        summary: main_models.ListHotTopicSummariesResponseBodyDataSummary = None,
+        text_summary: str = None,
+    ):
+        self.category = category
+        self.hot_topic = hot_topic
+        self.hot_topic_version = hot_topic_version
+        self.hot_value = hot_value
+        self.id = id
+        self.news = news
+        self.summary = summary
+        self.text_summary = text_summary
+
+    def validate(self):
+        if self.news:
+            for v1 in self.news:
+                if v1:
+                    v1.validate()
+        if self.summary:
+            self.summary.validate()
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.category is not None:
+            result['category'] = self.category
+
+        if self.hot_topic is not None:
+            result['hotTopic'] = self.hot_topic
+
+        if self.hot_topic_version is not None:
+            result['hotTopicVersion'] = self.hot_topic_version
+
+        if self.hot_value is not None:
+            result['hotValue'] = self.hot_value
+
+        if self.id is not None:
+            result['id'] = self.id
+
+        result['news'] = []
+        if self.news is not None:
+            for k1 in self.news:
+                result['news'].append(k1.to_map() if k1 else None)
+
+        if self.summary is not None:
+            result['summary'] = self.summary.to_map()
+
+        if self.text_summary is not None:
+            result['textSummary'] = self.text_summary
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('category') is not None:
+            self.category = m.get('category')
+
+        if m.get('hotTopic') is not None:
+            self.hot_topic = m.get('hotTopic')
+
+        if m.get('hotTopicVersion') is not None:
+            self.hot_topic_version = m.get('hotTopicVersion')
+
+        if m.get('hotValue') is not None:
+            self.hot_value = m.get('hotValue')
+
+        if m.get('id') is not None:
+            self.id = m.get('id')
+
+        self.news = []
+        if m.get('news') is not None:
+            for k1 in m.get('news'):
+                temp_model = main_models.ListHotTopicSummariesResponseBodyDataNews()
+                self.news.append(temp_model.from_map(k1))
+
+        if m.get('summary') is not None:
+            temp_model = main_models.ListHotTopicSummariesResponseBodyDataSummary()
+            self.summary = temp_model.from_map(m.get('summary'))
+
+        if m.get('textSummary') is not None:
+            self.text_summary = m.get('textSummary')
+
+        return self
+
+class ListHotTopicSummariesResponseBodyDataSummary(DaraModel):
+    def __init__(
+        self,
+        summaries: List[main_models.ListHotTopicSummariesResponseBodyDataSummarySummaries] = None,
+    ):
+        self.summaries = summaries
+
+    def validate(self):
+        if self.summaries:
+            for v1 in self.summaries:
+                if v1:
+                    v1.validate()
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        result['summaries'] = []
+        if self.summaries is not None:
+            for k1 in self.summaries:
+                result['summaries'].append(k1.to_map() if k1 else None)
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        self.summaries = []
+        if m.get('summaries') is not None:
+            for k1 in m.get('summaries'):
+                temp_model = main_models.ListHotTopicSummariesResponseBodyDataSummarySummaries()
+                self.summaries.append(temp_model.from_map(k1))
+
+        return self
+
+class ListHotTopicSummariesResponseBodyDataSummarySummaries(DaraModel):
+    def __init__(
+        self,
+        summary: str = None,
+        title: str = None,
+    ):
+        self.summary = summary
+        self.title = title
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.summary is not None:
+            result['summary'] = self.summary
+
+        if self.title is not None:
+            result['title'] = self.title
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('summary') is not None:
+            self.summary = m.get('summary')
+
+        if m.get('title') is not None:
+            self.title = m.get('title')
+
+        return self
+
+class ListHotTopicSummariesResponseBodyDataNews(DaraModel):
+    def __init__(
+        self,
+        comments: List[main_models.ListHotTopicSummariesResponseBodyDataNewsComments] = None,
+        content: str = None,
+        pub_time: str = None,
+        title: str = None,
+        url: str = None,
+    ):
+        self.comments = comments
+        self.content = content
+        self.pub_time = pub_time
+        self.title = title
+        # url
+        self.url = url
+
+    def validate(self):
+        if self.comments:
+            for v1 in self.comments:
+                if v1:
+                    v1.validate()
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        result['comments'] = []
+        if self.comments is not None:
+            for k1 in self.comments:
+                result['comments'].append(k1.to_map() if k1 else None)
+
+        if self.content is not None:
+            result['content'] = self.content
+
+        if self.pub_time is not None:
+            result['pubTime'] = self.pub_time
+
+        if self.title is not None:
+            result['title'] = self.title
+
+        if self.url is not None:
+            result['url'] = self.url
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        self.comments = []
+        if m.get('comments') is not None:
+            for k1 in m.get('comments'):
+                temp_model = main_models.ListHotTopicSummariesResponseBodyDataNewsComments()
+                self.comments.append(temp_model.from_map(k1))
+
+        if m.get('content') is not None:
+            self.content = m.get('content')
+
+        if m.get('pubTime') is not None:
+            self.pub_time = m.get('pubTime')
+
+        if m.get('title') is not None:
+            self.title = m.get('title')
+
+        if m.get('url') is not None:
+            self.url = m.get('url')
+
+        return self
+
+class ListHotTopicSummariesResponseBodyDataNewsComments(DaraModel):
+    def __init__(
+        self,
+        text: str = None,
+    ):
+        self.text = text
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.text is not None:
+            result['text'] = self.text
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('text') is not None:
+            self.text = m.get('text')
+
+        return self
+
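Illustrative only, not part of the published diff: a minimal sketch of how the new hot-topic summaries model round-trips a camelCase payload through from_map()/to_map(). The sample values are invented; the keys mirror the generated code above, and the classes are assumed to be re-exported from the models package (the generated files themselves resolve them via main_models).

# Sketch, not from the package diff. Keys follow to_map()/from_map() above; values are made up.
from alibabacloud_quanmiaolightapp20240801 import models as main_models

payload = {
    'requestId': 'req-0001',
    'totalCount': 1,
    'nextToken': 'next-page-token',
    'data': [{
        'hotTopic': 'sample topic',
        'hotValue': 97.5,
        'news': [{
            'title': 'sample news item',
            'url': 'https://example.com/news/1',
            'comments': [{'text': 'sample comment'}],
        }],
        'summary': {'summaries': [{'title': 'angle A', 'summary': 'one-line take'}]},
    }],
}

body = main_models.ListHotTopicSummariesResponseBody().from_map(payload)
print(body.data[0].news[0].comments[0].text)          # nested models are hydrated recursively
assert body.to_map()['data'][0]['hotTopic'] == 'sample topic'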
alibabacloud_quanmiaolightapp20240801/models/_model_usage.py (new file)
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from darabonba.model import DaraModel
+
+class ModelUsage(DaraModel):
+    def __init__(
+        self,
+        input_tokens: int = None,
+        output_tokens: int = None,
+        total_tokens: int = None,
+    ):
+        self.input_tokens = input_tokens
+        self.output_tokens = output_tokens
+        self.total_tokens = total_tokens
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.input_tokens is not None:
+            result['inputTokens'] = self.input_tokens
+
+        if self.output_tokens is not None:
+            result['outputTokens'] = self.output_tokens
+
+        if self.total_tokens is not None:
+            result['totalTokens'] = self.total_tokens
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('inputTokens') is not None:
+            self.input_tokens = m.get('inputTokens')
+
+        if m.get('outputTokens') is not None:
+            self.output_tokens = m.get('outputTokens')
+
+        if m.get('totalTokens') is not None:
+            self.total_tokens = m.get('totalTokens')
+
+        return self
+
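Illustrative only: ModelUsage is a shared token-accounting block. A quick round-trip, again assuming the class is re-exported from the models package; the numbers are made up.

# Sketch, not from the package diff; values are made up.
from alibabacloud_quanmiaolightapp20240801 import models as main_models

usage = main_models.ModelUsage(input_tokens=120, output_tokens=48, total_tokens=168)
wire = usage.to_map()                                 # camelCase keys: inputTokens, outputTokens, totalTokens
assert main_models.ModelUsage().from_map(wire).total_tokens == 168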
alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_request.py (new file)
@@ -0,0 +1,203 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from typing import List
+
+from alibabacloud_quanmiaolightapp20240801 import models as main_models
+from darabonba.model import DaraModel
+
+class RunEnterpriseVocAnalysisRequest(DaraModel):
+    def __init__(
+        self,
+        ak_proxy: str = None,
+        api_key: str = None,
+        content: str = None,
+        extra_info: str = None,
+        filter_tags: List[main_models.RunEnterpriseVocAnalysisRequestFilterTags] = None,
+        model_id: str = None,
+        output_format: str = None,
+        source_trace: bool = None,
+        tags: List[main_models.RunEnterpriseVocAnalysisRequestTags] = None,
+        task_description: str = None,
+    ):
+        self.ak_proxy = ak_proxy
+        self.api_key = api_key
+        # Text content to analyze for VOC (choose one of content, contents, url, or fileKey; listed in ascending order of priority)
+        self.content = content
+        self.extra_info = extra_info
+        # Filter tags used to select content that meets the given conditions.
+        self.filter_tags = filter_tags
+        self.model_id = model_id
+        # Format of the returned result; json and text are supported.
+        self.output_format = output_format
+        self.source_trace = source_trace
+        # Business tag taxonomy used to classify and analyze the text content.
+        self.tags = tags
+        self.task_description = task_description
+
+    def validate(self):
+        if self.filter_tags:
+            for v1 in self.filter_tags:
+                if v1:
+                    v1.validate()
+        if self.tags:
+            for v1 in self.tags:
+                if v1:
+                    v1.validate()
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.ak_proxy is not None:
+            result['akProxy'] = self.ak_proxy
+
+        if self.api_key is not None:
+            result['apiKey'] = self.api_key
+
+        if self.content is not None:
+            result['content'] = self.content
+
+        if self.extra_info is not None:
+            result['extraInfo'] = self.extra_info
+
+        result['filterTags'] = []
+        if self.filter_tags is not None:
+            for k1 in self.filter_tags:
+                result['filterTags'].append(k1.to_map() if k1 else None)
+
+        if self.model_id is not None:
+            result['modelId'] = self.model_id
+
+        if self.output_format is not None:
+            result['outputFormat'] = self.output_format
+
+        if self.source_trace is not None:
+            result['sourceTrace'] = self.source_trace
+
+        result['tags'] = []
+        if self.tags is not None:
+            for k1 in self.tags:
+                result['tags'].append(k1.to_map() if k1 else None)
+
+        if self.task_description is not None:
+            result['taskDescription'] = self.task_description
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('akProxy') is not None:
+            self.ak_proxy = m.get('akProxy')
+
+        if m.get('apiKey') is not None:
+            self.api_key = m.get('apiKey')
+
+        if m.get('content') is not None:
+            self.content = m.get('content')
+
+        if m.get('extraInfo') is not None:
+            self.extra_info = m.get('extraInfo')
+
+        self.filter_tags = []
+        if m.get('filterTags') is not None:
+            for k1 in m.get('filterTags'):
+                temp_model = main_models.RunEnterpriseVocAnalysisRequestFilterTags()
+                self.filter_tags.append(temp_model.from_map(k1))
+
+        if m.get('modelId') is not None:
+            self.model_id = m.get('modelId')
+
+        if m.get('outputFormat') is not None:
+            self.output_format = m.get('outputFormat')
+
+        if m.get('sourceTrace') is not None:
+            self.source_trace = m.get('sourceTrace')
+
+        self.tags = []
+        if m.get('tags') is not None:
+            for k1 in m.get('tags'):
+                temp_model = main_models.RunEnterpriseVocAnalysisRequestTags()
+                self.tags.append(temp_model.from_map(k1))
+
+        if m.get('taskDescription') is not None:
+            self.task_description = m.get('taskDescription')
+
+        return self
+
+class RunEnterpriseVocAnalysisRequestTags(DaraModel):
+    def __init__(
+        self,
+        tag_define_prompt: str = None,
+        tag_name: str = None,
+    ):
+        # Prompt that defines the tag
+        self.tag_define_prompt = tag_define_prompt
+        # Tag name
+        self.tag_name = tag_name
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.tag_define_prompt is not None:
+            result['tagDefinePrompt'] = self.tag_define_prompt
+
+        if self.tag_name is not None:
+            result['tagName'] = self.tag_name
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('tagDefinePrompt') is not None:
+            self.tag_define_prompt = m.get('tagDefinePrompt')
+
+        if m.get('tagName') is not None:
+            self.tag_name = m.get('tagName')
+
+        return self
+
+class RunEnterpriseVocAnalysisRequestFilterTags(DaraModel):
+    def __init__(
+        self,
+        tag_define_prompt: str = None,
+        tag_name: str = None,
+    ):
+        # Prompt that defines the tag
+        self.tag_define_prompt = tag_define_prompt
+        # Tag name
+        self.tag_name = tag_name
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.tag_define_prompt is not None:
+            result['tagDefinePrompt'] = self.tag_define_prompt
+
+        if self.tag_name is not None:
+            result['tagName'] = self.tag_name
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('tagDefinePrompt') is not None:
+            self.tag_define_prompt = m.get('tagDefinePrompt')
+
+        if m.get('tagName') is not None:
+            self.tag_name = m.get('tagName')
+
+        return self
+
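Illustrative only, not part of the diff: a sketch of building the new VOC-analysis request with a small tag taxonomy and serializing it the way the generated client does before sending. The tag names, prompts, and sample text are invented; only the model classes and fields shown above are relied on.

# Sketch, not from the package diff; field values are made up.
from alibabacloud_quanmiaolightapp20240801 import models as main_models

request = main_models.RunEnterpriseVocAnalysisRequest(
    content='The battery drains too fast and the after-sales hotline never picks up.',
    output_format='json',
    source_trace=True,
    tags=[
        main_models.RunEnterpriseVocAnalysisRequestTags(
            tag_name='battery life',
            tag_define_prompt='Complaints about battery capacity or drain rate',
        ),
    ],
    filter_tags=[
        main_models.RunEnterpriseVocAnalysisRequestFilterTags(tag_name='after-sales service'),
    ],
)
request.validate()                      # walks the nested tag models
wire = request.to_map()                 # camelCase keys: content, outputFormat, sourceTrace, tags, filterTags, ...
assert wire['tags'][0]['tagName'] == 'battery life'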
alibabacloud_quanmiaolightapp20240801/models/_run_enterprise_voc_analysis_response.py (new file)
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# This file is auto-generated, don't edit it. Thanks.
+from __future__ import annotations
+
+from typing import Dict
+
+from alibabacloud_quanmiaolightapp20240801 import models as main_models
+from darabonba.model import DaraModel
+
+class RunEnterpriseVocAnalysisResponse(DaraModel):
+    def __init__(
+        self,
+        headers: Dict[str, str] = None,
+        status_code: int = None,
+        body: main_models.RunEnterpriseVocAnalysisResponseBody = None,
+    ):
+        self.headers = headers
+        self.status_code = status_code
+        self.body = body
+
+    def validate(self):
+        if self.body:
+            self.body.validate()
+
+    def to_map(self):
+        result = dict()
+        _map = super().to_map()
+        if _map is not None:
+            result = _map
+        if self.headers is not None:
+            result['headers'] = self.headers
+
+        if self.status_code is not None:
+            result['statusCode'] = self.status_code
+
+        if self.body is not None:
+            result['body'] = self.body.to_map()
+
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('headers') is not None:
+            self.headers = m.get('headers')
+
+        if m.get('statusCode') is not None:
+            self.status_code = m.get('statusCode')
+
+        if m.get('body') is not None:
+            temp_model = main_models.RunEnterpriseVocAnalysisResponseBody()
+            self.body = temp_model.from_map(m.get('body'))
+
+        return self
+
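Illustrative only: the response wrapper keeps the SDK-wide headers/statusCode/body shape. The body schema lives in _run_enterprise_voc_analysis_response_body.py, which is not shown in this hunk, so the sketch leaves it empty; header names and values below are made up.

# Sketch, not from the package diff; values are made up.
from alibabacloud_quanmiaolightapp20240801 import models as main_models

resp = main_models.RunEnterpriseVocAnalysisResponse().from_map({
    'statusCode': 200,
    'headers': {'x-acs-request-id': 'req-0002'},
    'body': {},
})
print(resp.status_code, resp.headers['x-acs-request-id'])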