alibabacloud-quanmiaolightapp20240801 1.2.0__tar.gz → 1.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/ChangeLog.md +5 -0
- {alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/PKG-INFO +1 -1
- alibabacloud_quanmiaolightapp20240801-1.3.0/alibabacloud_quanmiaolightapp20240801/__init__.py +1 -0
- {alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/alibabacloud_quanmiaolightapp20240801/client.py +236 -0
- {alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/alibabacloud_quanmiaolightapp20240801/models.py +642 -0
- {alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/alibabacloud_quanmiaolightapp20240801.egg-info/PKG-INFO +1 -1
- {alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/alibabacloud_quanmiaolightapp20240801.egg-info/requires.txt +1 -1
- {alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/setup.py +2 -2
- alibabacloud_quanmiaolightapp20240801-1.2.0/alibabacloud_quanmiaolightapp20240801/__init__.py +0 -1
- {alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/LICENSE +0 -0
- {alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/MANIFEST.in +0 -0
- {alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/README-CN.md +0 -0
- {alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/README.md +0 -0
- {alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/alibabacloud_quanmiaolightapp20240801.egg-info/SOURCES.txt +0 -0
- {alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/alibabacloud_quanmiaolightapp20240801.egg-info/dependency_links.txt +0 -0
- {alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/alibabacloud_quanmiaolightapp20240801.egg-info/top_level.txt +0 -0
- {alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/setup.cfg +0 -0
{alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: alibabacloud_quanmiaolightapp20240801
-Version: 1.2.0
+Version: 1.3.0
 Summary: Alibaba Cloud QuanMiaoLightApp (20240801) SDK Library for Python
 Home-page: https://github.com/aliyun/alibabacloud-python-sdk
 Author: Alibaba Cloud SDK
alibabacloud_quanmiaolightapp20240801-1.3.0/alibabacloud_quanmiaolightapp20240801/__init__.py
ADDED
@@ -0,0 +1 @@
+__version__ = '1.3.0'
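After upgrading, the bump can be confirmed at runtime from the attribute this new `__init__.py` defines; a minimal check, assuming the 1.3.0 distribution is installed:

```python
# Minimal check of the installed release (assumes the 1.3.0 distribution is installed).
import alibabacloud_quanmiaolightapp20240801

print(alibabacloud_quanmiaolightapp20240801.__version__)  # expected: '1.3.0'
```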
{alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/alibabacloud_quanmiaolightapp20240801/client.py
RENAMED
@@ -41,6 +41,114 @@ class Client(OpenApiClient):
             return endpoint_map.get(region_id)
         return EndpointUtilClient.get_endpoint_rules(product_id, region_id, endpoint_rule, network, suffix)
 
+    def generate_broadcast_news_with_options(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.GenerateBroadcastNewsRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.GenerateBroadcastNewsResponse:
+        """
+        @summary 新闻播报-抽取分类获取播报热点
+
+        @param request: GenerateBroadcastNewsRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: GenerateBroadcastNewsResponse
+        """
+        UtilClient.validate_model(request)
+        body = {}
+        if not UtilClient.is_unset(request.prompt):
+            body['prompt'] = request.prompt
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            body=OpenApiUtilClient.parse_to_map(body)
+        )
+        params = open_api_models.Params(
+            action='GenerateBroadcastNews',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/GenerateBroadcastNews',
+            method='POST',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='formData',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.GenerateBroadcastNewsResponse(),
+            self.call_api(params, req, runtime)
+        )
+
+    async def generate_broadcast_news_with_options_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.GenerateBroadcastNewsRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.GenerateBroadcastNewsResponse:
+        """
+        @summary 新闻播报-抽取分类获取播报热点
+
+        @param request: GenerateBroadcastNewsRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: GenerateBroadcastNewsResponse
+        """
+        UtilClient.validate_model(request)
+        body = {}
+        if not UtilClient.is_unset(request.prompt):
+            body['prompt'] = request.prompt
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            body=OpenApiUtilClient.parse_to_map(body)
+        )
+        params = open_api_models.Params(
+            action='GenerateBroadcastNews',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/GenerateBroadcastNews',
+            method='POST',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='formData',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.GenerateBroadcastNewsResponse(),
+            await self.call_api_async(params, req, runtime)
+        )
+
+    def generate_broadcast_news(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.GenerateBroadcastNewsRequest,
+    ) -> quan_miao_light_app_20240801_models.GenerateBroadcastNewsResponse:
+        """
+        @summary 新闻播报-抽取分类获取播报热点
+
+        @param request: GenerateBroadcastNewsRequest
+        @return: GenerateBroadcastNewsResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return self.generate_broadcast_news_with_options(workspace_id, request, headers, runtime)
+
+    async def generate_broadcast_news_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.GenerateBroadcastNewsRequest,
+    ) -> quan_miao_light_app_20240801_models.GenerateBroadcastNewsResponse:
+        """
+        @summary 新闻播报-抽取分类获取播报热点
+
+        @param request: GenerateBroadcastNewsRequest
+        @return: GenerateBroadcastNewsResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return await self.generate_broadcast_news_with_options_async(workspace_id, request, headers, runtime)
+
     def list_hot_topic_summaries_with_options(
         self,
         workspace_id: str,
@@ -165,6 +273,126 @@ class Client(OpenApiClient):
         headers = {}
         return await self.list_hot_topic_summaries_with_options_async(workspace_id, request, headers, runtime)
 
+    def run_comment_generation_with_options(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.RunCommentGenerationRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.RunCommentGenerationResponse:
+        """
+        @summary 评论生成服务
+
+        @param request: RunCommentGenerationRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: RunCommentGenerationResponse
+        """
+        UtilClient.validate_model(request)
+        body = {}
+        if not UtilClient.is_unset(request.length):
+            body['length'] = request.length
+        if not UtilClient.is_unset(request.num_comments):
+            body['numComments'] = request.num_comments
+        if not UtilClient.is_unset(request.source_material):
+            body['sourceMaterial'] = request.source_material
+        if not UtilClient.is_unset(request.style):
+            body['style'] = request.style
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            body=OpenApiUtilClient.parse_to_map(body)
+        )
+        params = open_api_models.Params(
+            action='RunCommentGeneration',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/runCommentGeneration',
+            method='POST',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='formData',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.RunCommentGenerationResponse(),
+            self.call_api(params, req, runtime)
+        )
+
+    async def run_comment_generation_with_options_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.RunCommentGenerationRequest,
+        headers: Dict[str, str],
+        runtime: util_models.RuntimeOptions,
+    ) -> quan_miao_light_app_20240801_models.RunCommentGenerationResponse:
+        """
+        @summary 评论生成服务
+
+        @param request: RunCommentGenerationRequest
+        @param headers: map
+        @param runtime: runtime options for this request RuntimeOptions
+        @return: RunCommentGenerationResponse
+        """
+        UtilClient.validate_model(request)
+        body = {}
+        if not UtilClient.is_unset(request.length):
+            body['length'] = request.length
+        if not UtilClient.is_unset(request.num_comments):
+            body['numComments'] = request.num_comments
+        if not UtilClient.is_unset(request.source_material):
+            body['sourceMaterial'] = request.source_material
+        if not UtilClient.is_unset(request.style):
+            body['style'] = request.style
+        req = open_api_models.OpenApiRequest(
+            headers=headers,
+            body=OpenApiUtilClient.parse_to_map(body)
+        )
+        params = open_api_models.Params(
+            action='RunCommentGeneration',
+            version='2024-08-01',
+            protocol='HTTPS',
+            pathname=f'/{OpenApiUtilClient.get_encode_param(workspace_id)}/quanmiao/lightapp/runCommentGeneration',
+            method='POST',
+            auth_type='AK',
+            style='ROA',
+            req_body_type='formData',
+            body_type='json'
+        )
+        return TeaCore.from_map(
+            quan_miao_light_app_20240801_models.RunCommentGenerationResponse(),
+            await self.call_api_async(params, req, runtime)
+        )
+
+    def run_comment_generation(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.RunCommentGenerationRequest,
+    ) -> quan_miao_light_app_20240801_models.RunCommentGenerationResponse:
+        """
+        @summary 评论生成服务
+
+        @param request: RunCommentGenerationRequest
+        @return: RunCommentGenerationResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return self.run_comment_generation_with_options(workspace_id, request, headers, runtime)
+
+    async def run_comment_generation_async(
+        self,
+        workspace_id: str,
+        request: quan_miao_light_app_20240801_models.RunCommentGenerationRequest,
+    ) -> quan_miao_light_app_20240801_models.RunCommentGenerationResponse:
+        """
+        @summary 评论生成服务
+
+        @param request: RunCommentGenerationRequest
+        @return: RunCommentGenerationResponse
+        """
+        runtime = util_models.RuntimeOptions()
+        headers = {}
+        return await self.run_comment_generation_with_options_async(workspace_id, request, headers, runtime)
+
     def run_marketing_information_extract_with_options(
         self,
         workspace_id: str,
@@ -824,8 +1052,12 @@ class Client(OpenApiClient):
             body['modelId'] = request.model_id
         if not UtilClient.is_unset(request.original_session_id):
             body['originalSessionId'] = request.original_session_id
+        if not UtilClient.is_unset(request.snapshot_interval):
+            body['snapshotInterval'] = request.snapshot_interval
         if not UtilClient.is_unset(request.task_id):
            body['taskId'] = request.task_id
+        if not UtilClient.is_unset(request.video_extra_info):
+            body['videoExtraInfo'] = request.video_extra_info
         if not UtilClient.is_unset(request.video_model_custom_prompt_template):
             body['videoModelCustomPromptTemplate'] = request.video_model_custom_prompt_template
         if not UtilClient.is_unset(request.video_model_id):
@@ -883,8 +1115,12 @@ class Client(OpenApiClient):
             body['modelId'] = request.model_id
         if not UtilClient.is_unset(request.original_session_id):
             body['originalSessionId'] = request.original_session_id
+        if not UtilClient.is_unset(request.snapshot_interval):
+            body['snapshotInterval'] = request.snapshot_interval
         if not UtilClient.is_unset(request.task_id):
            body['taskId'] = request.task_id
+        if not UtilClient.is_unset(request.video_extra_info):
+            body['videoExtraInfo'] = request.video_extra_info
         if not UtilClient.is_unset(request.video_model_custom_prompt_template):
             body['videoModelCustomPromptTemplate'] = request.video_model_custom_prompt_template
         if not UtilClient.is_unset(request.video_model_id):
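Both new operations follow the same calling convention as the existing light-app methods: construct the request model, then call the plain wrapper (or its `_async` counterpart) with a workspace ID. The sketch below is a minimal illustration and not part of the package: the endpoint, workspace ID, and credential values are placeholders, and the `Config` object comes from the `alibabacloud_tea_openapi` dependency declared in setup.py. For reference, the Chinese docstring summaries translate roughly to "news broadcast: extract and classify broadcast hot topics" (GenerateBroadcastNews) and "comment generation service" (RunCommentGeneration).

```python
# Illustrative sketch only (not part of the package): endpoint, workspace ID and
# credentials are placeholders; Config comes from the alibabacloud_tea_openapi dependency.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_quanmiaolightapp20240801.client import Client
from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

config = open_api_models.Config(
    access_key_id='<your-access-key-id>',
    access_key_secret='<your-access-key-secret>',
    endpoint='quanmiaolightapp.cn-beijing.aliyuncs.com',  # assumption: use your region's endpoint
)
client = Client(config)
workspace_id = '<your-workspace-id>'

# New in 1.3.0: GenerateBroadcastNews (news-broadcast hot-topic extraction and classification).
news_request = quanmiao_models.GenerateBroadcastNewsRequest(prompt='<prompt text>')
news_response = client.generate_broadcast_news(workspace_id, news_request)
for summary in news_response.body.data.hot_topic_summaries or []:
    print(summary.hot_topic, summary.text_summary)

# New in 1.3.0: RunCommentGeneration (comment generation); the request fields are typed as strings.
comment_request = quanmiao_models.RunCommentGenerationRequest(
    source_material='<text to comment on>',
    num_comments='3',
    length='50',
    style='<comment style>',
)
comment_response = client.run_comment_generation(workspace_id, comment_request)
print(comment_response.body.payload.output.text)
```

The response shapes mirror the new model classes added in models.py below: `GenerateBroadcastNewsResponse.body.data.hot_topic_summaries` and `RunCommentGenerationResponse.body.payload.output.text`.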
{alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/alibabacloud_quanmiaolightapp20240801/models.py
RENAMED
@@ -4,6 +4,333 @@ from Tea.model import TeaModel
 from typing import List, Dict
 
 
+class GenerateBroadcastNewsRequest(TeaModel):
+    def __init__(
+        self,
+        prompt: str = None,
+    ):
+        # This parameter is required.
+        self.prompt = prompt
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.prompt is not None:
+            result['prompt'] = self.prompt
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('prompt') is not None:
+            self.prompt = m.get('prompt')
+        return self
+
+
+class GenerateBroadcastNewsResponseBodyDataHotTopicSummariesImages(TeaModel):
+    def __init__(
+        self,
+        url: str = None,
+    ):
+        self.url = url
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.url is not None:
+            result['url'] = self.url
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('url') is not None:
+            self.url = m.get('url')
+        return self
+
+
+class GenerateBroadcastNewsResponseBodyDataHotTopicSummaries(TeaModel):
+    def __init__(
+        self,
+        category: str = None,
+        hot_topic: str = None,
+        hot_topic_version: str = None,
+        hot_value: float = None,
+        id: str = None,
+        images: List[GenerateBroadcastNewsResponseBodyDataHotTopicSummariesImages] = None,
+        text_summary: str = None,
+    ):
+        self.category = category
+        self.hot_topic = hot_topic
+        self.hot_topic_version = hot_topic_version
+        self.hot_value = hot_value
+        self.id = id
+        self.images = images
+        self.text_summary = text_summary
+
+    def validate(self):
+        if self.images:
+            for k in self.images:
+                if k:
+                    k.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.category is not None:
+            result['category'] = self.category
+        if self.hot_topic is not None:
+            result['hotTopic'] = self.hot_topic
+        if self.hot_topic_version is not None:
+            result['hotTopicVersion'] = self.hot_topic_version
+        if self.hot_value is not None:
+            result['hotValue'] = self.hot_value
+        if self.id is not None:
+            result['id'] = self.id
+        result['images'] = []
+        if self.images is not None:
+            for k in self.images:
+                result['images'].append(k.to_map() if k else None)
+        if self.text_summary is not None:
+            result['textSummary'] = self.text_summary
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('category') is not None:
+            self.category = m.get('category')
+        if m.get('hotTopic') is not None:
+            self.hot_topic = m.get('hotTopic')
+        if m.get('hotTopicVersion') is not None:
+            self.hot_topic_version = m.get('hotTopicVersion')
+        if m.get('hotValue') is not None:
+            self.hot_value = m.get('hotValue')
+        if m.get('id') is not None:
+            self.id = m.get('id')
+        self.images = []
+        if m.get('images') is not None:
+            for k in m.get('images'):
+                temp_model = GenerateBroadcastNewsResponseBodyDataHotTopicSummariesImages()
+                self.images.append(temp_model.from_map(k))
+        if m.get('textSummary') is not None:
+            self.text_summary = m.get('textSummary')
+        return self
+
+
+class GenerateBroadcastNewsResponseBodyDataUsage(TeaModel):
+    def __init__(
+        self,
+        input_tokens: int = None,
+        output_tokens: int = None,
+        total_tokens: int = None,
+    ):
+        self.input_tokens = input_tokens
+        self.output_tokens = output_tokens
+        self.total_tokens = total_tokens
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.input_tokens is not None:
+            result['inputTokens'] = self.input_tokens
+        if self.output_tokens is not None:
+            result['outputTokens'] = self.output_tokens
+        if self.total_tokens is not None:
+            result['totalTokens'] = self.total_tokens
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('inputTokens') is not None:
+            self.input_tokens = m.get('inputTokens')
+        if m.get('outputTokens') is not None:
+            self.output_tokens = m.get('outputTokens')
+        if m.get('totalTokens') is not None:
+            self.total_tokens = m.get('totalTokens')
+        return self
+
+
+class GenerateBroadcastNewsResponseBodyData(TeaModel):
+    def __init__(
+        self,
+        hot_topic_summaries: List[GenerateBroadcastNewsResponseBodyDataHotTopicSummaries] = None,
+        session_id: str = None,
+        task_id: str = None,
+        text: str = None,
+        usage: GenerateBroadcastNewsResponseBodyDataUsage = None,
+    ):
+        self.hot_topic_summaries = hot_topic_summaries
+        self.session_id = session_id
+        self.task_id = task_id
+        self.text = text
+        self.usage = usage
+
+    def validate(self):
+        if self.hot_topic_summaries:
+            for k in self.hot_topic_summaries:
+                if k:
+                    k.validate()
+        if self.usage:
+            self.usage.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        result['hotTopicSummaries'] = []
+        if self.hot_topic_summaries is not None:
+            for k in self.hot_topic_summaries:
+                result['hotTopicSummaries'].append(k.to_map() if k else None)
+        if self.session_id is not None:
+            result['sessionId'] = self.session_id
+        if self.task_id is not None:
+            result['taskId'] = self.task_id
+        if self.text is not None:
+            result['text'] = self.text
+        if self.usage is not None:
+            result['usage'] = self.usage.to_map()
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        self.hot_topic_summaries = []
+        if m.get('hotTopicSummaries') is not None:
+            for k in m.get('hotTopicSummaries'):
+                temp_model = GenerateBroadcastNewsResponseBodyDataHotTopicSummaries()
+                self.hot_topic_summaries.append(temp_model.from_map(k))
+        if m.get('sessionId') is not None:
+            self.session_id = m.get('sessionId')
+        if m.get('taskId') is not None:
+            self.task_id = m.get('taskId')
+        if m.get('text') is not None:
+            self.text = m.get('text')
+        if m.get('usage') is not None:
+            temp_model = GenerateBroadcastNewsResponseBodyDataUsage()
+            self.usage = temp_model.from_map(m['usage'])
+        return self
+
+
+class GenerateBroadcastNewsResponseBody(TeaModel):
+    def __init__(
+        self,
+        code: str = None,
+        data: GenerateBroadcastNewsResponseBodyData = None,
+        http_status_code: int = None,
+        message: str = None,
+        request_id: str = None,
+        success: bool = None,
+    ):
+        self.code = code
+        self.data = data
+        self.http_status_code = http_status_code
+        self.message = message
+        self.request_id = request_id
+        self.success = success
+
+    def validate(self):
+        if self.data:
+            self.data.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.code is not None:
+            result['code'] = self.code
+        if self.data is not None:
+            result['data'] = self.data.to_map()
+        if self.http_status_code is not None:
+            result['httpStatusCode'] = self.http_status_code
+        if self.message is not None:
+            result['message'] = self.message
+        if self.request_id is not None:
+            result['requestId'] = self.request_id
+        if self.success is not None:
+            result['success'] = self.success
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('code') is not None:
+            self.code = m.get('code')
+        if m.get('data') is not None:
+            temp_model = GenerateBroadcastNewsResponseBodyData()
+            self.data = temp_model.from_map(m['data'])
+        if m.get('httpStatusCode') is not None:
+            self.http_status_code = m.get('httpStatusCode')
+        if m.get('message') is not None:
+            self.message = m.get('message')
+        if m.get('requestId') is not None:
+            self.request_id = m.get('requestId')
+        if m.get('success') is not None:
+            self.success = m.get('success')
+        return self
+
+
+class GenerateBroadcastNewsResponse(TeaModel):
+    def __init__(
+        self,
+        headers: Dict[str, str] = None,
+        status_code: int = None,
+        body: GenerateBroadcastNewsResponseBody = None,
+    ):
+        self.headers = headers
+        self.status_code = status_code
+        self.body = body
+
+    def validate(self):
+        if self.body:
+            self.body.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.headers is not None:
+            result['headers'] = self.headers
+        if self.status_code is not None:
+            result['statusCode'] = self.status_code
+        if self.body is not None:
+            result['body'] = self.body.to_map()
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('headers') is not None:
+            self.headers = m.get('headers')
+        if m.get('statusCode') is not None:
+            self.status_code = m.get('statusCode')
+        if m.get('body') is not None:
+            temp_model = GenerateBroadcastNewsResponseBody()
+            self.body = temp_model.from_map(m['body'])
+        return self
+
+
 class ListHotTopicSummariesRequest(TeaModel):
     def __init__(
         self,
@@ -414,6 +741,297 @@ class ListHotTopicSummariesResponse(TeaModel):
         return self
 
 
+class RunCommentGenerationRequest(TeaModel):
+    def __init__(
+        self,
+        length: str = None,
+        num_comments: str = None,
+        source_material: str = None,
+        style: str = None,
+    ):
+        self.length = length
+        self.num_comments = num_comments
+        self.source_material = source_material
+        self.style = style
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.length is not None:
+            result['length'] = self.length
+        if self.num_comments is not None:
+            result['numComments'] = self.num_comments
+        if self.source_material is not None:
+            result['sourceMaterial'] = self.source_material
+        if self.style is not None:
+            result['style'] = self.style
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('length') is not None:
+            self.length = m.get('length')
+        if m.get('numComments') is not None:
+            self.num_comments = m.get('numComments')
+        if m.get('sourceMaterial') is not None:
+            self.source_material = m.get('sourceMaterial')
+        if m.get('style') is not None:
+            self.style = m.get('style')
+        return self
+
+
+class RunCommentGenerationResponseBodyHeader(TeaModel):
+    def __init__(
+        self,
+        event: str = None,
+        event_info: str = None,
+        request_id: str = None,
+        session_id: str = None,
+        task_id: str = None,
+        trace_id: str = None,
+    ):
+        self.event = event
+        self.event_info = event_info
+        self.request_id = request_id
+        self.session_id = session_id
+        self.task_id = task_id
+        self.trace_id = trace_id
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.event is not None:
+            result['event'] = self.event
+        if self.event_info is not None:
+            result['eventInfo'] = self.event_info
+        if self.request_id is not None:
+            result['requestId'] = self.request_id
+        if self.session_id is not None:
+            result['sessionId'] = self.session_id
+        if self.task_id is not None:
+            result['taskId'] = self.task_id
+        if self.trace_id is not None:
+            result['traceId'] = self.trace_id
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('event') is not None:
+            self.event = m.get('event')
+        if m.get('eventInfo') is not None:
+            self.event_info = m.get('eventInfo')
+        if m.get('requestId') is not None:
+            self.request_id = m.get('requestId')
+        if m.get('sessionId') is not None:
+            self.session_id = m.get('sessionId')
+        if m.get('taskId') is not None:
+            self.task_id = m.get('taskId')
+        if m.get('traceId') is not None:
+            self.trace_id = m.get('traceId')
+        return self
+
+
+class RunCommentGenerationResponseBodyPayloadOutput(TeaModel):
+    def __init__(
+        self,
+        text: str = None,
+    ):
+        self.text = text
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.text is not None:
+            result['text'] = self.text
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('text') is not None:
+            self.text = m.get('text')
+        return self
+
+
+class RunCommentGenerationResponseBodyPayloadUsage(TeaModel):
+    def __init__(
+        self,
+        input_tokens: int = None,
+        output_tokens: int = None,
+        total_tokens: int = None,
+    ):
+        self.input_tokens = input_tokens
+        self.output_tokens = output_tokens
+        self.total_tokens = total_tokens
+
+    def validate(self):
+        pass
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.input_tokens is not None:
+            result['inputTokens'] = self.input_tokens
+        if self.output_tokens is not None:
+            result['outputTokens'] = self.output_tokens
+        if self.total_tokens is not None:
+            result['totalTokens'] = self.total_tokens
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('inputTokens') is not None:
+            self.input_tokens = m.get('inputTokens')
+        if m.get('outputTokens') is not None:
+            self.output_tokens = m.get('outputTokens')
+        if m.get('totalTokens') is not None:
+            self.total_tokens = m.get('totalTokens')
+        return self
+
+
+class RunCommentGenerationResponseBodyPayload(TeaModel):
+    def __init__(
+        self,
+        output: RunCommentGenerationResponseBodyPayloadOutput = None,
+        usage: RunCommentGenerationResponseBodyPayloadUsage = None,
+    ):
+        self.output = output
+        self.usage = usage
+
+    def validate(self):
+        if self.output:
+            self.output.validate()
+        if self.usage:
+            self.usage.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.output is not None:
+            result['output'] = self.output.to_map()
+        if self.usage is not None:
+            result['usage'] = self.usage.to_map()
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('output') is not None:
+            temp_model = RunCommentGenerationResponseBodyPayloadOutput()
+            self.output = temp_model.from_map(m['output'])
+        if m.get('usage') is not None:
+            temp_model = RunCommentGenerationResponseBodyPayloadUsage()
+            self.usage = temp_model.from_map(m['usage'])
+        return self
+
+
+class RunCommentGenerationResponseBody(TeaModel):
+    def __init__(
+        self,
+        end: bool = None,
+        header: RunCommentGenerationResponseBodyHeader = None,
+        payload: RunCommentGenerationResponseBodyPayload = None,
+    ):
+        self.end = end
+        self.header = header
+        self.payload = payload
+
+    def validate(self):
+        if self.header:
+            self.header.validate()
+        if self.payload:
+            self.payload.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.end is not None:
+            result['end'] = self.end
+        if self.header is not None:
+            result['header'] = self.header.to_map()
+        if self.payload is not None:
+            result['payload'] = self.payload.to_map()
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('end') is not None:
+            self.end = m.get('end')
+        if m.get('header') is not None:
+            temp_model = RunCommentGenerationResponseBodyHeader()
+            self.header = temp_model.from_map(m['header'])
+        if m.get('payload') is not None:
+            temp_model = RunCommentGenerationResponseBodyPayload()
+            self.payload = temp_model.from_map(m['payload'])
+        return self
+
+
+class RunCommentGenerationResponse(TeaModel):
+    def __init__(
+        self,
+        headers: Dict[str, str] = None,
+        status_code: int = None,
+        body: RunCommentGenerationResponseBody = None,
+    ):
+        self.headers = headers
+        self.status_code = status_code
+        self.body = body
+
+    def validate(self):
+        if self.body:
+            self.body.validate()
+
+    def to_map(self):
+        _map = super().to_map()
+        if _map is not None:
+            return _map
+
+        result = dict()
+        if self.headers is not None:
+            result['headers'] = self.headers
+        if self.status_code is not None:
+            result['statusCode'] = self.status_code
+        if self.body is not None:
+            result['body'] = self.body.to_map()
+        return result
+
+    def from_map(self, m: dict = None):
+        m = m or dict()
+        if m.get('headers') is not None:
+            self.headers = m.get('headers')
+        if m.get('statusCode') is not None:
+            self.status_code = m.get('statusCode')
+        if m.get('body') is not None:
+            temp_model = RunCommentGenerationResponseBody()
+            self.body = temp_model.from_map(m['body'])
+        return self
+
+
 class RunMarketingInformationExtractRequest(TeaModel):
     def __init__(
         self,
@@ -2023,7 +2641,9 @@ class RunVideoAnalysisRequest(TeaModel):
         model_custom_prompt_template_id: str = None,
         model_id: str = None,
         original_session_id: str = None,
+        snapshot_interval: float = None,
         task_id: str = None,
+        video_extra_info: str = None,
         video_model_custom_prompt_template: str = None,
         video_model_id: str = None,
         video_url: str = None,
@@ -2033,7 +2653,9 @@ class RunVideoAnalysisRequest(TeaModel):
         self.model_custom_prompt_template_id = model_custom_prompt_template_id
         self.model_id = model_id
         self.original_session_id = original_session_id
+        self.snapshot_interval = snapshot_interval
         self.task_id = task_id
+        self.video_extra_info = video_extra_info
         self.video_model_custom_prompt_template = video_model_custom_prompt_template
         self.video_model_id = video_model_id
         self.video_url = video_url
@@ -2057,8 +2679,12 @@ class RunVideoAnalysisRequest(TeaModel):
             result['modelId'] = self.model_id
         if self.original_session_id is not None:
             result['originalSessionId'] = self.original_session_id
+        if self.snapshot_interval is not None:
+            result['snapshotInterval'] = self.snapshot_interval
         if self.task_id is not None:
             result['taskId'] = self.task_id
+        if self.video_extra_info is not None:
+            result['videoExtraInfo'] = self.video_extra_info
         if self.video_model_custom_prompt_template is not None:
             result['videoModelCustomPromptTemplate'] = self.video_model_custom_prompt_template
         if self.video_model_id is not None:
@@ -2079,8 +2705,12 @@ class RunVideoAnalysisRequest(TeaModel):
             self.model_id = m.get('modelId')
         if m.get('originalSessionId') is not None:
             self.original_session_id = m.get('originalSessionId')
+        if m.get('snapshotInterval') is not None:
+            self.snapshot_interval = m.get('snapshotInterval')
         if m.get('taskId') is not None:
             self.task_id = m.get('taskId')
+        if m.get('videoExtraInfo') is not None:
+            self.video_extra_info = m.get('videoExtraInfo')
         if m.get('videoModelCustomPromptTemplate') is not None:
             self.video_model_custom_prompt_template = m.get('videoModelCustomPromptTemplate')
         if m.get('videoModelId') is not None:
@@ -2098,7 +2728,9 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
         model_custom_prompt_template_id: str = None,
         model_id: str = None,
         original_session_id: str = None,
+        snapshot_interval: float = None,
         task_id: str = None,
+        video_extra_info: str = None,
         video_model_custom_prompt_template: str = None,
         video_model_id: str = None,
         video_url: str = None,
@@ -2108,7 +2740,9 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
         self.model_custom_prompt_template_id = model_custom_prompt_template_id
         self.model_id = model_id
         self.original_session_id = original_session_id
+        self.snapshot_interval = snapshot_interval
         self.task_id = task_id
+        self.video_extra_info = video_extra_info
         self.video_model_custom_prompt_template = video_model_custom_prompt_template
         self.video_model_id = video_model_id
         self.video_url = video_url
@@ -2132,8 +2766,12 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
             result['modelId'] = self.model_id
         if self.original_session_id is not None:
             result['originalSessionId'] = self.original_session_id
+        if self.snapshot_interval is not None:
+            result['snapshotInterval'] = self.snapshot_interval
         if self.task_id is not None:
             result['taskId'] = self.task_id
+        if self.video_extra_info is not None:
+            result['videoExtraInfo'] = self.video_extra_info
         if self.video_model_custom_prompt_template is not None:
             result['videoModelCustomPromptTemplate'] = self.video_model_custom_prompt_template
         if self.video_model_id is not None:
@@ -2154,8 +2792,12 @@ class RunVideoAnalysisShrinkRequest(TeaModel):
             self.model_id = m.get('modelId')
         if m.get('originalSessionId') is not None:
             self.original_session_id = m.get('originalSessionId')
+        if m.get('snapshotInterval') is not None:
+            self.snapshot_interval = m.get('snapshotInterval')
         if m.get('taskId') is not None:
             self.task_id = m.get('taskId')
+        if m.get('videoExtraInfo') is not None:
+            self.video_extra_info = m.get('videoExtraInfo')
         if m.get('videoModelCustomPromptTemplate') is not None:
             self.video_model_custom_prompt_template = m.get('videoModelCustomPromptTemplate')
         if m.get('videoModelId') is not None:
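The remaining models.py change is confined to `RunVideoAnalysisRequest` and `RunVideoAnalysisShrinkRequest`, which gain the optional `snapshot_interval` (float) and `video_extra_info` (str) fields. Below is a minimal sketch of how the new fields serialize; the values are placeholders, and their exact semantics are not documented in this diff (the names suggest a snapshot interval and free-form extra video context).

```python
# Sketch of the two new optional RunVideoAnalysisRequest fields (placeholder values).
from alibabacloud_quanmiaolightapp20240801 import models as quanmiao_models

request = quanmiao_models.RunVideoAnalysisRequest(
    video_url='<your-video-url>',
    snapshot_interval=5.0,                      # new in 1.3.0, typed float in the model
    video_extra_info='<extra video context>',   # new in 1.3.0, free-form string
)
# to_map() shows the camelCase keys the client sends: 'snapshotInterval', 'videoExtraInfo'.
print(request.to_map())
```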
{alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/alibabacloud_quanmiaolightapp20240801.egg-info/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: alibabacloud-quanmiaolightapp20240801
-Version: 1.2.0
+Version: 1.3.0
 Summary: Alibaba Cloud QuanMiaoLightApp (20240801) SDK Library for Python
 Home-page: https://github.com/aliyun/alibabacloud-python-sdk
 Author: Alibaba Cloud SDK
{alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/setup.py
RENAMED
@@ -24,7 +24,7 @@ from setuptools import setup, find_packages
 """
 setup module for alibabacloud_quanmiaolightapp20240801.
 
-Created on
+Created on 26/09/2024
 
 @author: Alibaba Cloud SDK
 """
@@ -38,7 +38,7 @@ URL = "https://github.com/aliyun/alibabacloud-python-sdk"
 VERSION = __import__(PACKAGE).__version__
 REQUIRES = [
     "alibabacloud_tea_util>=0.3.13, <1.0.0",
-    "alibabacloud_tea_openapi>=0.3.
+    "alibabacloud_tea_openapi>=0.3.12, <1.0.0",
     "alibabacloud_openapi_util>=0.2.1, <1.0.0",
     "alibabacloud_endpoint_util>=0.0.3, <1.0.0"
 ]
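The only dependency change is the raised lower bound on `alibabacloud_tea_openapi` (the previous bound is truncated in this rendering). A quick, standard-library way to check an installed environment against the new floor, assuming the dependency is installed:

```python
# Check the installed alibabacloud_tea_openapi against the new floor declared above.
from importlib.metadata import version

print(version('alibabacloud_tea_openapi'))  # should satisfy >=0.3.12, <1.0.0 for 1.3.0
```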
alibabacloud_quanmiaolightapp20240801-1.2.0/alibabacloud_quanmiaolightapp20240801/__init__.py
DELETED
@@ -1 +0,0 @@
-__version__ = '1.2.0'
{alibabacloud_quanmiaolightapp20240801-1.2.0 → alibabacloud_quanmiaolightapp20240801-1.3.0}/LICENSE
RENAMED
File without changes

The remaining renamed files listed above (MANIFEST.in, README-CN.md, README.md, alibabacloud_quanmiaolightapp20240801.egg-info/SOURCES.txt, alibabacloud_quanmiaolightapp20240801.egg-info/dependency_links.txt, alibabacloud_quanmiaolightapp20240801.egg-info/top_level.txt, setup.cfg) are likewise renamed with no content changes.