dashscope 1.8.0__py3-none-any.whl → 1.25.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dashscope/__init__.py +61 -14
- dashscope/aigc/__init__.py +10 -3
- dashscope/aigc/chat_completion.py +282 -0
- dashscope/aigc/code_generation.py +145 -0
- dashscope/aigc/conversation.py +71 -12
- dashscope/aigc/generation.py +288 -16
- dashscope/aigc/image_synthesis.py +473 -31
- dashscope/aigc/multimodal_conversation.py +299 -14
- dashscope/aigc/video_synthesis.py +610 -0
- dashscope/api_entities/aiohttp_request.py +8 -5
- dashscope/api_entities/api_request_data.py +4 -2
- dashscope/api_entities/api_request_factory.py +68 -20
- dashscope/api_entities/base_request.py +20 -3
- dashscope/api_entities/chat_completion_types.py +344 -0
- dashscope/api_entities/dashscope_response.py +243 -15
- dashscope/api_entities/encryption.py +179 -0
- dashscope/api_entities/http_request.py +216 -62
- dashscope/api_entities/websocket_request.py +43 -34
- dashscope/app/__init__.py +5 -0
- dashscope/app/application.py +203 -0
- dashscope/app/application_response.py +246 -0
- dashscope/assistants/__init__.py +16 -0
- dashscope/assistants/assistant_types.py +175 -0
- dashscope/assistants/assistants.py +311 -0
- dashscope/assistants/files.py +197 -0
- dashscope/audio/__init__.py +4 -2
- dashscope/audio/asr/__init__.py +17 -1
- dashscope/audio/asr/asr_phrase_manager.py +203 -0
- dashscope/audio/asr/recognition.py +167 -27
- dashscope/audio/asr/transcription.py +107 -14
- dashscope/audio/asr/translation_recognizer.py +1006 -0
- dashscope/audio/asr/vocabulary.py +177 -0
- dashscope/audio/qwen_asr/__init__.py +7 -0
- dashscope/audio/qwen_asr/qwen_transcription.py +189 -0
- dashscope/audio/qwen_omni/__init__.py +11 -0
- dashscope/audio/qwen_omni/omni_realtime.py +524 -0
- dashscope/audio/qwen_tts/__init__.py +5 -0
- dashscope/audio/qwen_tts/speech_synthesizer.py +77 -0
- dashscope/audio/qwen_tts_realtime/__init__.py +10 -0
- dashscope/audio/qwen_tts_realtime/qwen_tts_realtime.py +355 -0
- dashscope/audio/tts/__init__.py +2 -0
- dashscope/audio/tts/speech_synthesizer.py +5 -0
- dashscope/audio/tts_v2/__init__.py +12 -0
- dashscope/audio/tts_v2/enrollment.py +179 -0
- dashscope/audio/tts_v2/speech_synthesizer.py +886 -0
- dashscope/cli.py +157 -37
- dashscope/client/base_api.py +652 -87
- dashscope/common/api_key.py +2 -0
- dashscope/common/base_type.py +135 -0
- dashscope/common/constants.py +13 -16
- dashscope/common/env.py +2 -0
- dashscope/common/error.py +58 -22
- dashscope/common/logging.py +2 -0
- dashscope/common/message_manager.py +2 -0
- dashscope/common/utils.py +276 -46
- dashscope/customize/__init__.py +0 -0
- dashscope/customize/customize_types.py +192 -0
- dashscope/customize/deployments.py +146 -0
- dashscope/customize/finetunes.py +234 -0
- dashscope/embeddings/__init__.py +5 -1
- dashscope/embeddings/batch_text_embedding.py +208 -0
- dashscope/embeddings/batch_text_embedding_response.py +65 -0
- dashscope/embeddings/multimodal_embedding.py +118 -10
- dashscope/embeddings/text_embedding.py +13 -1
- dashscope/{file.py → files.py} +19 -4
- dashscope/io/input_output.py +2 -0
- dashscope/model.py +11 -2
- dashscope/models.py +43 -0
- dashscope/multimodal/__init__.py +20 -0
- dashscope/multimodal/dialog_state.py +56 -0
- dashscope/multimodal/multimodal_constants.py +28 -0
- dashscope/multimodal/multimodal_dialog.py +648 -0
- dashscope/multimodal/multimodal_request_params.py +313 -0
- dashscope/multimodal/tingwu/__init__.py +10 -0
- dashscope/multimodal/tingwu/tingwu.py +80 -0
- dashscope/multimodal/tingwu/tingwu_realtime.py +579 -0
- dashscope/nlp/__init__.py +0 -0
- dashscope/nlp/understanding.py +64 -0
- dashscope/protocol/websocket.py +3 -0
- dashscope/rerank/__init__.py +0 -0
- dashscope/rerank/text_rerank.py +69 -0
- dashscope/resources/qwen.tiktoken +151643 -0
- dashscope/threads/__init__.py +26 -0
- dashscope/threads/messages/__init__.py +0 -0
- dashscope/threads/messages/files.py +113 -0
- dashscope/threads/messages/messages.py +220 -0
- dashscope/threads/runs/__init__.py +0 -0
- dashscope/threads/runs/runs.py +501 -0
- dashscope/threads/runs/steps.py +112 -0
- dashscope/threads/thread_types.py +665 -0
- dashscope/threads/threads.py +212 -0
- dashscope/tokenizers/__init__.py +7 -0
- dashscope/tokenizers/qwen_tokenizer.py +111 -0
- dashscope/tokenizers/tokenization.py +125 -0
- dashscope/tokenizers/tokenizer.py +45 -0
- dashscope/tokenizers/tokenizer_base.py +32 -0
- dashscope/utils/__init__.py +0 -0
- dashscope/utils/message_utils.py +838 -0
- dashscope/utils/oss_utils.py +243 -0
- dashscope/utils/param_utils.py +29 -0
- dashscope/version.py +3 -1
- {dashscope-1.8.0.dist-info → dashscope-1.25.6.dist-info}/METADATA +53 -50
- dashscope-1.25.6.dist-info/RECORD +112 -0
- {dashscope-1.8.0.dist-info → dashscope-1.25.6.dist-info}/WHEEL +1 -1
- {dashscope-1.8.0.dist-info → dashscope-1.25.6.dist-info}/entry_points.txt +0 -1
- {dashscope-1.8.0.dist-info → dashscope-1.25.6.dist-info/licenses}/LICENSE +2 -4
- dashscope/deployment.py +0 -129
- dashscope/finetune.py +0 -149
- dashscope-1.8.0.dist-info/RECORD +0 -49
- {dashscope-1.8.0.dist-info → dashscope-1.25.6.dist-info}/top_level.txt +0 -0

dashscope/aigc/video_synthesis.py (new file)
@@ -0,0 +1,610 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+from typing import Any, Dict, Union, List
+
+from dashscope.api_entities.dashscope_response import (DashScopeAPIResponse,
+                                                        VideoSynthesisResponse)
+from dashscope.client.base_api import BaseAsyncApi, BaseAsyncAioApi
+from dashscope.common.constants import PROMPT, REFERENCE_VIDEO_URLS
+from dashscope.common.utils import _get_task_group_and_task
+from dashscope.utils.oss_utils import check_and_upload_local
+
+
+class VideoSynthesis(BaseAsyncApi):
+    task = 'video-generation'
+    """API for video synthesis.
+    """
+    class Models:
+        """@deprecated, use wanx2.1-t2v-plus instead"""
+        wanx_txt2video_pro = 'wanx-txt2video-pro'
+        """@deprecated, use wanx2.1-i2v-plus instead"""
+        wanx_img2video_pro = 'wanx-img2video-pro'
+
+        wanx_2_1_t2v_turbo = 'wanx2.1-t2v-turbo'
+        wanx_2_1_t2v_plus = 'wanx2.1-t2v-plus'
+
+        wanx_2_1_i2v_plus = 'wanx2.1-i2v-plus'
+        wanx_2_1_i2v_turbo = 'wanx2.1-i2v-turbo'
+
+        wanx_2_1_kf2v_plus = 'wanx2.1-kf2v-plus'
+        wanx_kf2v = 'wanx-kf2v'
+
+    @classmethod
+    def call(cls,
+             model: str,
+             prompt: Any = None,
+             # """@deprecated, use prompt_extend in parameters """
+             extend_prompt: bool = True,
+             negative_prompt: str = None,
+             template: str = None,
+             img_url: str = None,
+             audio_url: str = None,
+             reference_video_urls: List[str] = None,
+             reference_video_description: List[str] = None,
+             api_key: str = None,
+             extra_input: Dict = None,
+             workspace: str = None,
+             task: str = None,
+             head_frame: str = None,
+             tail_frame: str = None,
+             first_frame_url: str = None,
+             last_frame_url: str = None,
+             **kwargs) -> VideoSynthesisResponse:
+        """Call video synthesis service and get result.
+
+        Args:
+            model (str): The model, reference ``Models``.
+            prompt (Any): The prompt for video synthesis.
+            extend_prompt (bool): @deprecated, use prompt_extend in parameters
+            negative_prompt (str): The negative prompt is the opposite of the prompt meaning.
+            template (str): LoRa input, such as gufeng, katong, etc.
+            img_url (str): The input image url, Generate the URL of the image referenced by the video.
+            audio_url (str): The input audio url
+            reference_video_urls (List[str]): list of character reference video file urls uploaded by the user
+            reference_video_description (List[str]): For the description information of the picture and sound of the reference video, corresponding to ref video, it needs to be in the order of the url. If the quantity is different, an error will be reported
+            api_key (str, optional): The api api_key. Defaults to None.
+            workspace (str): The dashscope workspace id.
+            extra_input (Dict): The extra input parameters.
+            task (str): The task of api, ref doc.
+            first_frame_url (str): The URL of the first frame image for generating the video.
+            last_frame_url (str): The URL of the last frame image for generating the video.
+            **kwargs:
+                size(str, `optional`): The output video size(width*height).
+                duration(int, optional): The duration. Duration of video generation. The default value is 5, in seconds.
+                seed(int, optional): The seed. The random seed for video generation. The default value is 5.
+
+        Raises:
+            InputRequired: The prompt cannot be empty.
+
+        Returns:
+            VideoSynthesisResponse: The video synthesis result.
+        """
+        return super().call(model,
+                            prompt,
+                            img_url=img_url,
+                            audio_url=audio_url,
+                            reference_video_urls=reference_video_urls,
+                            reference_video_description=reference_video_description,
+                            api_key=api_key,
+                            extend_prompt=extend_prompt,
+                            negative_prompt=negative_prompt,
+                            template=template,
+                            workspace=workspace,
+                            extra_input=extra_input,
+                            task=task,
+                            head_frame=head_frame,
+                            tail_frame=tail_frame,
+                            first_frame_url=first_frame_url,
+                            last_frame_url=last_frame_url,
+                            **kwargs)
+
+    @classmethod
+    def _get_input(cls,
+                   model: str,
+                   prompt: Any = None,
+                   img_url: str = None,
+                   audio_url: str = None,
+                   reference_video_urls: List[str] = None,
+                   reference_video_description: List[str] = None,
+                   # """@deprecated, use prompt_extend in parameters """
+                   extend_prompt: bool = True,
+                   negative_prompt: str = None,
+                   template: str = None,
+                   api_key: str = None,
+                   extra_input: Dict = None,
+                   task: str = None,
+                   function: str = None,
+                   head_frame: str = None,
+                   tail_frame: str = None,
+                   first_frame_url: str = None,
+                   last_frame_url: str = None,
+                   **kwargs):
+
+        inputs = {PROMPT: prompt, 'extend_prompt': extend_prompt}
+        if negative_prompt:
+            inputs['negative_prompt'] = negative_prompt
+        if template:
+            inputs['template'] = template
+        if function:
+            inputs['function'] = function
+        if reference_video_description:
+            inputs['reference_video_description'] = reference_video_description
+
+        has_upload = False
+        upload_certificate = None
+
+        if img_url is not None and img_url:
+            is_upload, res_img_url, upload_certificate = check_and_upload_local(
+                model, img_url, api_key, upload_certificate)
+            if is_upload:
+                has_upload = True
+            inputs['img_url'] = res_img_url
+
+        if audio_url is not None and audio_url:
+            is_upload, res_audio_url, upload_certificate = check_and_upload_local(
+                model, audio_url, api_key, upload_certificate)
+            if is_upload:
+                has_upload = True
+            inputs['audio_url'] = res_audio_url
+
+        if head_frame is not None and head_frame:
+            is_upload, res_head_frame, upload_certificate = check_and_upload_local(
+                model, head_frame, api_key, upload_certificate)
+            if is_upload:
+                has_upload = True
+            inputs['head_frame'] = res_head_frame
+
+        if tail_frame is not None and tail_frame:
+            is_upload, res_tail_frame, upload_certificate = check_and_upload_local(
+                model, tail_frame, api_key, upload_certificate)
+            if is_upload:
+                has_upload = True
+            inputs['tail_frame'] = res_tail_frame
+
+        if first_frame_url is not None and first_frame_url:
+            is_upload, res_first_frame_url, upload_certificate = check_and_upload_local(
+                model, first_frame_url, api_key, upload_certificate)
+            if is_upload:
+                has_upload = True
+            inputs['first_frame_url'] = res_first_frame_url
+
+        if last_frame_url is not None and last_frame_url:
+            is_upload, res_last_frame_url, upload_certificate = check_and_upload_local(
+                model, last_frame_url, api_key, upload_certificate)
+            if is_upload:
+                has_upload = True
+            inputs['last_frame_url'] = res_last_frame_url
+
+        if (reference_video_urls is not None
+                and reference_video_urls and len(reference_video_urls) > 0):
+            new_videos = []
+            for video in reference_video_urls:
+                is_upload, new_video, upload_certificate = check_and_upload_local(
+                    model, video, api_key, upload_certificate)
+                if is_upload:
+                    has_upload = True
+                new_videos.append(new_video)
+            inputs[REFERENCE_VIDEO_URLS] = new_videos
+
+        if extra_input is not None and extra_input:
+            inputs = {**inputs, **extra_input}
+        if has_upload:
+            headers = kwargs.pop('headers', {})
+            headers['X-DashScope-OssResourceResolve'] = 'enable'
+            kwargs['headers'] = headers
+
+        if task is None:
+            task = VideoSynthesis.task
+        if model is not None and model and 'kf2v' in model:
+            task = 'image2video'
+
+        return inputs, kwargs, task
+
+    @classmethod
+    def async_call(cls,
+                   model: str,
+                   prompt: Any = None,
+                   img_url: str = None,
+                   audio_url: str = None,
+                   reference_video_urls: List[str] = None,
+                   reference_video_description: List[str] = None,
+                   # """@deprecated, use prompt_extend in parameters """
+                   extend_prompt: bool = True,
+                   negative_prompt: str = None,
+                   template: str = None,
+                   api_key: str = None,
+                   extra_input: Dict = None,
+                   workspace: str = None,
+                   task: str = None,
+                   head_frame: str = None,
+                   tail_frame: str = None,
+                   first_frame_url: str = None,
+                   last_frame_url: str = None,
+                   **kwargs) -> VideoSynthesisResponse:
+        """Create a video synthesis task, and return task information.
+
+        Args:
+            model (str): The model, reference ``Models``.
+            prompt (Any): The prompt for video synthesis.
+            extend_prompt (bool): @deprecated, use prompt_extend in parameters
+            negative_prompt (str): The negative prompt is the opposite of the prompt meaning.
+            template (str): LoRa input, such as gufeng, katong, etc.
+            img_url (str): The input image url, Generate the URL of the image referenced by the video.
+            audio_url (str): The input audio url.
+            reference_video_urls (List[str]): list of character reference video file urls uploaded by the user
+            reference_video_description (List[str]): For the description information of the picture and sound of the reference video, corresponding to ref video, it needs to be in the order of the url. If the quantity is different, an error will be reported
+            api_key (str, optional): The api api_key. Defaults to None.
+            workspace (str): The dashscope workspace id.
+            extra_input (Dict): The extra input parameters.
+            task (str): The task of api, ref doc.
+            first_frame_url (str): The URL of the first frame image for generating the video.
+            last_frame_url (str): The URL of the last frame image for generating the video.
+            **kwargs:
+                size(str, `optional`): The output video size(width*height).
+                duration(int, optional): The duration. Duration of video generation. The default value is 5, in seconds.
+                seed(int, optional): The seed. The random seed for video generation. The default value is 5.
+
+        Raises:
+            InputRequired: The prompt cannot be empty.
+
+        Returns:
+            DashScopeAPIResponse: The video synthesis
+                task id in the response.
+        """
+        task_group, function = _get_task_group_and_task(__name__)
+
+        inputs, kwargs, task = cls._get_input(
+            model, prompt, img_url, audio_url, reference_video_urls, reference_video_description,
+            extend_prompt, negative_prompt, template, api_key,
+            extra_input, task, function, head_frame, tail_frame,
+            first_frame_url, last_frame_url, **kwargs)
+
+        response = super().async_call(
+            model=model,
+            task_group=task_group,
+            task=VideoSynthesis.task if task is None else task,
+            function=function,
+            api_key=api_key,
+            input=inputs,
+            workspace=workspace,
+            **kwargs)
+        return VideoSynthesisResponse.from_api_response(response)
+
+    @classmethod
+    def fetch(cls,
+              task: Union[str, VideoSynthesisResponse],
+              api_key: str = None,
+              workspace: str = None) -> VideoSynthesisResponse:
+        """Fetch video synthesis task status or result.
+
+        Args:
+            task (Union[str, VideoSynthesisResponse]): The task_id or
+                VideoSynthesisResponse return by async_call().
+            api_key (str, optional): The api api_key. Defaults to None.
+            workspace (str): The dashscope workspace id.
+
+        Returns:
+            VideoSynthesisResponse: The task status or result.
+        """
+        response = super().fetch(task, api_key=api_key, workspace=workspace)
+        return VideoSynthesisResponse.from_api_response(response)
+
+    @classmethod
+    def wait(cls,
+             task: Union[str, VideoSynthesisResponse],
+             api_key: str = None,
+             workspace: str = None) -> VideoSynthesisResponse:
+        """Wait for video synthesis task to complete, and return the result.
+
+        Args:
+            task (Union[str, VideoSynthesisResponse]): The task_id or
+                VideoSynthesisResponse return by async_call().
+            api_key (str, optional): The api api_key. Defaults to None.
+            workspace (str): The dashscope workspace id.
+
+        Returns:
+            VideoSynthesisResponse: The task result.
+        """
+        response = super().wait(task, api_key, workspace=workspace)
+        return VideoSynthesisResponse.from_api_response(response)
+
+    @classmethod
+    def cancel(cls,
+               task: Union[str, VideoSynthesisResponse],
+               api_key: str = None,
+               workspace: str = None) -> DashScopeAPIResponse:
+        """Cancel video synthesis task.
+        Only tasks whose status is PENDING can be canceled.
+
+        Args:
+            task (Union[str, VideoSynthesisResponse]): The task_id or
+                VideoSynthesisResponse return by async_call().
+            api_key (str, optional): The api api_key. Defaults to None.
+            workspace (str): The dashscope workspace id.
+
+        Returns:
+            DashScopeAPIResponse: The response data.
+        """
+        return super().cancel(task, api_key, workspace=workspace)
+
+    @classmethod
+    def list(cls,
+             start_time: str = None,
+             end_time: str = None,
+             model_name: str = None,
+             api_key_id: str = None,
+             region: str = None,
+             status: str = None,
+             page_no: int = 1,
+             page_size: int = 10,
+             api_key: str = None,
+             workspace: str = None,
+             **kwargs) -> DashScopeAPIResponse:
+        """List async tasks.
+
+        Args:
+            start_time (str, optional): The tasks start time,
+                for example: 20230420000000. Defaults to None.
+            end_time (str, optional): The tasks end time,
+                for example: 20230420000000. Defaults to None.
+            model_name (str, optional): The tasks model name. Defaults to None.
+            api_key_id (str, optional): The tasks api-key-id. Defaults to None.
+            region (str, optional): The service region,
+                for example: cn-beijing. Defaults to None.
+            status (str, optional): The status of tasks[PENDING,
+                RUNNING, SUCCEEDED, FAILED, CANCELED]. Defaults to None.
+            page_no (int, optional): The page number. Defaults to 1.
+            page_size (int, optional): The page size. Defaults to 10.
+            api_key (str, optional): The user api-key. Defaults to None.
+            workspace (str): The dashscope workspace id.
+
+        Returns:
+            DashScopeAPIResponse: The response data.
+        """
+        return super().list(start_time=start_time,
+                            end_time=end_time,
+                            model_name=model_name,
+                            api_key_id=api_key_id,
+                            region=region,
+                            status=status,
+                            page_no=page_no,
+                            page_size=page_size,
+                            api_key=api_key,
+                            workspace=workspace,
+                            **kwargs)
+
+class AioVideoSynthesis(BaseAsyncAioApi):
+    @classmethod
+    async def call(cls,
+                   model: str,
+                   prompt: Any = None,
+                   img_url: str = None,
+                   audio_url: str = None,
+                   reference_video_urls: List[str] = None,
+                   reference_video_description: List[str] = None,
+                   # """@deprecated, use prompt_extend in parameters """
+                   extend_prompt: bool = True,
+                   negative_prompt: str = None,
+                   template: str = None,
+                   api_key: str = None,
+                   extra_input: Dict = None,
+                   workspace: str = None,
+                   task: str = None,
+                   head_frame: str = None,
+                   tail_frame: str = None,
+                   first_frame_url: str = None,
+                   last_frame_url: str = None,
+                   **kwargs) -> VideoSynthesisResponse:
+        """Call video synthesis service and get result.
+
+        Args:
+            model (str): The model, reference ``Models``.
+            prompt (Any): The prompt for video synthesis.
+            extend_prompt (bool): @deprecated, use prompt_extend in parameters
+            negative_prompt (str): The negative prompt is the opposite of the prompt meaning.
+            template (str): LoRa input, such as gufeng, katong, etc.
+            img_url (str): The input image url, Generate the URL of the image referenced by the video.
+            audio_url (str): The input audio url.
+            reference_video_urls (List[str]): list of character reference video file urls uploaded by the user
+            reference_video_description (List[str]): For the description information of the picture and sound of the reference video, corresponding to ref video, it needs to be in the order of the url. If the quantity is different, an error will be reported
+            api_key (str, optional): The api api_key. Defaults to None.
+            workspace (str): The dashscope workspace id.
+            extra_input (Dict): The extra input parameters.
+            task (str): The task of api, ref doc.
+            first_frame_url (str): The URL of the first frame image for generating the video.
+            last_frame_url (str): The URL of the last frame image for generating the video.
+            **kwargs:
+                size(str, `optional`): The output video size(width*height).
+                duration(int, optional): The duration. Duration of video generation. The default value is 5, in seconds.
+                seed(int, optional): The seed. The random seed for video generation. The default value is 5.
+
+        Raises:
+            InputRequired: The prompt cannot be empty.
+
+        Returns:
+            VideoSynthesisResponse: The video synthesis result.
+        """
+        task_group, f = _get_task_group_and_task(__name__)
+        inputs, kwargs, task = VideoSynthesis._get_input(
+            model, prompt, img_url, audio_url, reference_video_urls, reference_video_description,
+            extend_prompt, negative_prompt, template, api_key,
+            extra_input, task, f, head_frame, tail_frame,
+            first_frame_url, last_frame_url, **kwargs)
+        response = await super().call(model, inputs, task_group, task, f, api_key, workspace, **kwargs)
+        return VideoSynthesisResponse.from_api_response(response)
+
+    @classmethod
+    async def async_call(cls,
+                         model: str,
+                         prompt: Any = None,
+                         img_url: str = None,
+                         audio_url: str = None,
+                         reference_video_urls: List[str] = None,
+                         reference_video_description: List[str] = None,
+                         # """@deprecated, use prompt_extend in parameters """
+                         extend_prompt: bool = True,
+                         negative_prompt: str = None,
+                         template: str = None,
+                         api_key: str = None,
+                         extra_input: Dict = None,
+                         workspace: str = None,
+                         task: str = None,
+                         head_frame: str = None,
+                         tail_frame: str = None,
+                         first_frame_url: str = None,
+                         last_frame_url: str = None,
+                         **kwargs) -> VideoSynthesisResponse:
+        """Create a video synthesis task, and return task information.
+
+        Args:
+            model (str): The model, reference ``Models``.
+            prompt (Any): The prompt for video synthesis.
+            extend_prompt (bool): @deprecated, use prompt_extend in parameters
+            negative_prompt (str): The negative prompt is the opposite of the prompt meaning.
+            template (str): LoRa input, such as gufeng, katong, etc.
+            img_url (str): The input image url, Generate the URL of the image referenced by the video.
+            audio_url (str): The input audio url.
+            reference_video_urls (List[str]): list of character reference video file urls uploaded by the user
+            reference_video_description (List[str]): For the description information of the picture and sound of the reference video, corresponding to ref video, it needs to be in the order of the url. If the quantity is different, an error will be reported
+            api_key (str, optional): The api api_key. Defaults to None.
+            workspace (str): The dashscope workspace id.
+            extra_input (Dict): The extra input parameters.
+            task (str): The task of api, ref doc.
+            first_frame_url (str): The URL of the first frame image for generating the video.
+            last_frame_url (str): The URL of the last frame image for generating the video.
+            **kwargs:
+                size(str, `optional`): The output video size(width*height).
+                duration(int, optional): The duration. Duration of video generation. The default value is 5, in seconds.
+                seed(int, optional): The seed. The random seed for video generation. The default value is 5.
+
+        Raises:
+            InputRequired: The prompt cannot be empty.
+
+        Returns:
+            DashScopeAPIResponse: The video synthesis
+                task id in the response.
+        """
+        task_group, function = _get_task_group_and_task(__name__)
+
+        inputs, kwargs, task = VideoSynthesis._get_input(
+            model, prompt, img_url, audio_url, reference_video_urls, reference_video_description,
+            extend_prompt, negative_prompt, template, api_key,
+            extra_input, task, function, head_frame, tail_frame,
+            first_frame_url, last_frame_url, **kwargs)
+
+        response = await super().async_call(
+            model=model,
+            task_group=task_group,
+            task=VideoSynthesis.task if task is None else task,
+            function=function,
+            api_key=api_key,
+            input=inputs,
+            workspace=workspace,
+            **kwargs)
+        return VideoSynthesisResponse.from_api_response(response)
+
+    @classmethod
+    async def fetch(cls,
+                    task: Union[str, VideoSynthesisResponse],
+                    api_key: str = None,
+                    workspace: str = None,
+                    **kwargs) -> VideoSynthesisResponse:
+        """Fetch video synthesis task status or result.
+
+        Args:
+            task (Union[str, VideoSynthesisResponse]): The task_id or
+                VideoSynthesisResponse return by async_call().
+            api_key (str, optional): The api api_key. Defaults to None.
+            workspace (str): The dashscope workspace id.
+
+        Returns:
+            VideoSynthesisResponse: The task status or result.
+        """
+        response = await super().fetch(task, api_key=api_key, workspace=workspace)
+        return VideoSynthesisResponse.from_api_response(response)
+
+    @classmethod
+    async def wait(cls,
+                   task: Union[str, VideoSynthesisResponse],
+                   api_key: str = None,
+                   workspace: str = None,
+                   **kwargs) -> VideoSynthesisResponse:
+        """Wait for video synthesis task to complete, and return the result.
+
+        Args:
+            task (Union[str, VideoSynthesisResponse]): The task_id or
+                VideoSynthesisResponse return by async_call().
+            api_key (str, optional): The api api_key. Defaults to None.
+            workspace (str): The dashscope workspace id.
+
+        Returns:
+            VideoSynthesisResponse: The task result.
+        """
+        response = await super().wait(task, api_key, workspace=workspace)
+        return VideoSynthesisResponse.from_api_response(response)
+
+    @classmethod
+    async def cancel(cls,
+                     task: Union[str, VideoSynthesisResponse],
+                     api_key: str = None,
+                     workspace: str = None,
+                     **kwargs) -> DashScopeAPIResponse:
+        """Cancel video synthesis task.
+        Only tasks whose status is PENDING can be canceled.
+
+        Args:
+            task (Union[str, VideoSynthesisResponse]): The task_id or
+                VideoSynthesisResponse return by async_call().
+            api_key (str, optional): The api api_key. Defaults to None.
+            workspace (str): The dashscope workspace id.
+
+        Returns:
+            DashScopeAPIResponse: The response data.
+        """
+        return await super().cancel(task, api_key, workspace=workspace)
+
+    @classmethod
+    async def list(cls,
+                   start_time: str = None,
+                   end_time: str = None,
+                   model_name: str = None,
+                   api_key_id: str = None,
+                   region: str = None,
+                   status: str = None,
+                   page_no: int = 1,
+                   page_size: int = 10,
+                   api_key: str = None,
+                   workspace: str = None,
+                   **kwargs) -> DashScopeAPIResponse:
+        """List async tasks.
+
+        Args:
+            start_time (str, optional): The tasks start time,
+                for example: 20230420000000. Defaults to None.
+            end_time (str, optional): The tasks end time,
+                for example: 20230420000000. Defaults to None.
+            model_name (str, optional): The tasks model name. Defaults to None.
+            api_key_id (str, optional): The tasks api-key-id. Defaults to None.
+            region (str, optional): The service region,
+                for example: cn-beijing. Defaults to None.
+            status (str, optional): The status of tasks[PENDING,
+                RUNNING, SUCCEEDED, FAILED, CANCELED]. Defaults to None.
+            page_no (int, optional): The page number. Defaults to 1.
+            page_size (int, optional): The page size. Defaults to 10.
+            api_key (str, optional): The user api-key. Defaults to None.
+            workspace (str): The dashscope workspace id.
+
+        Returns:
+            DashScopeAPIResponse: The response data.
+        """
+        return await super().list(start_time=start_time,
+                                  end_time=end_time,
+                                  model_name=model_name,
+                                  api_key_id=api_key_id,
+                                  region=region,
+                                  status=status,
+                                  page_no=page_no,
+                                  page_size=page_size,
+                                  api_key=api_key,
+                                  workspace=workspace,
+                                  **kwargs)
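
The largest addition above is the new `VideoSynthesis` / `AioVideoSynthesis` API in `dashscope/aigc/video_synthesis.py`. For orientation, here is a minimal usage sketch based only on the signatures shown in that diff; it is not part of the diff itself. The model constant, prompt, and `size` value are illustrative, the response fields (`status_code`, `output`, `code`, `message`) are assumed to follow the usual DashScopeAPIResponse shape, and a valid DASHSCOPE_API_KEY is assumed to be configured in the environment.

from http import HTTPStatus

from dashscope.aigc.video_synthesis import VideoSynthesis

# One-shot helper: submits the task and blocks until the video is ready.
rsp = VideoSynthesis.call(model=VideoSynthesis.Models.wanx_2_1_t2v_turbo,
                          prompt='A cat surfing a small wave at sunset',
                          size='1280*720')
if rsp.status_code == HTTPStatus.OK:
    print(rsp.output)            # task output (assumed to carry the video URL)
else:
    print(rsp.code, rsp.message)

# Equivalent two-step flow: create the task, then poll or wait for it.
task_info = VideoSynthesis.async_call(model=VideoSynthesis.Models.wanx_2_1_t2v_turbo,
                                      prompt='A cat surfing a small wave at sunset')
status = VideoSynthesis.fetch(task_info)   # non-blocking status check
result = VideoSynthesis.wait(task_info)    # blocks until the task finishes
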
dashscope/api_entities/aiohttp_request.py
@@ -1,3 +1,5 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
 import json
 from http import HTTPStatus
 
@@ -6,8 +8,7 @@ import aiohttp
 from dashscope.api_entities.base_request import AioBaseRequest
 from dashscope.api_entities.dashscope_response import DashScopeAPIResponse
 from dashscope.common.constants import (DEFAULT_REQUEST_TIMEOUT_SECONDS,
-                                         SSE_CONTENT_TYPE, HTTPMethod
-                                         StreamResultMode)
+                                         SSE_CONTENT_TYPE, HTTPMethod)
 from dashscope.common.error import UnsupportedHTTPMethod
 from dashscope.common.logging import logger
 from dashscope.common.utils import async_to_sync
@@ -21,9 +22,9 @@ class AioHttpRequest(AioBaseRequest):
                  stream: bool = True,
                  async_request: bool = False,
                  query: bool = False,
-                 stream_result_mode: str = StreamResultMode.ACCUMULATE,
                  timeout: int = DEFAULT_REQUEST_TIMEOUT_SECONDS,
-                 task_id: str = None
+                 task_id: str = None,
+                 user_agent: str = '') -> None:
         """HttpSSERequest, processing http server sent event stream.
 
         Args:
@@ -33,9 +34,11 @@ class AioHttpRequest(AioBaseRequest):
             stream (bool, optional): Is stream request. Defaults to True.
             timeout (int, optional): Total request timeout.
                 Defaults to DEFAULT_REQUEST_TIMEOUT_SECONDS.
+            user_agent (str, optional): Additional user agent string to
+                append. Defaults to ''.
         """
 
-        super().__init__()
+        super().__init__(user_agent=user_agent)
         self.url = url
         self.async_request = async_request
         self.headers = {
dashscope/api_entities/api_request_data.py
@@ -1,3 +1,5 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
 import json
 from urllib.parse import urlencode
 
@@ -41,10 +43,10 @@ class ApiRequestData():
     def add_parameters(self, **params):
         for key, value in params.items():
             self.parameters[key] = value
-
+
     def add_resources(self, resources):
         self.resources = resources
-
+
     def to_request_object(self) -> str:
         """Convert data to json, called from http request.
         Returns: