dashscope 1.24.6__py3-none-any.whl → 1.24.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dashscope might be problematic.
- dashscope/aigc/image_synthesis.py +23 -6
- dashscope/aigc/video_synthesis.py +20 -3
- dashscope/app/application.py +4 -0
- dashscope/audio/qwen_omni/omni_realtime.py +45 -1
- dashscope/audio/tts_v2/enrollment.py +9 -4
- dashscope/multimodal/multimodal_request_params.py +32 -2
- dashscope/version.py +1 -1
- {dashscope-1.24.6.dist-info → dashscope-1.24.8.dist-info}/METADATA +1 -1
- {dashscope-1.24.6.dist-info → dashscope-1.24.8.dist-info}/RECORD +13 -13
- {dashscope-1.24.6.dist-info → dashscope-1.24.8.dist-info}/WHEEL +0 -0
- {dashscope-1.24.6.dist-info → dashscope-1.24.8.dist-info}/entry_points.txt +0 -0
- {dashscope-1.24.6.dist-info → dashscope-1.24.8.dist-info}/licenses/LICENSE +0 -0
- {dashscope-1.24.6.dist-info → dashscope-1.24.8.dist-info}/top_level.txt +0 -0
dashscope/aigc/image_synthesis.py
CHANGED

@@ -138,8 +138,15 @@ class ImageSynthesis(BaseAsyncApi):
         has_upload = False
         if negative_prompt is not None:
             inputs[NEGATIVE_PROMPT] = negative_prompt
-        if images is not None:
-
+        if images is not None and images and len(images) > 0:
+            new_images = []
+            for image in images:
+                is_upload, new_image = check_and_upload_local(
+                    model, image, api_key)
+                if is_upload:
+                    has_upload = True
+                new_images.append(new_image)
+            inputs[IMAGES] = new_images
         if sketch_image_url is not None and sketch_image_url:
             is_upload, sketch_image_url = check_and_upload_local(
                 model, sketch_image_url, api_key)

@@ -178,10 +185,20 @@ class ImageSynthesis(BaseAsyncApi):
             headers['X-DashScope-OssResourceResolve'] = 'enable'
             kwargs['headers'] = headers

-
-
-
-
+        def __get_i2i_task(task, model) -> str:
+            # Handle the task parameter: prefer a valid task value when provided
+            if task is not None and task != "":
+                return task
+
+            # Determine the task type from the model name
+            if model is not None and model != "":
+                if 'imageedit' in model or "wan2.5-i2i" in model:
+                    return 'image2image'
+
+            # Default to the text-to-image task
+            return ImageSynthesis.task
+
+        task = __get_i2i_task(task, model)

         return inputs, kwargs, task
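The new handling runs each entry of `images` through `check_and_upload_local`, so local files and plain URLs can both be passed, and `__get_i2i_task` routes models whose name contains 'imageedit' or 'wan2.5-i2i' to the 'image2image' task. A minimal usage sketch, assuming `ImageSynthesis.call` forwards an `images` keyword down to `_get_input` as this diff implies; the model name and image URL are placeholders, not values taken from the diff:

```python
from http import HTTPStatus

from dashscope import ImageSynthesis

# Model name and image URL are placeholders for illustration only.
rsp = ImageSynthesis.call(
    model='wanx2.1-imageedit',                 # assumed image-edit model name
    prompt='replace the sky with a sunset',
    images=['https://example.com/input.png'],  # entries may also be local paths
)
if rsp.status_code == HTTPStatus.OK:
    print(rsp.output)
else:
    print(rsp.code, rsp.message)
```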
dashscope/aigc/video_synthesis.py
CHANGED

@@ -38,6 +38,7 @@ class VideoSynthesis(BaseAsyncApi):
             negative_prompt: str = None,
             template: str = None,
             img_url: str = None,
+            audio_url: str = None,
             api_key: str = None,
             extra_input: Dict = None,
             workspace: str = None,
@@ -56,6 +57,7 @@ class VideoSynthesis(BaseAsyncApi):
            negative_prompt (str): The negative prompt is the opposite of the prompt meaning.
            template (str): LoRa input, such as gufeng, katong, etc.
            img_url (str): The input image url, Generate the URL of the image referenced by the video.
+           audio_url (str): The input audio url
            api_key (str, optional): The api api_key. Defaults to None.
            workspace (str): The dashscope workspace id.
            extra_input (Dict): The extra input parameters.
@@ -76,6 +78,7 @@ class VideoSynthesis(BaseAsyncApi):
        return super().call(model,
                            prompt,
                            img_url=img_url,
+                           audio_url=audio_url,
                            api_key=api_key,
                            extend_prompt=extend_prompt,
                            negative_prompt=negative_prompt,
@@ -94,6 +97,7 @@ class VideoSynthesis(BaseAsyncApi):
                   model: str,
                   prompt: Any = None,
                   img_url: str = None,
+                  audio_url: str = None,
                   # """@deprecated, use prompt_extend in parameters """
                   extend_prompt: bool = True,
                   negative_prompt: str = None,
@@ -125,6 +129,13 @@ class VideoSynthesis(BaseAsyncApi):
                has_upload = True
            inputs['img_url'] = res_img_url

+        if audio_url is not None and audio_url:
+            is_upload, res_audio_url = check_and_upload_local(
+                model, audio_url, api_key)
+            if is_upload:
+                has_upload = True
+            inputs['audio_url'] = res_audio_url
+
        if head_frame is not None and head_frame:
            is_upload, res_head_frame = check_and_upload_local(
                model, head_frame, api_key)
@@ -172,6 +183,7 @@ class VideoSynthesis(BaseAsyncApi):
                   model: str,
                   prompt: Any = None,
                   img_url: str = None,
+                  audio_url: str = None,
                   # """@deprecated, use prompt_extend in parameters """
                   extend_prompt: bool = True,
                   negative_prompt: str = None,
@@ -194,6 +206,7 @@ class VideoSynthesis(BaseAsyncApi):
            negative_prompt (str): The negative prompt is the opposite of the prompt meaning.
            template (str): LoRa input, such as gufeng, katong, etc.
            img_url (str): The input image url, Generate the URL of the image referenced by the video.
+           audio_url (str): The input audio url.
            api_key (str, optional): The api api_key. Defaults to None.
            workspace (str): The dashscope workspace id.
            extra_input (Dict): The extra input parameters.
@@ -215,7 +228,7 @@ class VideoSynthesis(BaseAsyncApi):
        task_group, function = _get_task_group_and_task(__name__)

        inputs, kwargs, task = cls._get_input(
-           model, prompt, img_url, extend_prompt, negative_prompt, template, api_key,
+           model, prompt, img_url, audio_url, extend_prompt, negative_prompt, template, api_key,
            extra_input, task, function, head_frame, tail_frame,
            first_frame_url, last_frame_url, **kwargs)

@@ -339,6 +352,7 @@ class AioVideoSynthesis(BaseAsyncAioApi):
                   model: str,
                   prompt: Any = None,
                   img_url: str = None,
+                  audio_url: str = None,
                   # """@deprecated, use prompt_extend in parameters """
                   extend_prompt: bool = True,
                   negative_prompt: str = None,
@@ -361,6 +375,7 @@ class AioVideoSynthesis(BaseAsyncAioApi):
            negative_prompt (str): The negative prompt is the opposite of the prompt meaning.
            template (str): LoRa input, such as gufeng, katong, etc.
            img_url (str): The input image url, Generate the URL of the image referenced by the video.
+           audio_url (str): The input audio url.
            api_key (str, optional): The api api_key. Defaults to None.
            workspace (str): The dashscope workspace id.
            extra_input (Dict): The extra input parameters.
@@ -380,7 +395,7 @@ class AioVideoSynthesis(BaseAsyncAioApi):
        """
        task_group, f = _get_task_group_and_task(__name__)
        inputs, kwargs, task = VideoSynthesis._get_input(
-           model, prompt, img_url, extend_prompt, negative_prompt, template, api_key,
+           model, prompt, img_url, audio_url, extend_prompt, negative_prompt, template, api_key,
            extra_input, task, f, head_frame, tail_frame,
            first_frame_url, last_frame_url, **kwargs)
        response = await super().call(model, inputs, task_group, task, f, api_key, workspace, **kwargs)
@@ -391,6 +406,7 @@ class AioVideoSynthesis(BaseAsyncAioApi):
                   model: str,
                   prompt: Any = None,
                   img_url: str = None,
+                  audio_url: str = None,
                   # """@deprecated, use prompt_extend in parameters """
                   extend_prompt: bool = True,
                   negative_prompt: str = None,
@@ -413,6 +429,7 @@ class AioVideoSynthesis(BaseAsyncAioApi):
            negative_prompt (str): The negative prompt is the opposite of the prompt meaning.
            template (str): LoRa input, such as gufeng, katong, etc.
            img_url (str): The input image url, Generate the URL of the image referenced by the video.
+           audio_url (str): The input audio url.
            api_key (str, optional): The api api_key. Defaults to None.
            workspace (str): The dashscope workspace id.
            extra_input (Dict): The extra input parameters.
@@ -434,7 +451,7 @@ class AioVideoSynthesis(BaseAsyncAioApi):
        task_group, function = _get_task_group_and_task(__name__)

        inputs, kwargs, task = VideoSynthesis._get_input(
-           model, prompt, img_url, extend_prompt, negative_prompt, template, api_key,
+           model, prompt, img_url, audio_url, extend_prompt, negative_prompt, template, api_key,
            extra_input, task, function, head_frame, tail_frame,
            first_frame_url, last_frame_url, **kwargs)

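`audio_url` is threaded through the sync and async entry points and, like `img_url`, is passed through `check_and_upload_local`, so it may point at a local file or a public URL. A minimal sketch; the model name and URLs are placeholders, not values taken from this diff:

```python
from http import HTTPStatus

from dashscope import VideoSynthesis

# Model name and URLs are placeholders for illustration only.
rsp = VideoSynthesis.call(
    model='your-video-model',                     # assumed model name
    prompt='a person presenting in a studio',
    img_url='https://example.com/reference.png',
    audio_url='https://example.com/speech.wav',   # new in 1.24.8
)
if rsp.status_code == HTTPStatus.OK:
    print(rsp.output)
```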
dashscope/app/application.py
CHANGED
@@ -196,4 +196,8 @@ class Application(BaseApi):
         if image_list is not None and image_list:
             input_param['image_list'] = image_list

+        file_list = kwargs.pop('file_list', None)
+        if file_list is not None and file_list:
+            input_param['file_list'] = file_list
+
         return input_param, {**parameters, **kwargs}
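`file_list` is popped out of `**kwargs` and copied into the request input next to `image_list`, so callers pass it as an ordinary keyword argument to `Application.call`. A minimal sketch; the app id and file identifiers are placeholders, and the expected element format is not defined by this diff:

```python
from http import HTTPStatus

from dashscope import Application

# app_id and file ids below are placeholders.
rsp = Application.call(
    app_id='your-app-id',
    prompt='Summarize the attached documents.',
    file_list=['file-id-1', 'file-id-2'],   # forwarded as input.file_list (new in 1.24.8)
)
if rsp.status_code == HTTPStatus.OK:
    print(rsp.output.text)
```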
dashscope/audio/qwen_omni/omni_realtime.py
CHANGED

@@ -4,6 +4,7 @@ import json
 import platform
 import threading
 import time
+from dataclasses import field, dataclass
 from typing import List
 import uuid
 from enum import Enum, unique
@@ -29,6 +30,26 @@ class OmniRealtimeCallback:
         pass


+@dataclass
+class TranslationParams:
+    """
+    TranslationParams
+    """
+    language: str = field(default=None)
+
+
+@dataclass
+class TranscriptionParams:
+    """
+    TranscriptionParams
+    """
+    language: str = field(default=None)
+    sample_rate: int = field(default=16000)
+    input_audio_format: str = field(default="pcm")
+    corpus: dict = field(default=None)
+    corpus_text: str = field(default=None)
+
+
 @unique
 class AudioFormat(Enum):
     # format, sample_rate, channels, bit_rate, name
@@ -171,7 +192,7 @@ class OmniRealtimeConversation:

     def update_session(self,
                        output_modalities: List[MultiModality],
-                       voice: str,
+                       voice: str = None,
                        input_audio_format: AudioFormat = AudioFormat.
                        PCM_16000HZ_MONO_16BIT,
                        output_audio_format: AudioFormat = AudioFormat.
@@ -184,6 +205,8 @@ class OmniRealtimeConversation:
                        turn_detection_threshold: float = 0.2,
                        turn_detection_silence_duration_ms: int = 800,
                        turn_detection_param: dict = None,
+                       translation_params: TranslationParams = None,
+                       transcription_params: TranscriptionParams = None,
                        **kwargs) -> None:
         '''
         update session configuration, should be used before create response
@@ -206,6 +229,13 @@ class OmniRealtimeConversation:
             In a quiet environment, it may be necessary to decrease the threshold to improve sensitivity
         turn_detection_silence_duration_ms: int
             duration of silence in milliseconds to detect turn, range [200, 6000]
+        translation_params: TranslationParams
+            translation params, include language. Only effective with qwen3-livetranslate-flash-realtime model or
+            further models. Do not set this parameter for other models.
+        transcription_params: TranscriptionParams
+            transcription params, include language, sample_rate, input_audio_format, corpus.
+            Only effective with qwen3-asr-flash-realtime model or
+            further models. Do not set this parameter for other models.
         '''
         self.config = {
             'modalities': [m.value for m in output_modalities],
@@ -230,6 +260,20 @@ class OmniRealtimeConversation:
                 self.config['turn_detection'].update(turn_detection_param)
         else:
             self.config['turn_detection'] = None
+        if translation_params is not None:
+            self.config['translation'] = {
+                'language': translation_params.language
+            }
+        if transcription_params is not None:
+            self.config['language'] = transcription_params.language
+            if transcription_params.corpus is not None:
+                self.config['corpus'] = transcription_params.corpus
+            if transcription_params.corpus_text is not None:
+                self.config['corpus'] = {
+                    "text": transcription_params.corpus_text
+                }
+            self.config['input_audio_format'] = transcription_params.input_audio_format
+            self.config['sample_rate'] = transcription_params.sample_rate
         self.config.update(kwargs)
         self.__send_str(
             json.dumps({
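The two new dataclasses let `update_session` populate the session config: `TranslationParams.language` becomes `translation.language`, while `TranscriptionParams` feeds `language`, `corpus` (either the `corpus` dict or `{"text": corpus_text}`), `input_audio_format` and `sample_rate`. A sketch of passing transcription parameters on an already-connected conversation; the import path, the `MultiModality.TEXT` member and the language value are assumptions based on this diff, not verified API facts:

```python
from dashscope.audio.qwen_omni.omni_realtime import (MultiModality,
                                                     TranscriptionParams)

# `conversation` is assumed to be an OmniRealtimeConversation that is already
# connected to a qwen3-asr-flash-realtime session (per the docstring above).
transcription = TranscriptionParams(
    language='zh',                  # ASR language hint
    sample_rate=16000,
    input_audio_format='pcm',
    corpus_text='DashScope, Qwen',  # serialized as config['corpus'] = {"text": ...}
)
conversation.update_session(
    output_modalities=[MultiModality.TEXT],   # assumed enum member
    transcription_params=transcription,
)
```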
dashscope/audio/tts_v2/enrollment.py
CHANGED

@@ -68,20 +68,25 @@ class VoiceEnrollmentService(BaseApi):
         logger.debug('>>>>recv', response)
         return response

-    def create_voice(self, target_model: str, prefix: str, url: str) -> str:
+    def create_voice(self, target_model: str, prefix: str, url: str, language_hints: List[str] = None) -> str:
         '''
         Create a new cloned voice
         param: target_model the speech synthesis model version the cloned voice targets
         param: prefix custom voice prefix; digits and lowercase letters only, fewer than ten characters.
         param: url URL of the audio file used for cloning
+        param: language_hints target language of the cloned voice
         return: voice_id
         '''
-
+
+        input_params = {
            'action': 'create_voice',
            'target_model': target_model,
            'prefix': prefix,
-           'url': url
-       }
+           'url': url
+       }
+       if language_hints is not None:
+           input_params['language_hints'] = language_hints
+       response = self.__call_with_input(input_params)
        self._last_request_id = response.request_id
        if response.status_code == 200:
            return response.output['voice_id']
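`language_hints` is only attached to the enrollment request when provided, so existing callers are unaffected. A minimal sketch with placeholder values (target model, audio URL and language code are illustrative, not taken from this diff):

```python
from dashscope.audio.tts_v2 import VoiceEnrollmentService

# target_model, url and the language code are placeholders for illustration.
service = VoiceEnrollmentService()
voice_id = service.create_voice(
    target_model='cosyvoice-v2',
    prefix='demo1',
    url='https://example.com/sample.wav',
    language_hints=['zh'],          # new in 1.24.8; optional
)
print('created voice:', voice_id)
```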
dashscope/multimodal/multimodal_request_params.py
CHANGED

@@ -72,7 +72,31 @@ class RequestBodyInput(DashPayloadInput):
             "directive": self.directive,
             "dialog_id": self.dialog_id
         }
+@dataclass
+class AsrPostProcessing:
+    replace_words: list = field(default=None)
+
+    def to_dict(self):
+        if self.replace_words is None:
+            return None
+        if len(self.replace_words) == 0:
+            return None
+        return {
+            "replace_words": [word.to_dict() for word in self.replace_words]
+        }
+
+@dataclass
+class ReplaceWord:
+    source: str = field(default=None)
+    target: str = field(default=None)
+    match_mode: str = field(default=None)

+    def to_dict(self):
+        return {
+            "source": self.source,
+            "target": self.target,
+            "match_mode": self.match_mode
+        }

 @dataclass
 class Upstream:
@@ -80,7 +104,9 @@ class Upstream:
     audio_format: str = field(default="pcm")  # upstream audio format, default pcm; pcm/opus supported
     type: str = field(default="AudioOnly")  # upstream type: AudioOnly for voice-only calls; AudioAndVideo uploads video
     mode: str = field(default="tap2talk")  # client interaction mode: push2talk/tap2talk/duplex
-
+    sample_rate: int = field(default=16000)  # audio sample rate
+    vocabulary_id: str = field(default=None)
+    asr_post_processing: AsrPostProcessing = field(default=None)
     pass_through_params: dict = field(default=None)

     def to_dict(self):
@@ -88,8 +114,12 @@ class Upstream:
             "type": self.type,
             "mode": self.mode,
             "audio_format": self.audio_format,
-
+            "sample_rate": self.sample_rate,
+            "vocabulary_id": self.vocabulary_id,
         }
+        if self.asr_post_processing is not None:
+            upstream["asr_post_processing"] = self.asr_post_processing.to_dict()
+
         if self.pass_through_params is not None:
             upstream.update(self.pass_through_params)
         return upstream
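With the new fields, `Upstream.to_dict()` always emits `sample_rate` and `vocabulary_id` and adds an `asr_post_processing` block only when one is set, with each `ReplaceWord` serialized via its own `to_dict()`. A small sketch of building the payload, assuming the remaining `Upstream` fields keep their defaults; the vocabulary id and `match_mode` value are placeholders, since this diff does not define their allowed values:

```python
from dashscope.multimodal.multimodal_request_params import (AsrPostProcessing,
                                                            ReplaceWord,
                                                            Upstream)

# vocabulary_id and match_mode are placeholder values.
upstream = Upstream(
    mode='duplex',
    audio_format='pcm',
    sample_rate=16000,                         # new in 1.24.8
    vocabulary_id='vocab-xxxxxxxx',            # new in 1.24.8
    asr_post_processing=AsrPostProcessing(
        replace_words=[
            ReplaceWord(source='da shi', target='DashScope', match_mode='full'),
        ]),
)
print(upstream.to_dict())
```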
dashscope/version.py
CHANGED
{dashscope-1.24.6.dist-info → dashscope-1.24.8.dist-info}/RECORD
CHANGED

@@ -3,15 +3,15 @@ dashscope/cli.py,sha256=64oGkevgX0RHPPmMg0sevXDgaFLQNA_0vdtjQ7Z2pHM,26492
 dashscope/files.py,sha256=vRDQygm3lOqBZR73o7KNHs1iTBVuvLncuwJNxIYjzAU,3981
 dashscope/model.py,sha256=B5v_BtYLPqj6raClejBgdKg6WTGwhH_f-20pvsQqmsk,1491
 dashscope/models.py,sha256=dE4mzXkl85G343qVylSGpURPRdA5pZSqXlx6PcxqC_Q,1275
-dashscope/version.py,sha256=
+dashscope/version.py,sha256=4lPAnaxnK8FAiwo6FAKr_rfwI7P6TqbukJDF2ZFJiFU,74
 dashscope/aigc/__init__.py,sha256=kYvYEoRK-NUHyMWpBDNQBz4fVA__uOhHRK2kDTBaWgk,617
 dashscope/aigc/chat_completion.py,sha256=ONlyyssIbfaKKcFo7cEKhHx5OCF2XX810HFzIExW1ho,14813
 dashscope/aigc/code_generation.py,sha256=p_mxDKJLQMW0IjFD46JRlZuEZCRESSVKEfLlAevBtqw,10936
 dashscope/aigc/conversation.py,sha256=95xEEY4ThZJysj5zy3aMw7ql9KLJVfD_1iHv9QZ17Ew,14282
 dashscope/aigc/generation.py,sha256=xMcMu16rICTdjZiD_sPqYV_Ltdp4ewGzzfC7JD9VApY,17948
-dashscope/aigc/image_synthesis.py,sha256=
+dashscope/aigc/image_synthesis.py,sha256=sKKKmc11nwNSBEuhLb6ymUr5pxVAAl87uB-ALuh0dLE,28851
 dashscope/aigc/multimodal_conversation.py,sha256=k-OPWwgrWJBb7LiUdFuuP9X6sR5ukbaRHPqFTSikO7Y,12380
-dashscope/aigc/video_synthesis.py,sha256=
+dashscope/aigc/video_synthesis.py,sha256=Wps8ZE3rDRFoebh7w1uvLNSyKHKk1BCTXmq7ryG9iPQ,24885
 dashscope/api_entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dashscope/api_entities/aiohttp_request.py,sha256=1L7XdIJ9L65cQmX8x9JCR4t5hNIMDrbiWADfKKp9yfo,10280
 dashscope/api_entities/api_request_data.py,sha256=04rpYPNK1HkT3iTPJmZpquH621xcBbe8R8EGrDJSLt0,5514
@@ -23,7 +23,7 @@ dashscope/api_entities/encryption.py,sha256=rUCZx3wwVvS5oyKXEeWgyWPxM8Y5d4AaVdgx
 dashscope/api_entities/http_request.py,sha256=MTxYsbkK8oYWDp8ZPjrkdY9YbnQ9SEIy87riyJidMXo,16484
 dashscope/api_entities/websocket_request.py,sha256=PS0FU854-HjTbKa68f4GHa7-noFRMzKySJGfPkrrBjw,16146
 dashscope/app/__init__.py,sha256=xvSvU8O7m5u7vgIvJXTJektJZxmjT2Rpt_YwePH88XE,113
-dashscope/app/application.py,sha256=
+dashscope/app/application.py,sha256=79wnnvAAjWBJ8TtMUN6-B4FHBV7twkAOzhXg7yDJQxE,9620
 dashscope/app/application_response.py,sha256=XO6iOZlt7OXulvFS71zwAq_HXYkn3HLJdAimTWPP0B4,8568
 dashscope/assistants/__init__.py,sha256=hjCTuv13yFaXyUqlexAU-RaO0Ahq3P7VK9_LkSbkGVU,434
 dashscope/assistants/assistant_types.py,sha256=qVDSy0xcsMq_sAD7t_ppoGLBN2QDiHqarAAlW_CDDtY,4478
@@ -37,7 +37,7 @@ dashscope/audio/asr/transcription.py,sha256=lYzPjh7jJQwjMoxx8-AY0YCMBKNKO0bi7xd5
 dashscope/audio/asr/translation_recognizer.py,sha256=JgBmhkIl_kqH8uVwop6Fba5KlXccftKFrhaygN9PKjU,39680
 dashscope/audio/asr/vocabulary.py,sha256=N0pMS2x1lDxqJ14FgTGKctfuVkR2_hlEsCNWFcgYpTY,6717
 dashscope/audio/qwen_omni/__init__.py,sha256=MEFxmyxr5H6bW22l_R9073Pl6Ka6knvhrATGT-4UBjI,298
-dashscope/audio/qwen_omni/omni_realtime.py,sha256=
+dashscope/audio/qwen_omni/omni_realtime.py,sha256=b7t14nsciA8YcJ4MGr2GzmDxbgBR2wpbml9ZG_GNqiI,16722
 dashscope/audio/qwen_tts/__init__.py,sha256=JS3axY1grqO0aTIJufZ3KS1JsU6yf6y4K2CQlNvUK9I,132
 dashscope/audio/qwen_tts/speech_synthesizer.py,sha256=7LHR-PXhn-VE1cCOp_82Jq0zE9rMc3xy3dszUeyLLNs,2927
 dashscope/audio/qwen_tts_realtime/__init__.py,sha256=vVkmeJr_mEAn_O0Rh5AU3ICg6qIZqppUryJ5lY8VYPo,254
@@ -45,7 +45,7 @@ dashscope/audio/qwen_tts_realtime/qwen_tts_realtime.py,sha256=uMLglxRjUZgol9Z7MT
 dashscope/audio/tts/__init__.py,sha256=xYpMFseUZGgqgj_70zcX2VsLv-L7qxJ3d-bbdj_hO0I,245
 dashscope/audio/tts/speech_synthesizer.py,sha256=vD1xQV-rew8qAsIaAGH5amsNtB0SqdtNhVHhJHGQ-xk,7622
 dashscope/audio/tts_v2/__init__.py,sha256=me9a3_7KsHQxcJ8hx4SeKlY1e_ThHVvGMw7Yn0uoscM,333
-dashscope/audio/tts_v2/enrollment.py,sha256
+dashscope/audio/tts_v2/enrollment.py,sha256=ekeZJz_swhI0OwRANuUwsZjdP0rRoUergSsCUQmsh8E,6577
 dashscope/audio/tts_v2/speech_synthesizer.py,sha256=p764P4TYwLkvvPCpA4VnFwlNbIJbuNbp2d9mxgni7Ws,22047
 dashscope/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dashscope/client/base_api.py,sha256=znAJ65DeHiFw1H7FWK0YrkLz1CoNcyqUxF8EJ3gujeY,52523
@@ -73,7 +73,7 @@ dashscope/multimodal/__init__.py,sha256=fyqeolbDLWVn5wSpPZ3nAOnUBRF9k6mlsy6dCmgj
 dashscope/multimodal/dialog_state.py,sha256=CtOdfGWhq0ePG3bc8-7inhespETtPD4QDli1513hd1A,1522
 dashscope/multimodal/multimodal_constants.py,sha256=z_QVq01E43FAqKQnDu9vdf89d1zuYlWyANewWTEXVJM,1282
 dashscope/multimodal/multimodal_dialog.py,sha256=HymlaQYp7SgJdoKbT27SNiviyRRoM91zklNBwTHmm1Q,23939
-dashscope/multimodal/multimodal_request_params.py,sha256=
+dashscope/multimodal/multimodal_request_params.py,sha256=iOnATOdv4aRp5ffU8lY2Gu0UNlz-sBCEun2zFG4saFk,9356
 dashscope/multimodal/tingwu/__init__.py,sha256=Gi9GEM0bdeJlZpvyksSeHOc2--_tG5aF6QAx6TAS2fE,225
 dashscope/multimodal/tingwu/tingwu.py,sha256=01d-QOeuB1QmRhiZqbXJ8pHoGqT0C-xZTjIs_ZBXOyw,2613
 dashscope/multimodal/tingwu/tingwu_realtime.py,sha256=oBeqrZit3uBZHuyI7m9VILz2qaqJRMO0-Nm2eJ5Q63g,20215
@@ -100,9 +100,9 @@ dashscope/tokenizers/tokenizer.py,sha256=3FQVDvMNkCW9ccYeJdjrd_PIMMD3Xv7aNZkaYOE
 dashscope/tokenizers/tokenizer_base.py,sha256=5EJIFuizMWESEmLmbd38yJnfeHmPnzZPwsO4aOGjpl4,707
 dashscope/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dashscope/utils/oss_utils.py,sha256=aZIHlMN2JOfVw6kp0SVrMw_N1MfoTcR_-wiRbJ7DgHw,7501
-dashscope-1.24.
-dashscope-1.24.
-dashscope-1.24.
-dashscope-1.24.
-dashscope-1.24.
-dashscope-1.24.
+dashscope-1.24.8.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+dashscope-1.24.8.dist-info/METADATA,sha256=vGaMepDH6HXkvDgknJ3njBi4vDe5oqG82vM1Yw569Dk,7146
+dashscope-1.24.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dashscope-1.24.8.dist-info/entry_points.txt,sha256=e9C3sOf9zDYL0O5ROEGX6FT8w-QK_kaGRWmPZDHAFys,49
+dashscope-1.24.8.dist-info/top_level.txt,sha256=woqavFJK9zas5xTqynmALqOtlafghjsk63Xk86powTU,10
+dashscope-1.24.8.dist-info/RECORD,,
Files without changes: {dashscope-1.24.6.dist-info → dashscope-1.24.8.dist-info}/WHEEL, entry_points.txt, licenses/LICENSE, top_level.txt