dashscope 1.24.4__py3-none-any.whl → 1.24.6__py3-none-any.whl
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Potentially problematic release: this version of dashscope might be problematic.
- dashscope/__init__.py +2 -1
- dashscope/aigc/__init__.py +6 -4
- dashscope/aigc/multimodal_conversation.py +55 -20
- dashscope/api_entities/dashscope_response.py +31 -3
- dashscope/audio/qwen_tts_realtime/qwen_tts_realtime.py +5 -0
- dashscope/embeddings/multimodal_embedding.py +70 -1
- dashscope/version.py +1 -1
- {dashscope-1.24.4.dist-info → dashscope-1.24.6.dist-info}/METADATA +1 -1
- {dashscope-1.24.4.dist-info → dashscope-1.24.6.dist-info}/RECORD +13 -13
- {dashscope-1.24.4.dist-info → dashscope-1.24.6.dist-info}/WHEEL +0 -0
- {dashscope-1.24.4.dist-info → dashscope-1.24.6.dist-info}/entry_points.txt +0 -0
- {dashscope-1.24.4.dist-info → dashscope-1.24.6.dist-info}/licenses/LICENSE +0 -0
- {dashscope-1.24.4.dist-info → dashscope-1.24.6.dist-info}/top_level.txt +0 -0
dashscope/__init__.py
CHANGED
@@ -24,7 +24,7 @@ from dashscope.embeddings.batch_text_embedding_response import \
     BatchTextEmbeddingResponse
 from dashscope.embeddings.multimodal_embedding import (
     MultiModalEmbedding, MultiModalEmbeddingItemAudio,
-    MultiModalEmbeddingItemImage, MultiModalEmbeddingItemText)
+    MultiModalEmbeddingItemImage, MultiModalEmbeddingItemText, AioMultiModalEmbedding)
 from dashscope.embeddings.text_embedding import TextEmbedding
 from dashscope.files import Files
 from dashscope.models import Models
@@ -55,6 +55,7 @@ __all__ = [
     Models,
     TextEmbedding,
     MultiModalEmbedding,
+    AioMultiModalEmbedding,
     MultiModalEmbeddingItemAudio,
     MultiModalEmbeddingItemImage,
     MultiModalEmbeddingItemText,
dashscope/aigc/__init__.py
CHANGED
@@ -1,18 +1,20 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
-
 from .conversation import Conversation, History, HistoryItem
-from .generation import Generation
-from .image_synthesis import ImageSynthesis
+from .generation import Generation, AioGeneration
+from .image_synthesis import ImageSynthesis, AioImageSynthesis
 from .multimodal_conversation import MultiModalConversation, AioMultiModalConversation
-from .video_synthesis import VideoSynthesis
+from .video_synthesis import VideoSynthesis, AioVideoSynthesis
 
 __all__ = [
     Generation,
+    AioGeneration,
     Conversation,
     HistoryItem,
     History,
     ImageSynthesis,
+    AioImageSynthesis,
     MultiModalConversation,
     AioMultiModalConversation,
     VideoSynthesis,
+    AioVideoSynthesis,
 ]
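The new async entry points exported above are the async counterparts of the existing sync classes. A minimal sketch for AioGeneration, assuming its call signature mirrors Generation.call (the generation.py change itself is not shown in this diff); the model name and api_key are placeholders:

    import asyncio

    from dashscope.aigc import AioGeneration

    async def main():
        resp = await AioGeneration.call(
            model='qwen-turbo',  # placeholder model name
            messages=[{'role': 'user', 'content': 'Hello'}],
            api_key='sk-...')
        print(resp.output)

    asyncio.run(main())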
dashscope/aigc/multimodal_conversation.py
CHANGED
@@ -24,9 +24,12 @@ class MultiModalConversation(BaseApi):
     def call(
             cls,
             model: str,
-            messages: List,
+            messages: List = None,
             api_key: str = None,
             workspace: str = None,
+            text: str = None,
+            voice: str = None,
+            language_type: str = None,
             **kwargs
     ) -> Union[MultiModalConversationResponse, Generator[
             MultiModalConversationResponse, None, None]]:
@@ -55,6 +58,10 @@ class MultiModalConversation(BaseApi):
                if None, will retrieve by rule [1].
                [1]: https://help.aliyun.com/zh/dashscope/developer-reference/api-key-settings. # noqa E501
            workspace (str): The dashscope workspace id.
+            text (str): The text to generate.
+            voice (str): The voice name of qwen tts, include 'Cherry'/'Ethan'/'Sunny'/'Dylan' and so on,
+                you can get the total voice list : https://help.aliyun.com/zh/model-studio/qwen-tts.
+            language_type (str): The synthesized language type, default is 'auto', useful for [qwen3-tts].
            **kwargs:
                stream(bool, `optional`): Enable server-sent events
                    (ref: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) # noqa E501
@@ -70,6 +77,7 @@ class MultiModalConversation(BaseApi):
                    considered[qwen-turbo,bailian-v1].
                top_k(float, `optional`):
 
+
        Raises:
            InvalidInput: The history and auto_history are mutually exclusive.
 
@@ -78,18 +86,28 @@ class MultiModalConversation(BaseApi):
            Generator[MultiModalConversationResponse, None, None]]: If
            stream is True, return Generator, otherwise MultiModalConversationResponse.
        """
-        if (messages is None or not messages):
-            raise InputRequired('prompt or messages is required!')
        if model is None or not model:
            raise ModelRequired('Model is required!')
        task_group, _ = _get_task_group_and_task(__name__)
-
-
-
-
-
-
-
+        input = {}
+        msg_copy = None
+
+        if messages is not None and messages:
+            msg_copy = copy.deepcopy(messages)
+            has_upload = cls._preprocess_messages(model, msg_copy, api_key)
+            if has_upload:
+                headers = kwargs.pop('headers', {})
+                headers['X-DashScope-OssResourceResolve'] = 'enable'
+                kwargs['headers'] = headers
+
+        if text is not None and text:
+            input.update({'text': text})
+        if voice is not None and voice:
+            input.update({'voice': voice})
+        if language_type is not None and language_type:
+            input.update({'language_type': language_type})
+        if msg_copy is not None:
+            input.update({'messages': msg_copy})
        response = super().call(model=model,
                                task_group=task_group,
                                task=MultiModalConversation.task,
@@ -145,9 +163,12 @@ class AioMultiModalConversation(BaseAioApi):
     async def call(
             cls,
             model: str,
-            messages: List,
+            messages: List = None,
             api_key: str = None,
             workspace: str = None,
+            text: str = None,
+            voice: str = None,
+            language_type: str = None,
             **kwargs
     ) -> Union[MultiModalConversationResponse, Generator[
             MultiModalConversationResponse, None, None]]:
@@ -176,6 +197,10 @@ class AioMultiModalConversation(BaseAioApi):
                if None, will retrieve by rule [1].
                [1]: https://help.aliyun.com/zh/dashscope/developer-reference/api-key-settings. # noqa E501
            workspace (str): The dashscope workspace id.
+            text (str): The text to generate.
+            voice (str): The voice name of qwen tts, include 'Cherry'/'Ethan'/'Sunny'/'Dylan' and so on,
+                you can get the total voice list : https://help.aliyun.com/zh/model-studio/qwen-tts.
+            language_type (str): The synthesized language type, default is 'auto', useful for [qwen3-tts].
            **kwargs:
                stream(bool, `optional`): Enable server-sent events
                    (ref: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) # noqa E501
@@ -199,18 +224,28 @@ class AioMultiModalConversation(BaseAioApi):
            Generator[MultiModalConversationResponse, None, None]]: If
            stream is True, return Generator, otherwise MultiModalConversationResponse.
        """
-        if (messages is None or not messages):
-            raise InputRequired('prompt or messages is required!')
        if model is None or not model:
            raise ModelRequired('Model is required!')
        task_group, _ = _get_task_group_and_task(__name__)
-
-
-
-
-
-
-
+        input = {}
+        msg_copy = None
+
+        if messages is not None and messages:
+            msg_copy = copy.deepcopy(messages)
+            has_upload = cls._preprocess_messages(model, msg_copy, api_key)
+            if has_upload:
+                headers = kwargs.pop('headers', {})
+                headers['X-DashScope-OssResourceResolve'] = 'enable'
+                kwargs['headers'] = headers
+
+        if text is not None and text:
+            input.update({'text': text})
+        if voice is not None and voice:
+            input.update({'voice': voice})
+        if language_type is not None and language_type:
+            input.update({'language_type': language_type})
+        if msg_copy is not None:
+            input.update({'messages': msg_copy})
        response = await super().call(model=model,
                                      task_group=task_group,
                                      task=AioMultiModalConversation.task,
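With messages now optional, the same call can carry plain TTS-style input. A minimal sketch of the new text/voice/language_type path, based on the signature and docstring above (the model name is illustrative; see the qwen-tts voice list linked in the docstring):

    from dashscope import MultiModalConversation

    resp = MultiModalConversation.call(
        model='qwen-tts',              # assumed model name; pick one from the qwen-tts docs
        text='Hello from DashScope.',  # new: sent as input['text'] instead of messages
        voice='Cherry',                # new: voice name ('Cherry'/'Ethan'/'Sunny'/'Dylan', ...)
        language_type='auto',          # new: synthesized language type, used by qwen3-tts
        api_key='sk-...')
    if resp.status_code == 200:
        print(resp.output.audio.url)   # audio result; see the Audio field added below
        print(resp.usage.characters)   # new character-count usage field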
dashscope/api_entities/dashscope_response.py
CHANGED
@@ -152,6 +152,26 @@ class Choice(DictMixin):
                          **kwargs)
 
 
+@dataclass(init=False)
+class Audio(DictMixin):
+    data: str
+    url: str
+    id: str
+    expires_at: int
+
+    def __init__(self,
+                 data: str = None,
+                 url: str = None,
+                 id: str = None,
+                 expires_at: int = None,
+                 **kwargs):
+        super().__init__(data=data,
+                         url=url,
+                         id=id,
+                         expires_at=expires_at,
+                         **kwargs)
+
+
 @dataclass(init=False)
 class GenerationOutput(DictMixin):
     text: str
@@ -217,20 +237,25 @@ class GenerationResponse(DashScopeAPIResponse):
 @dataclass(init=False)
 class MultiModalConversationOutput(DictMixin):
     choices: List[Choice]
+    audio: Audio
 
     def __init__(self,
                  text: str = None,
                  finish_reason: str = None,
                  choices: List[Choice] = None,
+                 audio: Audio = None,
                  **kwargs):
         chs = None
         if choices is not None:
             chs = []
             for choice in choices:
                 chs.append(Choice(**choice))
+        if audio is not None:
+            audio = Audio(**audio)
         super().__init__(text=text,
                          finish_reason=finish_reason,
                          choices=chs,
+                         audio=audio,
                          **kwargs)
 
 
@@ -238,15 +263,18 @@ class MultiModalConversationOutput(DictMixin):
 class MultiModalConversationUsage(DictMixin):
     input_tokens: int
     output_tokens: int
+    characters: int
 
     # TODO add image usage info.
 
     def __init__(self,
                  input_tokens: int = 0,
                  output_tokens: int = 0,
+                 characters: int = 0,
                  **kwargs):
         super().__init__(input_tokens=input_tokens,
                          output_tokens=output_tokens,
+                         characters=characters,
                          **kwargs)
 
 
@@ -378,7 +406,7 @@ class RecognitionResponse(DashScopeAPIResponse):
         """
         result = False
         if sentence is not None and 'end_time' in sentence and sentence[
-
+                'end_time'] is not None:
             result = True
         return result
 
@@ -445,8 +473,8 @@ class ImageSynthesisOutput(DictMixin):
     results: List[ImageSynthesisResult]
 
     def __init__(self,
-                 task_id: str =
-                 task_status: str =
+                 task_id: str = None,
+                 task_status: str = None,
                  results: List[ImageSynthesisResult] = [],
                  **kwargs):
         res = []
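A short sketch of how the new fields surface on the response classes above; the payload is hand-written, not a real API response:

    from dashscope.api_entities.dashscope_response import (
        MultiModalConversationOutput, MultiModalConversationUsage)

    output = MultiModalConversationOutput(
        finish_reason='stop',
        audio={'url': 'https://example.com/out.wav',  # placeholder values
               'id': 'audio-123',
               'expires_at': 1730000000})
    print(output.audio.url)   # the audio dict is wrapped into the new Audio dataclass

    usage = MultiModalConversationUsage(input_tokens=12, output_tokens=0, characters=42)
    print(usage.characters)   # new character-count field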
dashscope/audio/qwen_tts_realtime/qwen_tts_realtime.py
CHANGED
@@ -158,6 +158,7 @@ class QwenTtsRealtime:
                        response_format: AudioFormat = AudioFormat.
                        PCM_24000HZ_MONO_16BIT,
                        mode: str = 'server_commit',
+                       language_type: str = None,
                        **kwargs) -> None:
         '''
         update session configuration, should be used before create response
@@ -170,6 +171,8 @@ class QwenTtsRealtime:
             output audio format
         mode: str
             response mode, server_commit or commit
+        language_type: str
+            language type for synthesized audio, default is 'auto'
         '''
         self.config = {
             'voice': voice,
@@ -177,6 +180,8 @@ class QwenTtsRealtime:
             'response_format': response_format.format,
             'sample_rate': response_format.sample_rate,
         }
+        if language_type is not None:
+            self.config['language_type'] = language_type
         self.config.update(kwargs)
         self.__send_str(
             json.dumps({
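In use, the new option is just an extra keyword on the session update. A fragment only: the tts instance below is assumed to be an already-connected QwenTtsRealtime set up as in the existing qwen_tts_realtime examples, and update_session is the assumed name of the method shown in the hunk above:

    # 'tts' is an already-connected QwenTtsRealtime instance (setup omitted).
    tts.update_session(
        voice='Cherry',
        mode='server_commit',
        language_type='auto')  # new: stored into the session config only when not None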
dashscope/embeddings/multimodal_embedding.py
CHANGED
@@ -5,7 +5,7 @@ from typing import List
 
 from dashscope.api_entities.dashscope_response import (DashScopeAPIResponse,
                                                         DictMixin)
-from dashscope.client.base_api import BaseApi
+from dashscope.client.base_api import BaseApi, BaseAioApi
 from dashscope.common.error import InputRequired, ModelRequired
 from dashscope.common.utils import _get_task_group_and_task
 from dashscope.utils.oss_utils import preprocess_message_element
@@ -111,3 +111,72 @@ class MultiModalEmbedding(BaseApi):
             if is_upload and not has_upload:
                 has_upload = True
         return has_upload
+
+
+class AioMultiModalEmbedding(BaseAioApi):
+    task = 'multimodal-embedding'
+
+    class Models:
+        multimodal_embedding_one_peace_v1 = 'multimodal-embedding-one-peace-v1'
+
+    @classmethod
+    async def call(cls,
+                   model: str,
+                   input: List[MultiModalEmbeddingItemBase],
+                   api_key: str = None,
+                   workspace: str = None,
+                   **kwargs) -> DashScopeAPIResponse:
+        """Get embedding multimodal contents..
+
+        Args:
+            model (str): The embedding model name.
+            input (List[MultiModalEmbeddingElement]): The embedding elements,
+                every element include data, modal, factor field.
+            workspace (str): The dashscope workspace id.
+            **kwargs:
+                auto_truncation(bool, `optional`): Automatically truncate
+                    audio longer than 15 seconds or text longer than 70 words.
+                    Default to false(Too long input will result in failure).
+
+        Returns:
+            DashScopeAPIResponse: The embedding result.
+        """
+        if input is None or not input:
+            raise InputRequired('prompt is required!')
+        if model is None or not model:
+            raise ModelRequired('Model is required!')
+        embedding_input = {}
+        has_upload = cls._preprocess_message_inputs(model, input, api_key)
+        if has_upload:
+            headers = kwargs.pop('headers', {})
+            headers['X-DashScope-OssResourceResolve'] = 'enable'
+            kwargs['headers'] = headers
+        embedding_input['contents'] = input
+        kwargs.pop('stream', False)  # not support streaming output.
+        task_group, function = _get_task_group_and_task(__name__)
+        response = await super().call(
+            model=model,
+            input=embedding_input,
+            task_group=task_group,
+            task=MultiModalEmbedding.task,
+            function=function,
+            api_key=api_key,
+            workspace=workspace,
+            **kwargs)
+        return response
+
+    @classmethod
+    def _preprocess_message_inputs(cls, model: str, input: List[dict],
+                                   api_key: str):
+        """preprocess following inputs
+        input = [{'factor': 1, 'text': 'hello'},
+                 {'factor': 2, 'audio': ''},
+                 {'factor': 3, 'image': ''}]
+        """
+        has_upload = False
+        for elem in input:
+            if not isinstance(elem, (int, float, bool, str, bytes, bytearray)):
+                is_upload = preprocess_message_element(model, elem, api_key)
+                if is_upload and not has_upload:
+                    has_upload = True
+        return has_upload
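The new async class mirrors the sync MultiModalEmbedding API. A minimal sketch based on the signature above; the element shapes follow the _preprocess_message_inputs docstring and the URLs are placeholders:

    import asyncio

    from dashscope import AioMultiModalEmbedding

    async def main():
        resp = await AioMultiModalEmbedding.call(
            model=AioMultiModalEmbedding.Models.multimodal_embedding_one_peace_v1,
            input=[
                {'factor': 1, 'text': 'hello'},
                {'factor': 2, 'audio': 'https://example.com/a.wav'},
                {'factor': 3, 'image': 'https://example.com/cat.png'},
            ],
            api_key='sk-...',
            auto_truncation=True)  # optional; see the docstring above
        print(resp.status_code, resp.output)

    asyncio.run(main())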
dashscope/version.py
CHANGED
{dashscope-1.24.4.dist-info → dashscope-1.24.6.dist-info}/RECORD
CHANGED
@@ -1,16 +1,16 @@
-dashscope/__init__.py,sha256=
+dashscope/__init__.py,sha256=96J137Im9Ii9uxfVOOYkZDJNZXF1sEbcH4-QXFr4xEw,3172
 dashscope/cli.py,sha256=64oGkevgX0RHPPmMg0sevXDgaFLQNA_0vdtjQ7Z2pHM,26492
 dashscope/files.py,sha256=vRDQygm3lOqBZR73o7KNHs1iTBVuvLncuwJNxIYjzAU,3981
 dashscope/model.py,sha256=B5v_BtYLPqj6raClejBgdKg6WTGwhH_f-20pvsQqmsk,1491
 dashscope/models.py,sha256=dE4mzXkl85G343qVylSGpURPRdA5pZSqXlx6PcxqC_Q,1275
-dashscope/version.py,sha256
-dashscope/aigc/__init__.py,sha256=
+dashscope/version.py,sha256=JqxudsXng54szjAiYPU1SF6csQl4dgfVKBuLLIxDvFk,74
+dashscope/aigc/__init__.py,sha256=kYvYEoRK-NUHyMWpBDNQBz4fVA__uOhHRK2kDTBaWgk,617
 dashscope/aigc/chat_completion.py,sha256=ONlyyssIbfaKKcFo7cEKhHx5OCF2XX810HFzIExW1ho,14813
 dashscope/aigc/code_generation.py,sha256=p_mxDKJLQMW0IjFD46JRlZuEZCRESSVKEfLlAevBtqw,10936
 dashscope/aigc/conversation.py,sha256=95xEEY4ThZJysj5zy3aMw7ql9KLJVfD_1iHv9QZ17Ew,14282
 dashscope/aigc/generation.py,sha256=xMcMu16rICTdjZiD_sPqYV_Ltdp4ewGzzfC7JD9VApY,17948
 dashscope/aigc/image_synthesis.py,sha256=Itx9h5brEwC-d3Mj_ntDHGd4qaitqDg9DeGHMJouhMk,28178
-dashscope/aigc/multimodal_conversation.py,sha256=
+dashscope/aigc/multimodal_conversation.py,sha256=k-OPWwgrWJBb7LiUdFuuP9X6sR5ukbaRHPqFTSikO7Y,12380
 dashscope/aigc/video_synthesis.py,sha256=RSPjar5-YiF9xclRmf9H7-5QbRxLcsNXO4zS7oTKi2I,24137
 dashscope/api_entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dashscope/api_entities/aiohttp_request.py,sha256=1L7XdIJ9L65cQmX8x9JCR4t5hNIMDrbiWADfKKp9yfo,10280
@@ -18,7 +18,7 @@ dashscope/api_entities/api_request_data.py,sha256=04rpYPNK1HkT3iTPJmZpquH621xcBb
 dashscope/api_entities/api_request_factory.py,sha256=ynpbFmxSne4dJkv5m40Vlwt4hJSxQPprAuUgMSQIQDg,5639
 dashscope/api_entities/base_request.py,sha256=W2SzrSAGFS6V8DErfSrayQtSL0T4iO7BrC8flr7nt1w,977
 dashscope/api_entities/chat_completion_types.py,sha256=1WMWPszhM3HaJBVz-ZXx-El4D8-RfVUL3ym65xsDRLk,11435
-dashscope/api_entities/dashscope_response.py,sha256=
+dashscope/api_entities/dashscope_response.py,sha256=31guU41ePkLyFsVVN-1WODXdOHiURzRyxxhrUmX9dGM,22835
 dashscope/api_entities/encryption.py,sha256=rUCZx3wwVvS5oyKXEeWgyWPxM8Y5d4AaVdgxLhizBqA,5517
 dashscope/api_entities/http_request.py,sha256=MTxYsbkK8oYWDp8ZPjrkdY9YbnQ9SEIy87riyJidMXo,16484
 dashscope/api_entities/websocket_request.py,sha256=PS0FU854-HjTbKa68f4GHa7-noFRMzKySJGfPkrrBjw,16146
@@ -41,7 +41,7 @@ dashscope/audio/qwen_omni/omni_realtime.py,sha256=eBmoOxuKcfzMHuXsQWCrIIKmso9iEz
 dashscope/audio/qwen_tts/__init__.py,sha256=JS3axY1grqO0aTIJufZ3KS1JsU6yf6y4K2CQlNvUK9I,132
 dashscope/audio/qwen_tts/speech_synthesizer.py,sha256=7LHR-PXhn-VE1cCOp_82Jq0zE9rMc3xy3dszUeyLLNs,2927
 dashscope/audio/qwen_tts_realtime/__init__.py,sha256=vVkmeJr_mEAn_O0Rh5AU3ICg6qIZqppUryJ5lY8VYPo,254
-dashscope/audio/qwen_tts_realtime/qwen_tts_realtime.py,sha256=
+dashscope/audio/qwen_tts_realtime/qwen_tts_realtime.py,sha256=uMLglxRjUZgol9Z7MTUY1Ji_HvaQaayjYkZvRkrNpkQ,10606
 dashscope/audio/tts/__init__.py,sha256=xYpMFseUZGgqgj_70zcX2VsLv-L7qxJ3d-bbdj_hO0I,245
 dashscope/audio/tts/speech_synthesizer.py,sha256=vD1xQV-rew8qAsIaAGH5amsNtB0SqdtNhVHhJHGQ-xk,7622
 dashscope/audio/tts_v2/__init__.py,sha256=me9a3_7KsHQxcJ8hx4SeKlY1e_ThHVvGMw7Yn0uoscM,333
@@ -65,7 +65,7 @@ dashscope/customize/finetunes.py,sha256=AL_kGTJXMvM2ej-EKsLLd1dUphPQdVTefFVCSVH-
 dashscope/embeddings/__init__.py,sha256=XQ7vKr8oZM2CmdOduE53BWy6_Qpn9xUPkma64yw8Gws,291
 dashscope/embeddings/batch_text_embedding.py,sha256=lVhvTS8McYfXuqt_8CmmhA6bPqD0nrGv965kjYG_j0E,8842
 dashscope/embeddings/batch_text_embedding_response.py,sha256=ZfkJMUq8GRsFA6XUTsiAsIySqGJH-VPi2P9Ba1KTU-s,2056
-dashscope/embeddings/multimodal_embedding.py,sha256=
+dashscope/embeddings/multimodal_embedding.py,sha256=PEF7DmtE5cbrXw4k3WQcfmsBKaAY3CTIoei3SyhOl34,6774
 dashscope/embeddings/text_embedding.py,sha256=2MPEyMB99xueDbvFg9kKAe8bgHMDEaFLaFa6GzDWDHg,2108
 dashscope/io/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dashscope/io/input_output.py,sha256=0aXrRJFo1ZqYm_AJWR_w88O4-Btn9np2zUhrrUdBdfw,3992
@@ -100,9 +100,9 @@ dashscope/tokenizers/tokenizer.py,sha256=3FQVDvMNkCW9ccYeJdjrd_PIMMD3Xv7aNZkaYOE
 dashscope/tokenizers/tokenizer_base.py,sha256=5EJIFuizMWESEmLmbd38yJnfeHmPnzZPwsO4aOGjpl4,707
 dashscope/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dashscope/utils/oss_utils.py,sha256=aZIHlMN2JOfVw6kp0SVrMw_N1MfoTcR_-wiRbJ7DgHw,7501
-dashscope-1.24.
-dashscope-1.24.
-dashscope-1.24.
-dashscope-1.24.
-dashscope-1.24.
-dashscope-1.24.
+dashscope-1.24.6.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+dashscope-1.24.6.dist-info/METADATA,sha256=ClDqUPyIROfxsTfLRRM4YsbEOPU7S8HRc_-eo5sPtw0,7146
+dashscope-1.24.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dashscope-1.24.6.dist-info/entry_points.txt,sha256=e9C3sOf9zDYL0O5ROEGX6FT8w-QK_kaGRWmPZDHAFys,49
+dashscope-1.24.6.dist-info/top_level.txt,sha256=woqavFJK9zas5xTqynmALqOtlafghjsk63Xk86powTU,10
+dashscope-1.24.6.dist-info/RECORD,,
Files without changes:
- {dashscope-1.24.4.dist-info → dashscope-1.24.6.dist-info}/WHEEL
- {dashscope-1.24.4.dist-info → dashscope-1.24.6.dist-info}/entry_points.txt
- {dashscope-1.24.4.dist-info → dashscope-1.24.6.dist-info}/licenses/LICENSE
- {dashscope-1.24.4.dist-info → dashscope-1.24.6.dist-info}/top_level.txt