dashscope 1.24.5__py3-none-any.whl → 1.24.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


dashscope/aigc/multimodal_conversation.py CHANGED

@@ -28,6 +28,8 @@ class MultiModalConversation(BaseApi):
           api_key: str = None,
           workspace: str = None,
           text: str = None,
+          voice: str = None,
+          language_type: str = None,
           **kwargs
           ) -> Union[MultiModalConversationResponse, Generator[
               MultiModalConversationResponse, None, None]]:
@@ -57,6 +59,9 @@ class MultiModalConversation(BaseApi):
             [1]: https://help.aliyun.com/zh/dashscope/developer-reference/api-key-settings. # noqa E501
             workspace (str): The dashscope workspace id.
             text (str): The text to generate.
+            voice (str): The voice name of qwen tts, include 'Cherry'/'Ethan'/'Sunny'/'Dylan' and so on,
+                you can get the total voice list : https://help.aliyun.com/zh/model-studio/qwen-tts.
+            language_type (str): The synthesized language type, default is 'auto', useful for [qwen3-tts].
             **kwargs:
                 stream(bool, `optional`): Enable server-sent events
                     (ref: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) # noqa E501
@@ -70,8 +75,6 @@ class MultiModalConversation(BaseApi):
                     tokens with top_p probability mass. So 0.1 means only
                     the tokens comprising the top 10% probability mass are
                     considered[qwen-turbo,bailian-v1].
-                voice(string, `optional`): The voice name of qwen tts, include 'Cherry'/'Ethan'/'Sunny'/'Dylan' and so on,
-                    you can get the total voice list : https://help.aliyun.com/zh/model-studio/qwen-tts.
                 top_k(float, `optional`):


@@ -99,6 +102,10 @@ class MultiModalConversation(BaseApi):

         if text is not None and text:
             input.update({'text': text})
+        if voice is not None and voice:
+            input.update({'voice': voice})
+        if language_type is not None and language_type:
+            input.update({'language_type': language_type})
         if msg_copy is not None:
             input.update({'messages': msg_copy})
         response = super().call(model=model,
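In 1.24.6 the new voice and language_type keyword arguments are forwarded into the request input alongside text. A minimal usage sketch follows; only the keyword arguments come from this diff, while the model name 'qwen-tts' and the way the response is inspected are assumptions for illustration.

# Hypothetical sketch of the new arguments added in 1.24.6.
# Assumes DASHSCOPE_API_KEY is set in the environment; the model name is an
# assumption, not taken from the diff.
from dashscope import MultiModalConversation

response = MultiModalConversation.call(
    model='qwen-tts',              # assumed TTS model name
    text='Hello from DashScope.',  # text to synthesize
    voice='Cherry',                # one of the voices listed in the docstring
    language_type='auto',          # new in 1.24.6, default is 'auto'
)
print(response)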
@@ -160,6 +167,8 @@ class AioMultiModalConversation(BaseAioApi):
           api_key: str = None,
           workspace: str = None,
           text: str = None,
+          voice: str = None,
+          language_type: str = None,
           **kwargs
           ) -> Union[MultiModalConversationResponse, Generator[
               MultiModalConversationResponse, None, None]]:
@@ -189,6 +198,9 @@ class AioMultiModalConversation(BaseAioApi):
             [1]: https://help.aliyun.com/zh/dashscope/developer-reference/api-key-settings. # noqa E501
             workspace (str): The dashscope workspace id.
             text (str): The text to generate.
+            voice (str): The voice name of qwen tts, include 'Cherry'/'Ethan'/'Sunny'/'Dylan' and so on,
+                you can get the total voice list : https://help.aliyun.com/zh/model-studio/qwen-tts.
+            language_type (str): The synthesized language type, default is 'auto', useful for [qwen3-tts].
             **kwargs:
                 stream(bool, `optional`): Enable server-sent events
                     (ref: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) # noqa E501
@@ -202,8 +214,6 @@ class AioMultiModalConversation(BaseAioApi):
                     tokens with top_p probability mass. So 0.1 means only
                     the tokens comprising the top 10% probability mass are
                     considered[qwen-turbo,bailian-v1].
-                voice(string, `optional`): The voice name of qwen tts, include 'Cherry'/'Ethan'/'Sunny'/'Dylan' and so on,
-                    you can get the total voice list : https://help.aliyun.com/zh/model-studio/qwen-tts.
                 top_k(float, `optional`):

         Raises:
@@ -230,6 +240,10 @@ class AioMultiModalConversation(BaseAioApi):

         if text is not None and text:
             input.update({'text': text})
+        if voice is not None and voice:
+            input.update({'voice': voice})
+        if language_type is not None and language_type:
+            input.update({'language_type': language_type})
         if msg_copy is not None:
             input.update({'messages': msg_copy})
         response = await super().call(model=model,
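The async class receives the same two arguments. A sketch of the awaitable form under the same assumptions as above; the import path is the module listed in the RECORD diff below, and the model name remains an illustrative placeholder.

# Hypothetical async sketch mirroring the sync example; only the voice and
# language_type keywords come from this diff.
import asyncio
from dashscope.aigc.multimodal_conversation import AioMultiModalConversation

async def main():
    response = await AioMultiModalConversation.call(
        model='qwen-tts',              # assumed TTS model name
        text='Hello from DashScope.',
        voice='Ethan',
        language_type='auto',
    )
    print(response)

asyncio.run(main())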
dashscope/audio/qwen_tts_realtime/qwen_tts_realtime.py CHANGED

@@ -158,6 +158,7 @@ class QwenTtsRealtime:
                        response_format: AudioFormat = AudioFormat.
                        PCM_24000HZ_MONO_16BIT,
                        mode: str = 'server_commit',
+                       language_type: str = None,
                        **kwargs) -> None:
         '''
         update session configuration, should be used before create response
@@ -170,6 +171,8 @@ class QwenTtsRealtime:
             output audio format
         mode: str
             response mode, server_commit or commit
+        language_type: str
+            language type for synthesized audio, default is 'auto'
         '''
         self.config = {
             'voice': voice,
@@ -177,6 +180,8 @@ class QwenTtsRealtime:
             'response_format': response_format.format,
             'sample_rate': response_format.sample_rate,
         }
+        if language_type is not None:
+            self.config['language_type'] = language_type
         self.config.update(kwargs)
         self.__send_str(
             json.dumps({
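For the realtime synthesizer, language_type is only added to the session config when it is explicitly set, so existing sessions are unaffected. A small sketch of the config dict this logic produces; the 'mode' entry and the concrete format/sample-rate values are assumptions inferred from the defaults in the signature, not shown in the hunk.

# Sketch of the session config assembled in 1.24.6; placeholder values only.
voice = 'Cherry'
mode = 'server_commit'
language_type = 'auto'            # new optional parameter in 1.24.6

config = {
    'voice': voice,
    'mode': mode,                 # assumed entry, not visible in the hunk
    'response_format': 'pcm',     # assumed value of response_format.format
    'sample_rate': 24000,         # assumed value of response_format.sample_rate
}
if language_type is not None:     # only sent when explicitly provided
    config['language_type'] = language_type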
dashscope/version.py CHANGED
@@ -1,3 +1,3 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.

-__version__ = '1.24.5'
+__version__ = '1.24.6'
dashscope-1.24.6.dist-info/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dashscope
-Version: 1.24.5
+Version: 1.24.6
 Summary: dashscope client sdk library
 Home-page: https://dashscope.aliyun.com/
 Author: Alibaba Cloud
dashscope-1.24.6.dist-info/RECORD CHANGED

@@ -3,14 +3,14 @@ dashscope/cli.py,sha256=64oGkevgX0RHPPmMg0sevXDgaFLQNA_0vdtjQ7Z2pHM,26492
 dashscope/files.py,sha256=vRDQygm3lOqBZR73o7KNHs1iTBVuvLncuwJNxIYjzAU,3981
 dashscope/model.py,sha256=B5v_BtYLPqj6raClejBgdKg6WTGwhH_f-20pvsQqmsk,1491
 dashscope/models.py,sha256=dE4mzXkl85G343qVylSGpURPRdA5pZSqXlx6PcxqC_Q,1275
-dashscope/version.py,sha256=2fvqw7bZLyWOIDvUb8DEkdi6y_VgyljhOeYdITEksWM,74
+dashscope/version.py,sha256=JqxudsXng54szjAiYPU1SF6csQl4dgfVKBuLLIxDvFk,74
 dashscope/aigc/__init__.py,sha256=kYvYEoRK-NUHyMWpBDNQBz4fVA__uOhHRK2kDTBaWgk,617
 dashscope/aigc/chat_completion.py,sha256=ONlyyssIbfaKKcFo7cEKhHx5OCF2XX810HFzIExW1ho,14813
 dashscope/aigc/code_generation.py,sha256=p_mxDKJLQMW0IjFD46JRlZuEZCRESSVKEfLlAevBtqw,10936
 dashscope/aigc/conversation.py,sha256=95xEEY4ThZJysj5zy3aMw7ql9KLJVfD_1iHv9QZ17Ew,14282
 dashscope/aigc/generation.py,sha256=xMcMu16rICTdjZiD_sPqYV_Ltdp4ewGzzfC7JD9VApY,17948
 dashscope/aigc/image_synthesis.py,sha256=Itx9h5brEwC-d3Mj_ntDHGd4qaitqDg9DeGHMJouhMk,28178
-dashscope/aigc/multimodal_conversation.py,sha256=BXpUthyGxJHQs18-m_ZzAw6MI5nSM4_NVMUfTDSC1-k,11682
+dashscope/aigc/multimodal_conversation.py,sha256=k-OPWwgrWJBb7LiUdFuuP9X6sR5ukbaRHPqFTSikO7Y,12380
 dashscope/aigc/video_synthesis.py,sha256=RSPjar5-YiF9xclRmf9H7-5QbRxLcsNXO4zS7oTKi2I,24137
 dashscope/api_entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dashscope/api_entities/aiohttp_request.py,sha256=1L7XdIJ9L65cQmX8x9JCR4t5hNIMDrbiWADfKKp9yfo,10280
@@ -41,7 +41,7 @@ dashscope/audio/qwen_omni/omni_realtime.py,sha256=eBmoOxuKcfzMHuXsQWCrIIKmso9iEz
 dashscope/audio/qwen_tts/__init__.py,sha256=JS3axY1grqO0aTIJufZ3KS1JsU6yf6y4K2CQlNvUK9I,132
 dashscope/audio/qwen_tts/speech_synthesizer.py,sha256=7LHR-PXhn-VE1cCOp_82Jq0zE9rMc3xy3dszUeyLLNs,2927
 dashscope/audio/qwen_tts_realtime/__init__.py,sha256=vVkmeJr_mEAn_O0Rh5AU3ICg6qIZqppUryJ5lY8VYPo,254
-dashscope/audio/qwen_tts_realtime/qwen_tts_realtime.py,sha256=8bOAMcDasTHwSLb9xAGJoj9eUPpQHh2aWvonV6Kf1U4,10367
+dashscope/audio/qwen_tts_realtime/qwen_tts_realtime.py,sha256=uMLglxRjUZgol9Z7MTUY1Ji_HvaQaayjYkZvRkrNpkQ,10606
 dashscope/audio/tts/__init__.py,sha256=xYpMFseUZGgqgj_70zcX2VsLv-L7qxJ3d-bbdj_hO0I,245
 dashscope/audio/tts/speech_synthesizer.py,sha256=vD1xQV-rew8qAsIaAGH5amsNtB0SqdtNhVHhJHGQ-xk,7622
 dashscope/audio/tts_v2/__init__.py,sha256=me9a3_7KsHQxcJ8hx4SeKlY1e_ThHVvGMw7Yn0uoscM,333
@@ -100,9 +100,9 @@ dashscope/tokenizers/tokenizer.py,sha256=3FQVDvMNkCW9ccYeJdjrd_PIMMD3Xv7aNZkaYOE
 dashscope/tokenizers/tokenizer_base.py,sha256=5EJIFuizMWESEmLmbd38yJnfeHmPnzZPwsO4aOGjpl4,707
 dashscope/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dashscope/utils/oss_utils.py,sha256=aZIHlMN2JOfVw6kp0SVrMw_N1MfoTcR_-wiRbJ7DgHw,7501
-dashscope-1.24.5.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-dashscope-1.24.5.dist-info/METADATA,sha256=eRxoK1TphMD4hn-vUM3aPMtaCsvCzstnDe-QPH9A4Q0,7146
-dashscope-1.24.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-dashscope-1.24.5.dist-info/entry_points.txt,sha256=e9C3sOf9zDYL0O5ROEGX6FT8w-QK_kaGRWmPZDHAFys,49
-dashscope-1.24.5.dist-info/top_level.txt,sha256=woqavFJK9zas5xTqynmALqOtlafghjsk63Xk86powTU,10
-dashscope-1.24.5.dist-info/RECORD,,
+dashscope-1.24.6.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+dashscope-1.24.6.dist-info/METADATA,sha256=ClDqUPyIROfxsTfLRRM4YsbEOPU7S8HRc_-eo5sPtw0,7146
+dashscope-1.24.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dashscope-1.24.6.dist-info/entry_points.txt,sha256=e9C3sOf9zDYL0O5ROEGX6FT8w-QK_kaGRWmPZDHAFys,49
+dashscope-1.24.6.dist-info/top_level.txt,sha256=woqavFJK9zas5xTqynmALqOtlafghjsk63Xk86powTU,10
+dashscope-1.24.6.dist-info/RECORD,,