dashscope 1.20.9__py3-none-any.whl → 1.20.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dashscope might be problematic.

dashscope/app/application.py CHANGED
@@ -99,6 +99,8 @@ class Application(BaseApi):
                     simple: simple format of doc retrival which not include index in response text but in doc reference list.
                     indexed: include both index in response text and doc reference list
             memory_id(str, `optional`): Used to store long term context summary between end users and assistant.
+            image_list(list, `optional`): Used to pass image url list.
+            rag_options(dict, `optional`): Rag options for retrieval augmented generation options.
         Raises:
             InvalidInput: The history and auto_history are mutually exclusive.
 
@@ -169,4 +171,8 @@ class Application(BaseApi):
         if biz_params is not None and biz_params:
             input_param['biz_params'] = biz_params
 
+        image_list = kwargs.pop('image_list', None)
+        if image_list is not None and image_list:
+            input_param['image_list'] = image_list
+
         return input_param, {**parameters, **kwargs}
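
For orientation, a minimal sketch of how the new `image_list` keyword could be passed through `Application.call`; the app ID, prompt, image URL, and the structure of `rag_options` below are placeholders, not values taken from this diff.

    # Hypothetical usage of the new kwargs; app_id, prompt, URL and rag_options content are placeholders.
    from dashscope import Application

    response = Application.call(
        app_id='your-app-id',
        prompt='Describe these pictures.',
        image_list=['https://example.com/cat.png'],          # new in 1.20.11, forwarded into input_param
        rag_options={'pipeline_ids': ['your-pipeline-id']},   # assumed shape; documented only as a dict option
    )
    print(response.output)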
dashscope/audio/asr/__init__.py CHANGED
@@ -1,8 +1,9 @@
 from .asr_phrase_manager import AsrPhraseManager
 from .recognition import Recognition, RecognitionCallback, RecognitionResult
 from .transcription import Transcription
+from .vocabulary import VocabularyService, VocabularyServiceException
 
 __all__ = [
-    Transcription, Recognition, RecognitionCallback, RecognitionResult,
-    AsrPhraseManager
+    'Transcription', 'Recognition', 'RecognitionCallback', 'RecognitionResult',
+    'AsrPhraseManager', 'VocabularyServiceException', 'VocabularyService'
 ]
dashscope/audio/asr/recognition.py CHANGED
@@ -1,7 +1,7 @@
 import json
 import os
-import time
 import threading
+import time
 from http import HTTPStatus
 from threading import Timer
 from typing import Any, Dict, List, Union
@@ -185,24 +185,29 @@ class Recognition(BaseApi):
                     self._callback.on_complete()
                 else:
                     usage: Dict[str, Any] = None
-                    useags: List[Any] = None
+                    usages: List[Any] = None
                     if 'sentence' in part.output:
                         if (self._first_package_timestamp < 0):
-                            self._first_package_timestamp = time.time()*1000
-                            logger.debug('first package delay {}'.format(self._first_package_timestamp - self._start_stream_timestamp))
+                            self._first_package_timestamp = time.time() * 1000
+                            logger.debug('first package delay {}'.format(
+                                self._first_package_timestamp -
+                                self._start_stream_timestamp))
                         sentence = part.output['sentence']
-                        logger.debug('Recv Result :{}, isEnd: {}'.format(sentence, RecognitionResult.is_sentence_end(sentence)))
-                        if part.usage is not None:
+                        logger.debug('Recv Result :{}, isEnd: {}'.format(
+                            sentence,
+                            RecognitionResult.is_sentence_end(sentence)))
+                        if part.usage is not None:
                             usage = {
-                                'end_time': part.output['sentence']['end_time'],
+                                'end_time':
+                                part.output['sentence']['end_time'],
                                 'usage': part.usage
                             }
-                            useags = [usage]
+                            usages = [usage]
 
                     self._callback.on_event(
                         RecognitionResult(
                             RecognitionResponse.from_api_response(part),
-                            usages=useags))
+                            usages=usages))
             else:
                 self._running = False
                 self._stream_data.clear()
@@ -318,7 +323,7 @@ class Recognition(BaseApi):
         Returns:
             RecognitionResult: The result of speech recognition.
         """
-        self._start_stream_timestamp = time.time()*1000
+        self._start_stream_timestamp = time.time() * 1000
         if self._running:
             raise InvalidParameter('Speech recognition has been called.')
 
@@ -363,10 +368,14 @@ class Recognition(BaseApi):
            if part.status_code == HTTPStatus.OK:
                if 'sentence' in part.output:
                    if (self._first_package_timestamp < 0):
-                        self._first_package_timestamp = time.time()*1000
-                        logger.debug('first package delay {}'.format(self._first_package_timestamp - self._start_stream_timestamp))
+                        self._first_package_timestamp = time.time() * 1000
+                        logger.debug('first package delay {}'.format(
+                            self._first_package_timestamp -
+                            self._start_stream_timestamp))
                    sentence = part.output['sentence']
-                    logger.debug('Recv Result :{}, isEnd: {}'.format(sentence, RecognitionResult.is_sentence_end(sentence)))
+                    logger.debug('Recv Result :{}, isEnd: {}'.format(
+                        sentence,
+                        RecognitionResult.is_sentence_end(sentence)))
                    if RecognitionResult.is_sentence_end(sentence):
                        sentences.append(sentence)
 
dashscope/audio/asr/transcription.py CHANGED
@@ -3,7 +3,6 @@ import time
 from typing import List, Union
 
 import aiohttp
-
 from dashscope.api_entities.dashscope_response import (DashScopeAPIResponse,
                                                        TranscriptionResponse)
 from dashscope.client.base_api import BaseAsyncApi
@@ -109,13 +108,11 @@ class Transcription(BaseAsyncApi):
         return TranscriptionResponse.from_api_response(response)
 
     @classmethod
-    def fetch(
-        cls,
-        task: Union[str, TranscriptionResponse],
-        api_key: str = None,
-        workspace: str = None,
-        **kwargs
-    ) -> TranscriptionResponse:
+    def fetch(cls,
+              task: Union[str, TranscriptionResponse],
+              api_key: str = None,
+              workspace: str = None,
+              **kwargs) -> TranscriptionResponse:
         """Fetch the status of task, including results of batch transcription when task_status is SUCCEEDED. # noqa: E501
 
         Args:
@@ -147,13 +144,11 @@ class Transcription(BaseAsyncApi):
         return TranscriptionResponse.from_api_response(response)
 
     @classmethod
-    def wait(
-        cls,
-        task: Union[str, TranscriptionResponse],
-        api_key: str = None,
-        workspace: str = None,
-        **kwargs
-    ) -> TranscriptionResponse:
+    def wait(cls,
+             task: Union[str, TranscriptionResponse],
+             api_key: str = None,
+             workspace: str = None,
+             **kwargs) -> TranscriptionResponse:
         """Poll task until the final results of transcription is obtained.
 
         Args:
@@ -164,7 +159,10 @@ class Transcription(BaseAsyncApi):
         Returns:
             TranscriptionResponse: The result of batch transcription.
         """
-        response = super().wait(task, api_key=api_key, workspace=workspace, **kwargs)
+        response = super().wait(task,
+                                api_key=api_key,
+                                workspace=workspace,
+                                **kwargs)
         return TranscriptionResponse.from_api_response(response)
 
     @classmethod
dashscope/audio/asr/vocabulary.py ADDED
@@ -0,0 +1,173 @@
+import asyncio
+import time
+from typing import List
+
+import aiohttp
+from dashscope.client.base_api import BaseApi
+from dashscope.common.constants import ApiProtocol, HTTPMethod
+from dashscope.common.logging import logger
+
+
+class VocabularyServiceException(Exception):
+    def __init__(self, status_code: int, code: str,
+                 error_message: str) -> None:
+        self._status_code = status_code
+        self._code = code
+        self._error_message = error_message
+
+    def __str__(self):
+        return f'Status Code: {self._status_code}, Code: {self._code}, Error Message: {self._error_message}'
+
+
+class VocabularyService(BaseApi):
+    '''
+    API for asr vocabulary service
+    '''
+    MAX_QUERY_TRY_COUNT = 3
+
+    def __init__(self,
+                 api_key=None,
+                 workspace=None,
+                 model=None,
+                 **kwargs) -> None:
+        super().__init__()
+        self._api_key = api_key
+        self._workspace = workspace
+        self._kwargs = kwargs
+        self._last_request_id = None
+        self.model = model
+        if self.model == None:
+            self.model = 'speech-biasing'
+
+    def __call_with_input(self, input):
+        try_count = 0
+        while True:
+            try:
+                response = super().call(model=self.model,
+                                        task_group='audio',
+                                        task='asr',
+                                        function='customization',
+                                        input=input,
+                                        api_protocol=ApiProtocol.HTTP,
+                                        http_method=HTTPMethod.POST,
+                                        api_key=self._api_key,
+                                        workspace=self._workspace,
+                                        **self._kwargs)
+            except (asyncio.TimeoutError, aiohttp.ClientConnectorError) as e:
+                logger.error(e)
+                try_count += 1
+                if try_count <= VocabularyService.MAX_QUERY_TRY_COUNT:
+                    time.sleep(2)
+                    continue
+
+            break
+        logger.debug('>>>>recv', response)
+        return response
+
+    def create_vocabulary(self, target_model: str, prefix: str,
+                          vocabulary: List[dict]) -> str:
+        '''
+        Create a hot-word vocabulary.
+        param: target_model the speech recognition model version the vocabulary applies to
+        param: prefix custom vocabulary prefix; digits and lowercase letters only, fewer than ten characters.
+        param: vocabulary the vocabulary entries
+        return: the vocabulary identifier vocabulary_id
+        '''
+        response = self.__call_with_input(input={
+            'action': 'create_vocabulary',
+            'target_model': target_model,
+            'prefix': prefix,
+            'vocabulary': vocabulary,
+        }, )
+        if response.status_code == 200:
+            self._last_request_id = response.request_id
+            return response.output['vocabulary_id']
+        else:
+            raise VocabularyServiceException(response.status_code,
+                                             response.code, response.message)
+
+    def list_vocabularies(self,
+                          prefix=None,
+                          page_index: int = 0,
+                          page_size: int = 10) -> List[dict]:
+        '''
+        List all vocabularies that have been created.
+        param: prefix custom prefix; if set, only vocabulary identifiers with this prefix are returned.
+        param: page_index page index to query
+        param: page_size page size
+        return: list of vocabulary identifiers
+        '''
+        if prefix:
+            response = self.__call_with_input(input={
+                'action': 'list_vocabulary',
+                'prefix': prefix,
+                'page_index': page_index,
+                'page_size': page_size,
+            }, )
+        else:
+            response = self.__call_with_input(input={
+                'action': 'list_vocabulary',
+                'page_index': page_index,
+                'page_size': page_size,
+            }, )
+        if response.status_code == 200:
+            self._last_request_id = response.request_id
+            return response.output['vocabulary_list']
+        else:
+            raise VocabularyServiceException(response.status_code,
+                                             response.code, response.message)
+
+    def query_vocabulary(self, vocabulary_id: str) -> List[dict]:
+        '''
+        Fetch the contents of a vocabulary.
+        param: vocabulary_id the vocabulary identifier
+        return: the vocabulary
+        '''
+        response = self.__call_with_input(input={
+            'action': 'query_vocabulary',
+            'vocabulary_id': vocabulary_id,
+        }, )
+        if response.status_code == 200:
+            self._last_request_id = response.request_id
+            return response.output
+        else:
+            raise VocabularyServiceException(response.status_code,
+                                             response.code, response.message)
+
+    def update_vocabulary(self, vocabulary_id: str,
+                          vocabulary: List[dict]) -> None:
+        '''
+        Replace an existing vocabulary with a new one.
+        param: vocabulary_id identifier of the vocabulary to replace
+        param: vocabulary the new vocabulary
+        '''
+        response = self.__call_with_input(input={
+            'action': 'update_vocabulary',
+            'vocabulary_id': vocabulary_id,
+            'vocabulary': vocabulary,
+        }, )
+        if response.status_code == 200:
+            self._last_request_id = response.request_id
+            return
+        else:
+            raise VocabularyServiceException(response.status_code,
+                                             response.code, response.message)
+
+    def delete_vocabulary(self, vocabulary_id: str) -> None:
+        '''
+        Delete a vocabulary.
+        param: vocabulary_id identifier of the vocabulary to delete
+        '''
+        response = self.__call_with_input(input={
+            'action': 'delete_vocabulary',
+            'vocabulary_id': vocabulary_id,
+        }, )
+        if response.status_code == 200:
+            self._last_request_id = response.request_id
+            return
+        else:
+            raise VocabularyServiceException(response.status_code,
+                                             response.code, response.message)
+
+    def get_last_request_id(self):
+        return self._last_request_id
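
A brief, hedged usage sketch of the new VocabularyService based only on the methods added above; the API key, target model name, and the vocabulary entry fields are placeholders or assumptions.

    # Hypothetical VocabularyService usage; key, model name and entry fields are placeholders.
    from dashscope.audio.asr import VocabularyService, VocabularyServiceException

    service = VocabularyService(api_key='your-api-key')
    try:
        vocab_id = service.create_vocabulary(
            target_model='paraformer-realtime-v2',   # assumed: an ASR model version that supports biasing
            prefix='demo01',                         # digits and lowercase letters, fewer than ten characters
            vocabulary=[{'text': 'DashScope', 'weight': 4, 'lang': 'en'}],  # assumed entry format
        )
        print('vocabulary_id:', vocab_id, 'request_id:', service.get_last_request_id())
        print(service.list_vocabularies(prefix='demo01'))
        print(service.query_vocabulary(vocab_id))
        service.delete_vocabulary(vocab_id)
    except VocabularyServiceException as e:
        print('vocabulary request failed:', e)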
dashscope/audio/tts_v2/__init__.py CHANGED
@@ -1,7 +1,7 @@
+from .enrollment import VoiceEnrollmentException, VoiceEnrollmentService
 from .speech_synthesizer import AudioFormat, ResultCallback, SpeechSynthesizer
 
-__all__ = ['SpeechSynthesizer', 'ResultCallback', 'AudioFormat']
-
-# from .speech_synthesizer import (SpeechSynthesizer, ResultCallback, SpeechSynthesisResult, AudioFormat)
-
-# __all__ = ['SpeechSynthesizer', 'ResultCallback', 'SpeechSynthesisResult', 'AudioFormat']
+__all__ = [
+    'SpeechSynthesizer', 'ResultCallback', 'AudioFormat',
+    'VoiceEnrollmentException', 'VoiceEnrollmentService'
+]
dashscope/audio/tts_v2/enrollment.py ADDED
@@ -0,0 +1,170 @@
+import asyncio
+import time
+from typing import List
+
+import aiohttp
+from dashscope.client.base_api import BaseApi
+from dashscope.common.constants import ApiProtocol, HTTPMethod
+from dashscope.common.logging import logger
+
+
+class VoiceEnrollmentException(Exception):
+    def __init__(self, status_code: int, code: str,
+                 error_message: str) -> None:
+        self._status_code = status_code
+        self._code = code
+        self._error_message = error_message
+
+    def __str__(self):
+        return f'Status Code: {self._status_code}, Code: {self._code}, Error Message: {self._error_message}'
+
+
+class VoiceEnrollmentService(BaseApi):
+    '''
+    API for voice clone service
+    '''
+    MAX_QUERY_TRY_COUNT = 3
+
+    def __init__(self,
+                 api_key=None,
+                 workspace=None,
+                 model=None,
+                 **kwargs) -> None:
+        super().__init__()
+        self._api_key = api_key
+        self._workspace = workspace
+        self._kwargs = kwargs
+        self._last_request_id = None
+        self.model = model
+        if self.model == None:
+            self.model = 'voice-enrollment'
+
+    def __call_with_input(self, input):
+        try_count = 0
+        while True:
+            try:
+                response = super().call(model=self.model,
+                                        task_group='audio',
+                                        task='tts',
+                                        function='customization',
+                                        input=input,
+                                        api_protocol=ApiProtocol.HTTP,
+                                        http_method=HTTPMethod.POST,
+                                        api_key=self._api_key,
+                                        workspace=self._workspace,
+                                        **self._kwargs)
+            except (asyncio.TimeoutError, aiohttp.ClientConnectorError) as e:
+                logger.error(e)
+                try_count += 1
+                if try_count <= VoiceEnrollmentService.MAX_QUERY_TRY_COUNT:
+                    time.sleep(2)
+                    continue
+
+            break
+        logger.debug('>>>>recv', response)
+        return response
+
+    def create_voice(self, target_model: str, prefix: str, url: str) -> str:
+        '''
+        Create a new cloned voice.
+        param: target_model the model version the cloned voice applies to
+        param: prefix custom voice prefix; digits and lowercase letters only, fewer than ten characters.
+        param: url URL of the audio file used for cloning
+        return: voice_id
+        '''
+        response = self.__call_with_input(input={
+            'action': 'create_voice',
+            'target_model': target_model,
+            'prefix': prefix,
+            'url': url,
+        }, )
+        if response.status_code == 200:
+            self._last_request_id = response.request_id
+            return response.output['voice_id']
+        else:
+            raise VoiceEnrollmentException(response.status_code, response.code,
+                                           response.message)
+
+    def list_voices(self,
+                    prefix=None,
+                    page_index: int = 0,
+                    page_size: int = 10) -> List[dict]:
+        '''
+        List all voices that have been created.
+        param: page_index page index to query
+        param: page_size page size
+        return: List[dict] list of voices, including each voice's id, creation time, modification time, and status.
+        '''
+        if prefix:
+            response = self.__call_with_input(input={
+                'action': 'list_voice',
+                'prefix': prefix,
+                'page_index': page_index,
+                'page_size': page_size,
+            }, )
+        else:
+            response = self.__call_with_input(input={
+                'action': 'list_voice',
+                'page_index': page_index,
+                'page_size': page_size,
+            }, )
+        if response.status_code == 200:
+            self._last_request_id = response.request_id
+            return response.output['voice_list']
+        else:
+            raise VoiceEnrollmentException(response.status_code, response.code,
+                                           response.message)
+
+    def query_voice(self, voice_id: str) -> List[str]:
+        '''
+        Query a created voice.
+        param: voice_id the voice to query
+        return: bytes the audio used to enroll the voice
+        '''
+        response = self.__call_with_input(input={
+            'action': 'query_voice',
+            'voice_id': voice_id,
+        }, )
+        if response.status_code == 200:
+            self._last_request_id = response.request_id
+            return response.output
+        else:
+            raise VoiceEnrollmentException(response.status_code, response.code,
+                                           response.message)
+
+    def update_voice(self, voice_id: str, url: str) -> None:
+        '''
+        Update a voice.
+        param: voice_id the voice id
+        param: url URL of the audio file used for cloning
+        '''
+        response = self.__call_with_input(input={
+            'action': 'update_voice',
+            'voice_id': voice_id,
+            'url': url,
+        }, )
+        if response.status_code == 200:
+            self._last_request_id = response.request_id
+            return
+        else:
+            raise VoiceEnrollmentException(response.status_code, response.code,
+                                           response.message)
+
+    def delete_voice(self, voice_id: str) -> None:
+        '''
+        Delete a voice.
+        param: voice_id the voice to delete
+        '''
+        response = self.__call_with_input(input={
+            'action': 'delete_voice',
+            'voice_id': voice_id,
+        }, )
+        if response.status_code == 200:
+            self._last_request_id = response.request_id
+            return
+        else:
+            raise VoiceEnrollmentException(response.status_code, response.code,
+                                           response.message)
+
+    def get_last_request_id(self):
+        return self._last_request_id
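
Similarly, a hedged sketch of the new VoiceEnrollmentService; the API key, target model name, and audio URL are placeholders.

    # Hypothetical VoiceEnrollmentService usage; key, model name and audio URL are placeholders.
    from dashscope.audio.tts_v2 import VoiceEnrollmentException, VoiceEnrollmentService

    service = VoiceEnrollmentService(api_key='your-api-key')
    try:
        voice_id = service.create_voice(
            target_model='cosyvoice-v1',              # assumed: a TTS model version that supports cloning
            prefix='demo01',                          # digits and lowercase letters, fewer than ten characters
            url='https://example.com/sample.wav',     # placeholder enrollment audio
        )
        print('voice_id:', voice_id)
        for voice in service.list_voices(prefix='demo01'):
            print(voice)
        service.delete_voice(voice_id)
    except VoiceEnrollmentException as e:
        print('enrollment request failed:', e)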
dashscope/audio/tts_v2/speech_synthesizer.py CHANGED
@@ -80,7 +80,7 @@ class Request:
                 voice,
                 format='wav',
                 sample_rate=16000,
-                volumn=50,
+                volume=50,
                 speech_rate=1.0,
                 pitch_rate=1.0,
                 ):
@@ -90,7 +90,7 @@ class Request:
         self.model = model
         self.format = format
         self.sample_rate = sample_rate
-        self.volumn = volumn
+        self.volume = volume
         self.speech_rate = speech_rate
         self.pitch_rate = pitch_rate
 
@@ -136,7 +136,7 @@ class Request:
             },
             'parameters': {
                 'voice': self.voice,
-                'volume': self.volumn,
+                'volume': self.volume,
                 'text_type': 'PlainText',
                 'sample_rate': self.sample_rate,
                 'rate': self.speech_rate,
@@ -190,7 +190,7 @@ class SpeechSynthesizer:
                 model,
                 voice,
                 format: AudioFormat = AudioFormat.DEFAULT,
-                volumn=50,
+                volume=50,
                 speech_rate=1.0,
                 pitch_rate=1.0,
                 headers=None,
@@ -253,7 +253,7 @@ class SpeechSynthesizer:
             voice=voice,
             format=format.format,
             sample_rate=format.sample_rate,
-            volumn=volumn,
+            volume=volume,
             speech_rate=speech_rate,
            pitch_rate=pitch_rate,
        )
@@ -424,6 +424,8 @@ class SpeechSynthesizer:
        request = self.request.getFinishRequest()
        self.__send_str(request)
        self.close()
+        self.start_event.set()
+        self.complete_event.set()
 
    # Callback function for listening to messages
    def on_message(self, ws, message):
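
The hunks above rename the misspelled `volumn` parameter to `volume` throughout the v2 synthesizer and set `start_event`/`complete_event` when streaming finishes, so callers waiting on those events are released. A hedged call sketch follows, assuming the synchronous `call` returns the synthesized audio bytes; the model name, voice name, and output path are placeholders.

    # Sketch of the renamed parameter (volume, formerly volumn); model, voice and output path are placeholders.
    from dashscope.audio.tts_v2 import AudioFormat, SpeechSynthesizer

    synthesizer = SpeechSynthesizer(
        model='cosyvoice-v1',          # placeholder model name
        voice='longxiaochun',          # placeholder voice name
        format=AudioFormat.DEFAULT,    # default value from the signature shown above
        volume=50,                     # was `volumn=50` before this release
    )
    audio = synthesizer.call('Hello from DashScope!')  # assumed to return synthesized audio bytes
    with open('output_audio', 'wb') as f:              # placeholder output path/extension
        f.write(audio)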
dashscope/version.py CHANGED
@@ -1 +1 @@
-__version__ = '1.20.9'
+__version__ = '1.20.11'
dashscope-1.20.9.dist-info/METADATA → dashscope-1.20.11.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dashscope
-Version: 1.20.9
+Version: 1.20.11
 Summary: dashscope client sdk library
 Home-page: https://dashscope.aliyun.com/
 Author: Alibaba Cloud
dashscope-1.20.9.dist-info/RECORD → dashscope-1.20.11.dist-info/RECORD CHANGED
@@ -6,7 +6,7 @@ dashscope/files.py,sha256=QgJjwhtn9F548nCA8jD8OvE6aQEj-20hZqJgYXsUdQU,3930
 dashscope/finetune.py,sha256=_tflDUvu0KagSoCzLaf0hofpG_P8NU6PylL8CPjVhrA,6243
 dashscope/model.py,sha256=UPOn1qMYFhX-ovXi3BMxZEBk8qOK7WLJOYHMbPZwYBo,1440
 dashscope/models.py,sha256=1-bc-Ue68zurgu_y6RhfFr9uzeQMF5AZq-C32lJGMGU,1224
-dashscope/version.py,sha256=rW_OBoIurR44z0-gRCL1GiR4KuiexcDQ6XLZw1HMwZs,23
+dashscope/version.py,sha256=TEZ0ud2aEa7-cNPJK3g_8-b2BatRgSxi7ztPNW9JcfM,24
 dashscope/aigc/__init__.py,sha256=s-MCA87KYiVumYtKtJi5IMN7xelSF6TqEU3s3_7RF-Y,327
 dashscope/aigc/code_generation.py,sha256=KAJVrGp6tiNFBBg64Ovs9RfcP5SrIhrbW3wdA89NKso,10885
 dashscope/aigc/conversation.py,sha256=xRoJlCR-IXHjSdkDrK74a9ut1FJg0FZhTNXZAJC18MA,14231
@@ -22,22 +22,24 @@ dashscope/api_entities/dashscope_response.py,sha256=Bp1T7HwVlkOvpMNg-AEjz-BScxhL
 dashscope/api_entities/http_request.py,sha256=pYE8qRMu9CaQDiugPlXeYoaj_diBv-ZDExCD3WNhehI,13259
 dashscope/api_entities/websocket_request.py,sha256=Xr6IJ9WqrIw5ouBQLpgoRSwL1C09jkb4u1EZdxhVQy0,15039
 dashscope/app/__init__.py,sha256=UiN_9i--z84Dw5wUehOh_Tkk_9Gq_td_Kbz1dobBEKg,62
-dashscope/app/application.py,sha256=uIWVEupscDGpUl7t6b8JZeTTlCaCQA1bznI_QdxNrj8,8200
+dashscope/app/application.py,sha256=QdFSUgQCDDwEaJtlKkbrxrw1lXBCZB7eOX7P1D3GNgU,8532
 dashscope/app/application_response.py,sha256=U5I8Yb1IlXzj2L5a1OAl55i0MCB3kG9Qp4aY17_73pI,6886
 dashscope/assistants/__init__.py,sha256=i9N5OxHgY7edlOhTdPyC0N5Uc0uMCkB2vbMPDCD1zX0,383
 dashscope/assistants/assistant_types.py,sha256=1jNL30TOlrkiYhvCaB3E8jkPLG8CnQ6I3tHpYXZCsD0,4211
 dashscope/assistants/assistants.py,sha256=NYahIDqhtnOcQOmnhZsjc5F5jvBUQcce8-fbrJXHVnQ,10833
 dashscope/assistants/files.py,sha256=pwLVJ_pjpRFWyfI_MRvhH7Si7FzGDj4ChzZgWTpLOhg,6699
 dashscope/audio/__init__.py,sha256=-ZRxrK-gV4QsUtlThIT-XwqB6vmyEsnhxIxdLmhCUuc,61
-dashscope/audio/asr/__init__.py,sha256=-s180qWn_JPSpCo1q0aDJJ5HQ3zTzD4z5yUwsRqH4aU,275
+dashscope/audio/asr/__init__.py,sha256=kFdx3IYsdfGGDDlQmUjvtd2kqifuEekwlPBEOUvXvEY,406
 dashscope/audio/asr/asr_phrase_manager.py,sha256=EjtbI3zz9UQGS1qv6Yb4zzEMj4OJJVXmwkqZyIrzvEA,7642
-dashscope/audio/asr/recognition.py,sha256=cEooE3wGf8kKfJIVbaXEytl5X6F0hMsLe8g4Bj9Fn4w,18768
+dashscope/audio/asr/recognition.py,sha256=a4zIkIMiWwOEApP9k9ZC9jGDr7CP7BqB6Cy1dBVTN4g,18978
 dashscope/audio/asr/transcribe.py,sha256=HfZYpvpVfvGRAIIIzX65Af33E6vsIFGd_qqhQ8LaNcM,9651
-dashscope/audio/asr/transcription.py,sha256=1WAg9WH89antVzRYEKXb5LQP9xylZmX4YKp7v5oMYjY,8931
+dashscope/audio/asr/transcription.py,sha256=D8CW0XDqJuEJVmNFJ6qczTysSV3Sz_rzk2C6NIKTtVc,9042
+dashscope/audio/asr/vocabulary.py,sha256=880u5CGh8Ky9iWXDf_7cUuHfL5AGmw8JJRCbRThVCMI,6484
 dashscope/audio/tts/__init__.py,sha256=fbnieZX9yNFNh5BsxLpLXb63jlxzxrdCJakV3ignjlQ,194
 dashscope/audio/tts/speech_synthesizer.py,sha256=dnKx9FDDdO_ETHAjhK8zaMVaH6SfoTtN5YxXXqgY1JA,7571
-dashscope/audio/tts_v2/__init__.py,sha256=ve5a81qTbWDcRaSuritZtJBzryOIol2_dxzfqqdCw-k,345
-dashscope/audio/tts_v2/speech_synthesizer.py,sha256=bpzj9gx2D_FfOzgsjU-GBGmeWvEdewNPFd447mOgM-o,19220
+dashscope/audio/tts_v2/__init__.py,sha256=5UfyDBYnuGgOy9KMxEIXA2U2ihcXutdZc1cqJudy-8M,282
+dashscope/audio/tts_v2/enrollment.py,sha256=sUkOEUsP8RXREMtTkAeDTYfrQJ6lPnM_Y-DeefXB_Q4,6140
+dashscope/audio/tts_v2/speech_synthesizer.py,sha256=lATasQJB8HlB_yYm90qqW6zIAE1CQFxBxhnch6xdg9s,19285
 dashscope/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dashscope/client/base_api.py,sha256=rXN97XGyDhCCaD_dz_clpFDjOJfpGjqiH7yX3LaD-GE,41233
 dashscope/common/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -83,9 +85,9 @@ dashscope/tokenizers/tokenizer.py,sha256=y6P91qTCYo__pEx_0VHAcj9YECfbUdRqZU1fdGT
 dashscope/tokenizers/tokenizer_base.py,sha256=REDhzRyDT13iequ61-a6_KcTy0GFKlihQve5HkyoyRs,656
 dashscope/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dashscope/utils/oss_utils.py,sha256=7vZ2Lypxwiit8VcAqAvr3cCyhVfaLapDiNuF-H3ZCD4,7332
-dashscope-1.20.9.dist-info/LICENSE,sha256=Izp5L1DF1Mbza6qojkqNNWlE_mYLnr4rmzx2EBF8YFw,11413
-dashscope-1.20.9.dist-info/METADATA,sha256=n0YxcRTLCxVG5CnYICYV2ysSWYQMCYeG65bc7xsdYig,6641
-dashscope-1.20.9.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
-dashscope-1.20.9.dist-info/entry_points.txt,sha256=raEp5dOuj8whJ7yqZlDM8WQ5p2RfnGrGNo0QLQEnatY,50
-dashscope-1.20.9.dist-info/top_level.txt,sha256=woqavFJK9zas5xTqynmALqOtlafghjsk63Xk86powTU,10
-dashscope-1.20.9.dist-info/RECORD,,
+dashscope-1.20.11.dist-info/LICENSE,sha256=Izp5L1DF1Mbza6qojkqNNWlE_mYLnr4rmzx2EBF8YFw,11413
+dashscope-1.20.11.dist-info/METADATA,sha256=wA4EY4SbQF5tv-A5DqOJQ4CUJBxR77DTn11XHuWgOgw,6642
+dashscope-1.20.11.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+dashscope-1.20.11.dist-info/entry_points.txt,sha256=raEp5dOuj8whJ7yqZlDM8WQ5p2RfnGrGNo0QLQEnatY,50
+dashscope-1.20.11.dist-info/top_level.txt,sha256=woqavFJK9zas5xTqynmALqOtlafghjsk63Xk86powTU,10
+dashscope-1.20.11.dist-info/RECORD,,