dashscope 1.18.1__py3-none-any.whl → 1.19.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

This version of dashscope has been flagged as possibly problematic; consult the registry's advisory page for details.

dashscope/__init__.py CHANGED
@@ -1,12 +1,12 @@
1
1
  import logging
2
2
  from logging import NullHandler
3
3
 
4
- from dashscope.app.application import Application
5
4
  from dashscope.aigc.code_generation import CodeGeneration
6
5
  from dashscope.aigc.conversation import Conversation, History, HistoryItem
7
- from dashscope.aigc.generation import Generation
6
+ from dashscope.aigc.generation import AioGeneration, Generation
8
7
  from dashscope.aigc.image_synthesis import ImageSynthesis
9
8
  from dashscope.aigc.multimodal_conversation import MultiModalConversation
9
+ from dashscope.app.application import Application
10
10
  from dashscope.assistants import Assistant, AssistantList, Assistants
11
11
  from dashscope.assistants.assistant_types import AssistantFile, DeleteResponse
12
12
  from dashscope.audio.asr.transcription import Transcription
@@ -14,7 +14,8 @@ from dashscope.audio.tts.speech_synthesizer import SpeechSynthesizer
14
14
  from dashscope.common.api_key import save_api_key
15
15
  from dashscope.common.env import (api_key, api_key_file_path,
16
16
  base_http_api_url, base_websocket_api_url)
17
- from dashscope.deployment import Deployment
17
+ from dashscope.customize.deployments import Deployments
18
+ from dashscope.customize.finetunes import FineTunes
18
19
  from dashscope.embeddings.batch_text_embedding import BatchTextEmbedding
19
20
  from dashscope.embeddings.batch_text_embedding_response import \
20
21
  BatchTextEmbeddingResponse
@@ -23,8 +24,7 @@ from dashscope.embeddings.multimodal_embedding import (
23
24
  MultiModalEmbeddingItemImage, MultiModalEmbeddingItemText)
24
25
  from dashscope.embeddings.text_embedding import TextEmbedding
25
26
  from dashscope.files import Files
26
- from dashscope.finetune import FineTune
27
- from dashscope.model import Model
27
+ from dashscope.models import Models
28
28
  from dashscope.nlp.understanding import Understanding
29
29
  from dashscope.rerank.text_rerank import TextReRank
30
30
  from dashscope.threads import (MessageFile, Messages, Run, RunList, Runs,
@@ -39,6 +39,7 @@ __all__ = [
39
39
  api_key,
40
40
  api_key_file_path,
41
41
  save_api_key,
42
+ AioGeneration,
42
43
  Conversation,
43
44
  Generation,
44
45
  History,
@@ -46,9 +47,9 @@ __all__ = [
46
47
  ImageSynthesis,
47
48
  Transcription,
48
49
  Files,
49
- Deployment,
50
- FineTune,
51
- Model,
50
+ Deployments,
51
+ FineTunes,
52
+ Models,
52
53
  TextEmbedding,
53
54
  MultiModalEmbedding,
54
55
  MultiModalEmbeddingItemAudio,
@@ -4,7 +4,7 @@ from typing import Any, Dict, Generator, List, Union
4
4
 
5
5
  from dashscope.api_entities.dashscope_response import (GenerationResponse,
6
6
  Message, Role)
7
- from dashscope.client.base_api import BaseApi
7
+ from dashscope.client.base_api import BaseAioApi, BaseApi
8
8
  from dashscope.common.constants import (CUSTOMIZED_MODEL_ID,
9
9
  DEPRECATED_MESSAGE, HISTORY, MESSAGES,
10
10
  PROMPT)
@@ -188,3 +188,141 @@ class Generation(BaseApi):
188
188
  input[CUSTOMIZED_MODEL_ID] = customized_model_id
189
189
 
190
190
  return input, {**parameters, **kwargs}
191
+
192
+
193
+ class AioGeneration(BaseAioApi):
194
+ task = 'text-generation'
195
+ """API for AI-Generated Content(AIGC) models.
196
+
197
+ """
198
+ class Models:
199
+ """@deprecated, use qwen_turbo instead"""
200
+ qwen_v1 = 'qwen-v1'
201
+ """@deprecated, use qwen_plus instead"""
202
+ qwen_plus_v1 = 'qwen-plus-v1'
203
+
204
+ bailian_v1 = 'bailian-v1'
205
+ dolly_12b_v2 = 'dolly-12b-v2'
206
+ qwen_turbo = 'qwen-turbo'
207
+ qwen_plus = 'qwen-plus'
208
+ qwen_max = 'qwen-max'
209
+
210
+ @classmethod
211
+ async def call(
212
+ cls,
213
+ model: str,
214
+ prompt: Any = None,
215
+ history: list = None,
216
+ api_key: str = None,
217
+ messages: List[Message] = None,
218
+ plugins: Union[str, Dict[str, Any]] = None,
219
+ workspace: str = None,
220
+ **kwargs
221
+ ) -> Union[GenerationResponse, Generator[GenerationResponse, None, None]]:
222
+ """Call generation model service.
223
+
224
+ Args:
225
+ model (str): The requested model, such as qwen-turbo
226
+ prompt (Any): The input prompt.
227
+ history (list):The user provided history, deprecated
228
+ examples:
229
+ [{'user':'The weather is fine today.',
230
+ 'bot': 'Suitable for outings'}].
231
+ Defaults to None.
232
+ api_key (str, optional): The api api_key, can be None,
233
+ if None, will get by default rule(TODO: api key doc).
234
+ messages (list): The generation messages.
235
+ examples:
236
+ [{'role': 'user',
237
+ 'content': 'The weather is fine today.'},
238
+ {'role': 'assistant', 'content': 'Suitable for outings'}]
239
+ plugins (Any): The plugin config. Can be plugins config str, or dict.
240
+ **kwargs:
241
+ stream(bool, `optional`): Enable server-sent events
242
+ (ref: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) # noqa E501
243
+ the result will back partially[qwen-turbo,bailian-v1].
244
+ temperature(float, `optional`): Used to control the degree
245
+ of randomness and diversity. Specifically, the temperature
246
+ value controls the degree to which the probability distribution
247
+ of each candidate word is smoothed when generating text.
248
+ A higher temperature value will reduce the peak value of
249
+ the probability, allowing more low-probability words to be
250
+ selected, and the generated results will be more diverse;
251
+ while a lower temperature value will enhance the peak value
252
+ of the probability, making it easier for high-probability
253
+ words to be selected, the generated results are more
254
+ deterministic, range(0, 2) .[qwen-turbo,qwen-plus].
255
+ top_p(float, `optional`): A sampling strategy, called nucleus
256
+ sampling, where the model considers the results of the
257
+ tokens with top_p probability mass. So 0.1 means only
258
+ the tokens comprising the top 10% probability mass are
259
+ considered[qwen-turbo,bailian-v1].
260
+ top_k(int, `optional`): The size of the sample candidate set when generated. # noqa E501
261
+ For example, when the value is 50, only the 50 highest-scoring tokens # noqa E501
262
+ in a single generation form a randomly sampled candidate set. # noqa E501
263
+ The larger the value, the higher the randomness generated; # noqa E501
264
+ the smaller the value, the higher the certainty generated. # noqa E501
265
+ The default value is 0, which means the top_k policy is # noqa E501
266
+ not enabled. At this time, only the top_p policy takes effect. # noqa E501
267
+ enable_search(bool, `optional`): Whether to enable web search(quark). # noqa E501
268
+ Currently works best only on the first round of conversation.
269
+ Default to False, support model: [qwen-turbo].
270
+ customized_model_id(str, required) The enterprise-specific
271
+ large model id, which needs to be generated from the
272
+ operation background of the enterprise-specific
273
+ large model product, support model: [bailian-v1].
274
+ result_format(str, `optional`): [message|text] Set result result format. # noqa E501
275
+ Default result is text
276
+ incremental_output(bool, `optional`): Used to control the streaming output mode. # noqa E501
277
+ If true, the subsequent output will include the previously input content. # noqa E501
278
+ Otherwise, the subsequent output will not include the previously output # noqa E501
279
+ content. Default false.
280
+ stop(list[str] or list[list[int]], `optional`): Used to control the generation to stop # noqa E501
281
+ when encountering setting str or token ids, the result will not include # noqa E501
282
+ stop words or tokens.
283
+ max_tokens(int, `optional`): The maximum token num expected to be output. It should be # noqa E501
284
+ noted that the length generated by the model will only be less than max_tokens, # noqa E501
285
+ not necessarily equal to it. If max_tokens is set too large, the service will # noqa E501
286
+ directly prompt that the length exceeds the limit. It is generally # noqa E501
287
+ not recommended to set this value.
288
+ repetition_penalty(float, `optional`): Used to control the repeatability when generating models. # noqa E501
289
+ Increasing repetition_penalty can reduce the duplication of model generation. # noqa E501
290
+ 1.0 means no punishment.
291
+ workspace (str): The dashscope workspace id.
292
+ Raises:
293
+ InvalidInput: The history and auto_history are mutually exclusive.
294
+
295
+ Returns:
296
+ Union[GenerationResponse,
297
+ Generator[GenerationResponse, None, None]]: If
298
+ stream is True, return Generator, otherwise GenerationResponse.
299
+ """
300
+ if (prompt is None or not prompt) and (messages is None
301
+ or not messages):
302
+ raise InputRequired('prompt or messages is required!')
303
+ if model is None or not model:
304
+ raise ModelRequired('Model is required!')
305
+ task_group, function = _get_task_group_and_task(__name__)
306
+ if plugins is not None:
307
+ headers = kwargs.pop('headers', {})
308
+ if isinstance(plugins, str):
309
+ headers['X-DashScope-Plugin'] = plugins
310
+ else:
311
+ headers['X-DashScope-Plugin'] = json.dumps(plugins)
312
+ kwargs['headers'] = headers
313
+ input, parameters = Generation._build_input_parameters(
314
+ model, prompt, history, messages, **kwargs)
315
+ response = await super().call(model=model,
316
+ task_group=task_group,
317
+ task=Generation.task,
318
+ function=function,
319
+ api_key=api_key,
320
+ input=input,
321
+ workspace=workspace,
322
+ **parameters)
323
+ is_stream = kwargs.get('stream', False)
324
+ if is_stream:
325
+ return (GenerationResponse.from_api_response(rsp)
326
+ async for rsp in response)
327
+ else:
328
+ return GenerationResponse.from_api_response(response)
@@ -1,18 +1,22 @@
1
1
  import json
2
2
  from http import HTTPStatus
3
3
 
4
+ import aiohttp
4
5
  import requests
5
6
 
6
- from dashscope.api_entities.base_request import BaseRequest
7
+ from dashscope.api_entities.base_request import AioBaseRequest
7
8
  from dashscope.api_entities.dashscope_response import DashScopeAPIResponse
8
9
  from dashscope.common.constants import (DEFAULT_REQUEST_TIMEOUT_SECONDS,
9
10
  SSE_CONTENT_TYPE, HTTPMethod)
10
11
  from dashscope.common.error import UnsupportedHTTPMethod
11
12
  from dashscope.common.logging import logger
12
- from dashscope.common.utils import _handle_stream
13
+ from dashscope.common.utils import (_handle_aio_stream,
14
+ _handle_aiohttp_failed_response,
15
+ _handle_http_failed_response,
16
+ _handle_stream)
13
17
 
14
18
 
15
- class HttpRequest(BaseRequest):
19
+ class HttpRequest(AioBaseRequest):
16
20
  def __init__(self,
17
21
  url: str,
18
22
  api_key: str,
@@ -84,6 +88,127 @@ class HttpRequest(BaseRequest):
84
88
  pass
85
89
  return output
86
90
 
91
+ async def aio_call(self):
92
+ response = self._handle_aio_request()
93
+ if self.stream:
94
+ return (item async for item in response)
95
+ else:
96
+ result = await response.__anext__()
97
+ try:
98
+ await response.__anext__()
99
+ except StopAsyncIteration:
100
+ pass
101
+ return result
102
+
103
+ async def _handle_aio_request(self):
104
+ try:
105
+ async with aiohttp.ClientSession(
106
+ timeout=aiohttp.ClientTimeout(total=self.timeout),
107
+ headers=self.headers) as session:
108
+ logger.debug('Starting request: %s' % self.url)
109
+ if self.method == HTTPMethod.POST:
110
+ is_form, obj = self.data.get_aiohttp_payload()
111
+ if is_form:
112
+ headers = {**self.headers, **obj.headers}
113
+ response = await session.post(url=self.url,
114
+ data=obj,
115
+ headers=headers)
116
+ else:
117
+ response = await session.request('POST',
118
+ url=self.url,
119
+ json=obj,
120
+ headers=self.headers)
121
+ elif self.method == HTTPMethod.GET:
122
+ response = await session.get(url=self.url,
123
+ params=self.data.parameters,
124
+ headers=self.headers)
125
+ else:
126
+ raise UnsupportedHTTPMethod('Unsupported http method: %s' %
127
+ self.method)
128
+ logger.debug('Response returned: %s' % self.url)
129
+ async with response:
130
+ async for rsp in self._handle_aio_response(response):
131
+ yield rsp
132
+ except aiohttp.ClientConnectorError as e:
133
+ logger.exception(e)
134
+ yield DashScopeAPIResponse(-1,
135
+ '',
136
+ 'Unknown',
137
+ message='Error type: %s, message: %s' %
138
+ (type(e), e))
139
+ except BaseException as e:
140
+ logger.exception(e)
141
+ yield DashScopeAPIResponse(-1,
142
+ '',
143
+ 'Unknown',
144
+ message='Error type: %s, message: %s' %
145
+ (type(e), e))
146
+
147
+ async def _handle_aio_response(self, response: aiohttp.ClientResponse):
148
+ request_id = ''
149
+ if (response.status == HTTPStatus.OK and self.stream
150
+ and SSE_CONTENT_TYPE in response.content_type):
151
+ async for is_error, status_code, data in _handle_aio_stream(
152
+ response):
153
+ try:
154
+ output = None
155
+ usage = None
156
+ msg = json.loads(data)
157
+ if not is_error:
158
+ if 'output' in msg:
159
+ output = msg['output']
160
+ if 'usage' in msg:
161
+ usage = msg['usage']
162
+ if 'request_id' in msg:
163
+ request_id = msg['request_id']
164
+ except json.JSONDecodeError:
165
+ yield DashScopeAPIResponse(
166
+ request_id=request_id,
167
+ status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
168
+ code='Unknown',
169
+ message=data)
170
+ continue
171
+ if is_error:
172
+ yield DashScopeAPIResponse(request_id=request_id,
173
+ status_code=status_code,
174
+ code=msg['code'],
175
+ message=msg['message'])
176
+ else:
177
+ yield DashScopeAPIResponse(request_id=request_id,
178
+ status_code=HTTPStatus.OK,
179
+ output=output,
180
+ usage=usage)
181
+ elif (response.status == HTTPStatus.OK
182
+ and 'multipart' in response.content_type):
183
+ reader = aiohttp.MultipartReader.from_response(response)
184
+ output = {}
185
+ while True:
186
+ part = await reader.next()
187
+ if part is None:
188
+ break
189
+ output[part.name] = await part.read()
190
+ if 'request_id' in output:
191
+ request_id = output['request_id']
192
+ yield DashScopeAPIResponse(request_id=request_id,
193
+ status_code=HTTPStatus.OK,
194
+ output=output)
195
+ elif response.status == HTTPStatus.OK:
196
+ json_content = await response.json()
197
+ output = None
198
+ usage = None
199
+ if 'output' in json_content and json_content['output'] is not None:
200
+ output = json_content['output']
201
+ if 'usage' in json_content:
202
+ usage = json_content['usage']
203
+ if 'request_id' in json_content:
204
+ request_id = json_content['request_id']
205
+ yield DashScopeAPIResponse(request_id=request_id,
206
+ status_code=HTTPStatus.OK,
207
+ output=output,
208
+ usage=usage)
209
+ else:
210
+ yield await _handle_aiohttp_failed_response(response)
211
+
87
212
  def _handle_response(self, response: requests.Response):
88
213
  request_id = ''
89
214
  if (response.status_code == HTTPStatus.OK and self.stream
@@ -149,33 +274,7 @@ class HttpRequest(BaseRequest):
149
274
  output=output,
150
275
  usage=usage)
151
276
  else:
152
- if 'application/json' in response.headers.get('content-type', ''):
153
- error = response.json()
154
- if 'request_id' in error:
155
- request_id = error['request_id']
156
- if 'message' not in error:
157
- message = ''
158
- logger.error('Request: %s failed, status: %s' %
159
- (self.url, response.status_code))
160
- else:
161
- message = error['message']
162
- logger.error(
163
- 'Request: %s failed, status: %s, message: %s' %
164
- (self.url, response.status_code, error['message']))
165
- yield DashScopeAPIResponse(
166
- request_id=request_id,
167
- status_code=response.status_code,
168
- output=None,
169
- code=error['code']
170
- if 'code' in error else None, # noqa E501
171
- message=message)
172
- else:
173
- msg = response.content
174
- yield DashScopeAPIResponse(request_id=request_id,
175
- status_code=response.status_code,
176
- output=None,
177
- code='Unknown',
178
- message=msg.decode('utf-8'))
277
+ yield _handle_http_failed_response(response)
179
278
 
180
279
  def _handle_request(self):
181
280
  try:
@@ -207,6 +306,10 @@ class HttpRequest(BaseRequest):
207
306
  self.method)
208
307
  for rsp in self._handle_response(response):
209
308
  yield rsp
210
- except Exception as e:
309
+ except BaseException as e:
211
310
  logger.error(e)
212
- raise e
311
+ yield DashScopeAPIResponse(-1,
312
+ '',
313
+ 'Unknown',
314
+ message='Error type: %s, message: %s' %
315
+ (type(e), e))
@@ -154,6 +154,13 @@ class WebSocketRequest(AioBaseRequest):
154
154
  status_code=code,
155
155
  code=code,
156
156
  message=msg)
157
+ except BaseException as e:
158
+ logger.exception(e)
159
+ yield DashScopeAPIResponse(request_id='',
160
+ status_code=-1,
161
+ code='Unknown',
162
+ message='Error type: %s, message: %s' %
163
+ (type(e), e))
157
164
 
158
165
  def _to_DashScopeAPIResponse(self, task_id, is_binary, result):
159
166
  if is_binary:
dashscope/app/__init__.py CHANGED
@@ -1,5 +1,3 @@
1
1
  from .application import Application
2
2
 
3
- __all__ = [
4
- Application
5
- ]
3
+ __all__ = [Application]
@@ -1,6 +1,4 @@
1
1
  #!/usr/bin/env python3
2
- # -*-coding:utf-8 -*-
3
-
4
2
  """
5
3
  @File : application_response.py
6
4
  @Date : 2024-02-24
@@ -8,9 +6,10 @@
8
6
  """
9
7
  from dataclasses import dataclass
10
8
  from http import HTTPStatus
11
- from typing import List, Dict
9
+ from typing import Dict, List
12
10
 
13
- from dashscope.api_entities.dashscope_response import DashScopeAPIResponse, DictMixin
11
+ from dashscope.api_entities.dashscope_response import (DashScopeAPIResponse,
12
+ DictMixin)
14
13
 
15
14
 
16
15
  @dataclass(init=False)
@@ -157,17 +156,14 @@ class ApplicationModelUsage(DictMixin):
157
156
  class ApplicationUsage(DictMixin):
158
157
  models: List[ApplicationModelUsage]
159
158
 
160
- def __init__(self,
161
- models: List[ApplicationModelUsage] = None,
162
- **kwargs):
159
+ def __init__(self, models: List[ApplicationModelUsage] = None, **kwargs):
163
160
  model_usages = None
164
161
  if models is not None:
165
162
  model_usages = []
166
163
  for model_usage in models:
167
164
  model_usages.append(ApplicationModelUsage(**model_usage))
168
165
 
169
- super().__init__(models=model_usages,
170
- **kwargs)
166
+ super().__init__(models=model_usages, **kwargs)
171
167
 
172
168
 
173
169
  @dataclass(init=False)
@@ -5,7 +5,7 @@ from dashscope.api_entities.dashscope_response import DashScopeAPIResponse
5
5
  from dashscope.client.base_api import BaseAsyncApi
6
6
  from dashscope.common.error import InvalidParameter
7
7
  from dashscope.common.logging import logger
8
- from dashscope.finetune import FineTune
8
+ from dashscope.customize.finetunes import FineTunes
9
9
 
10
10
 
11
11
  class AsrPhraseManager(BaseAsyncApi):
@@ -39,16 +39,16 @@ class AsrPhraseManager(BaseAsyncApi):
39
39
  if training_type is None or len(training_type) == 0:
40
40
  raise InvalidParameter('training_type is empty!')
41
41
 
42
- original_ft_sub_path = FineTune.SUB_PATH
43
- FineTune.SUB_PATH = 'fine-tunes'
44
- response = FineTune.call(model=model,
45
- training_file_ids=[],
46
- validation_file_ids=[],
47
- mode=training_type,
48
- hyper_parameters={'phrase_list': phrases},
49
- workspace=workspace,
50
- **kwargs)
51
- FineTune.SUB_PATH = original_ft_sub_path
42
+ original_ft_sub_path = FineTunes.SUB_PATH
43
+ FineTunes.SUB_PATH = 'fine-tunes'
44
+ response = FineTunes.call(model=model,
45
+ training_file_ids=[],
46
+ validation_file_ids=[],
47
+ mode=training_type,
48
+ hyper_parameters={'phrase_list': phrases},
49
+ workspace=workspace,
50
+ **kwargs)
51
+ FineTunes.SUB_PATH = original_ft_sub_path
52
52
 
53
53
  if response.status_code != HTTPStatus.OK:
54
54
  logger.error('Create phrase failed, ' + str(response))
@@ -88,17 +88,17 @@ class AsrPhraseManager(BaseAsyncApi):
88
88
  if training_type is None or len(training_type) == 0:
89
89
  raise InvalidParameter('training_type is empty!')
90
90
 
91
- original_ft_sub_path = FineTune.SUB_PATH
92
- FineTune.SUB_PATH = 'fine-tunes'
93
- response = FineTune.call(model=model,
94
- training_file_ids=[],
95
- validation_file_ids=[],
96
- mode=training_type,
97
- hyper_parameters={'phrase_list': phrases},
98
- finetuned_output=phrase_id,
99
- workspace=workspace,
100
- **kwargs)
101
- FineTune.SUB_PATH = original_ft_sub_path
91
+ original_ft_sub_path = FineTunes.SUB_PATH
92
+ FineTunes.SUB_PATH = 'fine-tunes'
93
+ response = FineTunes.call(model=model,
94
+ training_file_ids=[],
95
+ validation_file_ids=[],
96
+ mode=training_type,
97
+ hyper_parameters={'phrase_list': phrases},
98
+ finetuned_output=phrase_id,
99
+ workspace=workspace,
100
+ **kwargs)
101
+ FineTunes.SUB_PATH = original_ft_sub_path
102
102
 
103
103
  if response.status_code != HTTPStatus.OK:
104
104
  logger.error('Update phrase failed, ' + str(response))
@@ -126,12 +126,12 @@ class AsrPhraseManager(BaseAsyncApi):
126
126
  if phrase_id is None or len(phrase_id) == 0:
127
127
  raise InvalidParameter('phrase_id is empty!')
128
128
 
129
- original_ft_sub_path = FineTune.SUB_PATH
130
- FineTune.SUB_PATH = 'fine-tunes/outputs'
131
- response = FineTune.get(job_id=phrase_id,
132
- workspace=workspace,
133
- **kwargs)
134
- FineTune.SUB_PATH = original_ft_sub_path
129
+ original_ft_sub_path = FineTunes.SUB_PATH
130
+ FineTunes.SUB_PATH = 'fine-tunes/outputs'
131
+ response = FineTunes.get(job_id=phrase_id,
132
+ workspace=workspace,
133
+ **kwargs)
134
+ FineTunes.SUB_PATH = original_ft_sub_path
135
135
 
136
136
  if response.status_code != HTTPStatus.OK:
137
137
  logger.error('Query phrase failed, ' + str(response))
@@ -155,13 +155,13 @@ class AsrPhraseManager(BaseAsyncApi):
155
155
  Returns:
156
156
  DashScopeAPIResponse: The results of listing hot words.
157
157
  """
158
- original_ft_sub_path = FineTune.SUB_PATH
159
- FineTune.SUB_PATH = 'fine-tunes/outputs'
160
- response = FineTune.list(page=page,
161
- page_size=page_size,
162
- workspace=workspace,
163
- **kwargs)
164
- FineTune.SUB_PATH = original_ft_sub_path
158
+ original_ft_sub_path = FineTunes.SUB_PATH
159
+ FineTunes.SUB_PATH = 'fine-tunes/outputs'
160
+ response = FineTunes.list(page=page,
161
+ page_size=page_size,
162
+ workspace=workspace,
163
+ **kwargs)
164
+ FineTunes.SUB_PATH = original_ft_sub_path
165
165
 
166
166
  if response.status_code != HTTPStatus.OK:
167
167
  logger.error('List phrase failed, ' + str(response))
@@ -188,12 +188,12 @@ class AsrPhraseManager(BaseAsyncApi):
188
188
  if phrase_id is None or len(phrase_id) == 0:
189
189
  raise InvalidParameter('phrase_id is empty!')
190
190
 
191
- original_ft_sub_path = FineTune.SUB_PATH
192
- FineTune.SUB_PATH = 'fine-tunes/outputs'
193
- response = FineTune.delete(job_id=phrase_id,
194
- workspace=workspace,
195
- **kwargs)
196
- FineTune.SUB_PATH = original_ft_sub_path
191
+ original_ft_sub_path = FineTunes.SUB_PATH
192
+ FineTunes.SUB_PATH = 'fine-tunes/outputs'
193
+ response = FineTunes.delete(job_id=phrase_id,
194
+ workspace=workspace,
195
+ **kwargs)
196
+ FineTunes.SUB_PATH = original_ft_sub_path
197
197
 
198
198
  if response.status_code != HTTPStatus.OK:
199
199
  logger.error('Delete phrase failed, ' + str(response))