dashscope 1.12.0__py3-none-any.whl → 1.13.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dashscope might be problematic. See the advisory accompanying this release for more details.

Files changed (32) hide show
  1. dashscope/__init__.py +8 -27
  2. dashscope/aigc/code_generation.py +14 -17
  3. dashscope/aigc/conversation.py +26 -20
  4. dashscope/aigc/generation.py +11 -1
  5. dashscope/aigc/multimodal_conversation.py +39 -4
  6. dashscope/api_entities/api_request_data.py +2 -2
  7. dashscope/api_entities/api_request_factory.py +4 -10
  8. dashscope/api_entities/dashscope_response.py +18 -9
  9. dashscope/audio/asr/__init__.py +5 -1
  10. dashscope/audio/asr/asr_phrase_manager.py +179 -0
  11. dashscope/audio/asr/recognition.py +61 -3
  12. dashscope/audio/asr/transcription.py +55 -2
  13. dashscope/client/base_api.py +13 -8
  14. dashscope/common/constants.py +5 -2
  15. dashscope/common/error.py +4 -0
  16. dashscope/common/utils.py +12 -2
  17. dashscope/embeddings/batch_text_embedding.py +3 -2
  18. dashscope/embeddings/multimodal_embedding.py +37 -9
  19. dashscope/embeddings/text_embedding.py +1 -0
  20. dashscope/finetune.py +2 -0
  21. dashscope/nlp/understanding.py +11 -16
  22. dashscope/tokenizers/__init__.py +1 -1
  23. dashscope/utils/__init__.py +0 -0
  24. dashscope/utils/oss_utils.py +133 -0
  25. dashscope/version.py +1 -1
  26. {dashscope-1.12.0.dist-info → dashscope-1.13.1.dist-info}/METADATA +1 -1
  27. dashscope-1.13.1.dist-info/RECORD +59 -0
  28. dashscope-1.12.0.dist-info/RECORD +0 -56
  29. {dashscope-1.12.0.dist-info → dashscope-1.13.1.dist-info}/LICENSE +0 -0
  30. {dashscope-1.12.0.dist-info → dashscope-1.13.1.dist-info}/WHEEL +0 -0
  31. {dashscope-1.12.0.dist-info → dashscope-1.13.1.dist-info}/entry_points.txt +0 -0
  32. {dashscope-1.12.0.dist-info → dashscope-1.13.1.dist-info}/top_level.txt +0 -0
dashscope/__init__.py CHANGED
@@ -19,39 +19,20 @@ from dashscope.embeddings.multimodal_embedding import (
19
19
  MultiModalEmbedding, MultiModalEmbeddingItemAudio,
20
20
  MultiModalEmbeddingItemImage, MultiModalEmbeddingItemText)
21
21
  from dashscope.embeddings.text_embedding import TextEmbedding
22
+ from dashscope.tokenizers import Tokenization
22
23
  from dashscope.file import File
23
24
  from dashscope.finetune import FineTune
24
25
  from dashscope.model import Model
25
26
  from dashscope.nlp.understanding import Understanding
26
- from dashscope.tokenizers import Tokenization
27
27
 
28
28
  __all__ = [
29
- base_http_api_url,
30
- base_websocket_api_url,
31
- api_key,
32
- api_key_file_path,
33
- save_api_key,
34
- Conversation,
35
- Generation,
36
- History,
37
- HistoryItem,
38
- ImageSynthesis,
39
- Transcription,
40
- File,
41
- Deployment,
42
- FineTune,
43
- Model,
44
- TextEmbedding,
45
- MultiModalEmbedding,
46
- MultiModalEmbeddingItemAudio,
47
- MultiModalEmbeddingItemImage,
48
- MultiModalEmbeddingItemText,
49
- SpeechSynthesizer,
50
- MultiModalConversation,
51
- BatchTextEmbedding,
52
- BatchTextEmbeddingResponse,
53
- Understanding,
54
- CodeGeneration,
29
+ base_http_api_url, base_websocket_api_url, api_key, api_key_file_path,
30
+ save_api_key, Conversation, Generation, History, HistoryItem,
31
+ ImageSynthesis, Transcription, File, Deployment, FineTune, Model,
32
+ TextEmbedding, MultiModalEmbedding, MultiModalEmbeddingItemAudio,
33
+ MultiModalEmbeddingItemImage, MultiModalEmbeddingItemText,
34
+ SpeechSynthesizer, MultiModalConversation, BatchTextEmbedding,
35
+ BatchTextEmbeddingResponse, Understanding, CodeGeneration,
55
36
  Tokenization,
56
37
  ]
57
38
 
@@ -1,6 +1,7 @@
1
1
  from typing import Generator, List, Union
2
2
 
3
- from dashscope.api_entities.dashscope_response import DashScopeAPIResponse, DictMixin, Role
3
+ from dashscope.api_entities.dashscope_response import (DashScopeAPIResponse,
4
+ DictMixin, Role)
4
5
  from dashscope.client.base_api import BaseApi
5
6
  from dashscope.common.constants import MESSAGE, SCENE
6
7
  from dashscope.common.error import InputRequired, ModelRequired
@@ -47,7 +48,6 @@ class CodeGeneration(BaseApi):
47
48
  """API for AI-Generated Content(AIGC) models.
48
49
 
49
50
  """
50
-
51
51
  class Models:
52
52
  tongyi_lingma_v1 = 'tongyi-lingma-v1'
53
53
 
@@ -63,13 +63,14 @@ class CodeGeneration(BaseApi):
63
63
 
64
64
  @classmethod
65
65
  def call(
66
- cls,
67
- model: str,
68
- scene: str = None,
69
- api_key: str = None,
70
- message: List[MessageParam] = None,
71
- **kwargs
72
- ) -> Union[DashScopeAPIResponse, Generator[DashScopeAPIResponse, None, None]]:
66
+ cls,
67
+ model: str,
68
+ scene: str = None,
69
+ api_key: str = None,
70
+ message: List[MessageParam] = None,
71
+ **kwargs
72
+ ) -> Union[DashScopeAPIResponse, Generator[DashScopeAPIResponse, None,
73
+ None]]:
73
74
  """Call generation model service.
74
75
 
75
76
  Args:
@@ -116,7 +117,8 @@ class CodeGeneration(BaseApi):
116
117
  if model is None or not model:
117
118
  raise ModelRequired('Model is required!')
118
119
  task_group, task = _get_task_group_and_task(__name__)
119
- input, parameters = cls._build_input_parameters(model, scene, message, **kwargs)
120
+ input, parameters = cls._build_input_parameters(
121
+ model, scene, message, **kwargs)
120
122
  response = super().call(model=model,
121
123
  task_group=task_group,
122
124
  task=task,
@@ -133,11 +135,6 @@ class CodeGeneration(BaseApi):
133
135
 
134
136
  @classmethod
135
137
  def _build_input_parameters(cls, model, scene, message, **kwargs):
136
- parameters = {
137
- "n": kwargs.pop("n", 1)
138
- }
139
- input = {
140
- SCENE: scene,
141
- MESSAGE: message
142
- }
138
+ parameters = {'n': kwargs.pop('n', 1)}
139
+ input = {SCENE: scene, MESSAGE: message}
143
140
  return input, {**parameters, **kwargs}
@@ -1,12 +1,12 @@
1
+ import json
1
2
  from copy import deepcopy
2
3
  from http import HTTPStatus
3
- from typing import Any, Generator, List, Union
4
+ from typing import Any, Dict, Generator, List, Union
4
5
 
5
6
  from dashscope.api_entities.dashscope_response import (ConversationResponse,
6
7
  Message, Role)
7
8
  from dashscope.client.base_api import BaseApi
8
- from dashscope.common.constants import (DEPRECATED_MESSAGE, HISTORY, MESSAGES,
9
- PROMPT)
9
+ from dashscope.common.constants import DEPRECATED_MESSAGE, HISTORY, PROMPT
10
10
  from dashscope.common.error import InputRequired, InvalidInput, ModelRequired
11
11
  from dashscope.common.logging import logger
12
12
  from dashscope.common.utils import _get_task_group_and_task
@@ -101,8 +101,7 @@ class Conversation(BaseApi):
101
101
  qwen_turbo = 'qwen-turbo'
102
102
  qwen_plus = 'qwen-plus'
103
103
 
104
- def __init__(self,
105
- history: History = None) -> None:
104
+ def __init__(self, history: History = None) -> None:
106
105
  """Init a chat.
107
106
 
108
107
  Args:
@@ -126,6 +125,7 @@ class Conversation(BaseApi):
126
125
  n_history: int = -1,
127
126
  api_key: str = None,
128
127
  messages: List[Message] = None,
128
+ plugins: Union[str, Dict[str, Any]] = None,
129
129
  **kwargs
130
130
  ) -> Union[ConversationResponse, Generator[ConversationResponse, None,
131
131
  None]]:
@@ -150,6 +150,7 @@ class Conversation(BaseApi):
150
150
  [{'role': 'user',
151
151
  'content': 'The weather is fine today.'},
152
152
  {'role': 'assistant', 'content': 'Suitable for outings'}]
153
+ plugins (Any): The plugin config, Can be plugins config str, or dict.
153
154
  **kwargs(qwen-turbo, qwen-plus):
154
155
  stream(bool, `optional`): Enable server-sent events
155
156
  (ref: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) # noqa E501
@@ -157,13 +158,13 @@ class Conversation(BaseApi):
157
158
  temperature(float, `optional`): Used to control the degree
158
159
  of randomness and diversity. Specifically, the temperature
159
160
  value controls the degree to which the probability distribution
160
- of each candidate word is smoothed when generating text.
161
- A higher temperature value will reduce the peak value of
162
- the probability, allowing more low-probability words to be
163
- selected, and the generated results will be more diverse;
164
- while a lower temperature value will enhance the peak value
165
- of the probability, making it easier for high-probability
166
- words to be selected, the generated results are more
161
+ of each candidate word is smoothed when generating text.
162
+ A higher temperature value will reduce the peak value of
163
+ the probability, allowing more low-probability words to be
164
+ selected, and the generated results will be more diverse;
165
+ while a lower temperature value will enhance the peak value
166
+ of the probability, making it easier for high-probability
167
+ words to be selected, the generated results are more
167
168
  deterministic,range(0, 2) .[qwen-turbo,qwen-plus].
168
169
  top_p(float, `optional`): A sampling strategy, called nucleus
169
170
  sampling, where the model considers the results of the
@@ -195,6 +196,13 @@ class Conversation(BaseApi):
195
196
  if model is None or not model:
196
197
  raise ModelRequired('Model is required!')
197
198
  task_group, _ = _get_task_group_and_task(__name__)
199
+ if plugins is not None:
200
+ headers = kwargs.pop('headers', {})
201
+ if isinstance(plugins, str):
202
+ headers['X-DashScope-Plugin'] = plugins
203
+ else:
204
+ headers['X-DashScope-Plugin'] = json.dumps(plugins)
205
+ kwargs['headers'] = headers
198
206
  input, parameters = self._build_input_parameters(
199
207
  model, prompt, history, auto_history, n_history, messages,
200
208
  **kwargs)
@@ -236,15 +244,13 @@ class Conversation(BaseApi):
236
244
  def _build_input_parameters(self, model, prompt, history, auto_history,
237
245
  n_history, messages, **kwargs):
238
246
  if model == Conversation.Models.qwen_v1:
239
- logger.warning("Model %s is deprecated, use %s instead!" % (
240
- Conversation.Models.qwen_v1,
241
- Conversation.Models.qwen_turbo
242
- ))
247
+ logger.warning(
248
+ 'Model %s is deprecated, use %s instead!' %
249
+ (Conversation.Models.qwen_v1, Conversation.Models.qwen_turbo))
243
250
  if model == Conversation.Models.qwen_plus_v1:
244
- logger.warning("Model %s is deprecated, use %s instead!" % (
245
- Conversation.Models.qwen_plus_v1,
246
- Conversation.Models.qwen_plus
247
- ))
251
+ logger.warning('Model %s is deprecated, use %s instead!' %
252
+ (Conversation.Models.qwen_plus_v1,
253
+ Conversation.Models.qwen_plus))
248
254
  parameters = {}
249
255
  if history is not None and auto_history:
250
256
  raise InvalidInput('auto_history is True, history must None')
@@ -1,5 +1,6 @@
1
1
  import copy
2
- from typing import Any, Generator, List, Union
2
+ import json
3
+ from typing import Any, Dict, Generator, List, Union
3
4
 
4
5
  from dashscope.api_entities.dashscope_response import (GenerationResponse,
5
6
  Message, Role)
@@ -36,6 +37,7 @@ class Generation(BaseApi):
36
37
  history: list = None,
37
38
  api_key: str = None,
38
39
  messages: List[Message] = None,
40
+ plugins: Union[str, Dict[str, Any]] = None,
39
41
  **kwargs
40
42
  ) -> Union[GenerationResponse, Generator[GenerationResponse, None, None]]:
41
43
  """Call generation model service.
@@ -55,6 +57,7 @@ class Generation(BaseApi):
55
57
  [{'role': 'user',
56
58
  'content': 'The weather is fine today.'},
57
59
  {'role': 'assistant', 'content': 'Suitable for outings'}]
60
+ plugins (Any): The plugin config. Can be plugins config str, or dict.
58
61
  **kwargs:
59
62
  stream(bool, `optional`): Enable server-sent events
60
63
  (ref: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) # noqa E501
@@ -102,6 +105,13 @@ class Generation(BaseApi):
102
105
  if model is None or not model:
103
106
  raise ModelRequired('Model is required!')
104
107
  task_group, function = _get_task_group_and_task(__name__)
108
+ if plugins is not None:
109
+ headers = kwargs.pop('headers', {})
110
+ if isinstance(plugins, str):
111
+ headers['X-DashScope-Plugin'] = plugins
112
+ else:
113
+ headers['X-DashScope-Plugin'] = json.dumps(plugins)
114
+ kwargs['headers'] = headers
105
115
  input, parameters = cls._build_input_parameters(
106
116
  model, prompt, history, messages, **kwargs)
107
117
  response = super().call(model=model,
@@ -1,9 +1,13 @@
1
+ import copy
1
2
  from typing import Generator, List, Union
2
3
 
3
- from dashscope.api_entities.dashscope_response import (MultiModalConversationResponse)
4
+ from dashscope.api_entities.dashscope_response import \
5
+ MultiModalConversationResponse
4
6
  from dashscope.client.base_api import BaseApi
5
7
  from dashscope.common.error import InputRequired, ModelRequired
6
8
  from dashscope.common.utils import _get_task_group_and_task
9
+ from dashscope.utils.oss_utils import preprocess_message_element
10
+
7
11
 
8
12
  class MultiModalConversation(BaseApi):
9
13
  """MultiModal conversational robot interface.
@@ -21,7 +25,8 @@ class MultiModalConversation(BaseApi):
21
25
  messages: List,
22
26
  api_key: str = None,
23
27
  **kwargs
24
- ) -> Union[MultiModalConversationResponse, Generator[MultiModalConversationResponse, None, None]]:
28
+ ) -> Union[MultiModalConversationResponse, Generator[
29
+ MultiModalConversationResponse, None, None]]:
25
30
  """Call the conversation model service.
26
31
 
27
32
  Args:
@@ -59,7 +64,7 @@ class MultiModalConversation(BaseApi):
59
64
  tokens with top_p probability mass. So 0.1 means only
60
65
  the tokens comprising the top 10% probability mass are
61
66
  considered[qwen-turbo,bailian-v1].
62
- top_k(float, `optional`):
67
+ top_k(float, `optional`):
63
68
 
64
69
  Raises:
65
70
  InvalidInput: The history and auto_history are mutually exclusive.
@@ -74,7 +79,13 @@ class MultiModalConversation(BaseApi):
74
79
  if model is None or not model:
75
80
  raise ModelRequired('Model is required!')
76
81
  task_group, _ = _get_task_group_and_task(__name__)
77
- input = {'messages': messages}
82
+ msg_copy = copy.deepcopy(messages)
83
+ has_upload = cls._preprocess_messages(model, msg_copy, api_key)
84
+ if has_upload:
85
+ headers = kwargs.pop('headers', {})
86
+ headers['X-DashScope-OssResourceResolve'] = 'enable'
87
+ kwargs['headers'] = headers
88
+ input = {'messages': msg_copy}
78
89
  response = super().call(model=model,
79
90
  task_group=task_group,
80
91
  task=MultiModalConversation.task,
@@ -88,3 +99,27 @@ class MultiModalConversation(BaseApi):
88
99
  for rsp in response)
89
100
  else:
90
101
  return MultiModalConversationResponse.from_api_response(response)
102
+
103
+ @classmethod
104
+ def _preprocess_messages(cls, model: str, messages: List[dict],
105
+ api_key: str):
106
+ """
107
+ messages = [
108
+ {
109
+ "role": "user",
110
+ "content": [
111
+ {"image": ""},
112
+ {"text": ""},
113
+ ]
114
+ }
115
+ ]
116
+ """
117
+ has_upload = False
118
+ for message in messages:
119
+ content = message['content']
120
+ for elem in content:
121
+ is_upload = preprocess_message_element(model, elem, api_key)
122
+ if is_upload and not has_upload:
123
+ has_upload = True
124
+
125
+ return has_upload
@@ -41,10 +41,10 @@ class ApiRequestData():
41
41
  def add_parameters(self, **params):
42
42
  for key, value in params.items():
43
43
  self.parameters[key] = value
44
-
44
+
45
45
  def add_resources(self, resources):
46
46
  self.resources = resources
47
-
47
+
48
48
  def to_request_object(self) -> str:
49
49
  """Convert data to json, called from http request.
50
50
  Returns:
@@ -28,14 +28,8 @@ def _get_protocol_params(kwargs):
28
28
  form, resources)
29
29
 
30
30
 
31
- def _build_api_request(model: str,
32
- input: object,
33
- task_group: str,
34
- task: str,
35
- function: str,
36
- api_key: str,
37
- is_service=True,
38
- **kwargs):
31
+ def _build_api_request(model: str, input: object, task_group: str, task: str,
32
+ function: str, api_key: str, is_service=True, **kwargs):
39
33
  (api_protocol, ws_stream_mode, is_binary_input, http_method, stream,
40
34
  async_request, query, headers, request_timeout, stream_result_mode, form,
41
35
  resources) = _get_protocol_params(kwargs)
@@ -44,8 +38,8 @@ def _build_api_request(model: str,
44
38
  if not dashscope.base_http_api_url.endswith('/'):
45
39
  http_url = dashscope.base_http_api_url + '/'
46
40
  else:
47
- http_url = dashscope.base_http_api_url
48
-
41
+ http_url = dashscope.base_http_api_url
42
+
49
43
  if is_service:
50
44
  http_url = http_url + SERVICE_API_PATH + '/'
51
45
 
@@ -137,12 +137,17 @@ class Message(DictMixin):
137
137
  class Choice(DictMixin):
138
138
  finish_reason: str
139
139
  message: Message
140
- def __init__(self, finish_reason: str = None, message: Message = None, **kwargs):
140
+
141
+ def __init__(self,
142
+ finish_reason: str = None,
143
+ message: Message = None,
144
+ **kwargs):
141
145
  msgObject = None
142
146
  if message is not None:
143
147
  msgObject = Message(**message)
144
- super().__init__(finish_reason=finish_reason,
145
- message=msgObject, **kwargs)
148
+ super().__init__(finish_reason=finish_reason,
149
+ message=msgObject,
150
+ **kwargs)
146
151
 
147
152
 
148
153
  @dataclass(init=False)
@@ -160,7 +165,7 @@ class GenerationOutput(DictMixin):
160
165
  if choices is not None:
161
166
  chs = []
162
167
  for choice in choices:
163
- chs.append(Choice(**choice))
168
+ chs.append(Choice(**choice))
164
169
  super().__init__(text=text,
165
170
  finish_reason=finish_reason,
166
171
  choices=chs,
@@ -206,6 +211,7 @@ class GenerationResponse(DashScopeAPIResponse):
206
211
  code=api_response.code,
207
212
  message=api_response.message)
208
213
 
214
+
209
215
  @dataclass(init=False)
210
216
  class MultiModalConversationOutput(DictMixin):
211
217
  choices: List[Choice]
@@ -219,7 +225,7 @@ class MultiModalConversationOutput(DictMixin):
219
225
  if choices is not None:
220
226
  chs = []
221
227
  for choice in choices:
222
- chs.append(Choice(**choice))
228
+ chs.append(Choice(**choice))
223
229
  super().__init__(text=text,
224
230
  finish_reason=finish_reason,
225
231
  choices=chs,
@@ -230,6 +236,7 @@ class MultiModalConversationOutput(DictMixin):
230
236
  class MultiModalConversationUsage(DictMixin):
231
237
  input_tokens: int
232
238
  output_tokens: int
239
+
233
240
  # TODO add image usage info.
234
241
 
235
242
  def __init__(self,
@@ -261,10 +268,12 @@ class MultiModalConversationResponse(DashScopeAPIResponse):
261
268
  output=MultiModalConversationOutput(**api_response.output),
262
269
  usage=MultiModalConversationUsage(**usage))
263
270
  else:
264
- return MultiModalConversationResponse(status_code=api_response.status_code,
265
- request_id=api_response.request_id,
266
- code=api_response.code,
267
- message=api_response.message)
271
+ return MultiModalConversationResponse(
272
+ status_code=api_response.status_code,
273
+ request_id=api_response.request_id,
274
+ code=api_response.code,
275
+ message=api_response.message)
276
+
268
277
 
269
278
  @dataclass(init=False)
270
279
  class ConversationResponse(GenerationResponse):
@@ -1,4 +1,8 @@
1
+ from .asr_phrase_manager import AsrPhraseManager
1
2
  from .recognition import Recognition, RecognitionCallback, RecognitionResult
2
3
  from .transcription import Transcription
3
4
 
4
- __all__ = [Transcription, Recognition, RecognitionCallback, RecognitionResult]
5
+ __all__ = [
6
+ Transcription, Recognition, RecognitionCallback, RecognitionResult,
7
+ AsrPhraseManager
8
+ ]
@@ -0,0 +1,179 @@
1
+ from http import HTTPStatus
2
+ from typing import Any, Dict
3
+
4
+ from dashscope.api_entities.dashscope_response import DashScopeAPIResponse
5
+ from dashscope.client.base_api import BaseAsyncApi
6
+ from dashscope.common.error import InvalidParameter
7
+ from dashscope.common.logging import logger
8
+ from dashscope.finetune import FineTune
9
+
10
+
11
+ class AsrPhraseManager(BaseAsyncApi):
12
+ """Hot word management for speech recognition.
13
+ """
14
+ @classmethod
15
+ def create_phrases(cls,
16
+ model: str,
17
+ phrases: Dict[str, Any],
18
+ training_type: str = 'compile_asr_phrase',
19
+ **kwargs) -> DashScopeAPIResponse:
20
+ """Create hot words.
21
+
22
+ Args:
23
+ model (str): The requested model.
24
+ phrases (Dict[str, Any]): A dictionary that contains phrases,
25
+ such as {'下一首':90,'上一首':90}.
26
+ training_type (str, `optional`): The training type,
27
+ 'compile_asr_phrase' is default.
28
+
29
+ Raises:
30
+ InvalidParameter: Parameter input is None or empty!
31
+
32
+ Returns:
33
+ DashScopeAPIResponse: The results of creating hot words.
34
+ """
35
+ if phrases is None or len(phrases) == 0:
36
+ raise InvalidParameter('phrases is empty!')
37
+ if training_type is None or len(training_type) == 0:
38
+ raise InvalidParameter('training_type is empty!')
39
+
40
+ original_ft_sub_path = FineTune.SUB_PATH
41
+ FineTune.SUB_PATH = 'fine-tunes'
42
+ response = FineTune.call(model=model,
43
+ training_file_ids=[],
44
+ validation_file_ids=[],
45
+ mode=training_type,
46
+ hyper_parameters={'phrase_list': phrases},
47
+ **kwargs)
48
+ FineTune.SUB_PATH = original_ft_sub_path
49
+
50
+ if response.status_code != HTTPStatus.OK:
51
+ logger.error('Create phrase failed, ' + str(response))
52
+
53
+ return response
54
+
55
+ @classmethod
56
+ def update_phrases(cls,
57
+ model: str,
58
+ phrase_id: str,
59
+ phrases: Dict[str, Any],
60
+ training_type: str = 'compile_asr_phrase',
61
+ **kwargs) -> DashScopeAPIResponse:
62
+ """Update the hot words marked phrase_id.
63
+
64
+ Args:
65
+ model (str): The requested model.
66
+ phrase_id (str): The ID of phrases,
67
+ which created by create_phrases().
68
+ phrases (Dict[str, Any]): A dictionary that contains phrases,
69
+ such as {'暂停':90}.
70
+ training_type (str, `optional`):
71
+ The training type, 'compile_asr_phrase' is default.
72
+
73
+ Raises:
74
+ InvalidParameter: Parameter input is None or empty!
75
+
76
+ Returns:
77
+ DashScopeAPIResponse: The results of updating hot words.
78
+ """
79
+ if phrase_id is None or len(phrase_id) == 0:
80
+ raise InvalidParameter('phrase_id is empty!')
81
+ if phrases is None or len(phrases) == 0:
82
+ raise InvalidParameter('phrases is empty!')
83
+ if training_type is None or len(training_type) == 0:
84
+ raise InvalidParameter('training_type is empty!')
85
+
86
+ original_ft_sub_path = FineTune.SUB_PATH
87
+ FineTune.SUB_PATH = 'fine-tunes'
88
+ response = FineTune.call(model=model,
89
+ training_file_ids=[],
90
+ validation_file_ids=[],
91
+ mode=training_type,
92
+ hyper_parameters={'phrase_list': phrases},
93
+ finetuned_output=phrase_id,
94
+ **kwargs)
95
+ FineTune.SUB_PATH = original_ft_sub_path
96
+
97
+ if response.status_code != HTTPStatus.OK:
98
+ logger.error('Update phrase failed, ' + str(response))
99
+
100
+ return response
101
+
102
+ @classmethod
103
+ def query_phrases(cls, phrase_id: str, **kwargs) -> DashScopeAPIResponse:
104
+ """Query the hot words by phrase_id.
105
+
106
+ Args:
107
+ phrase_id (str): The ID of phrases,
108
+ which created by create_phrases().
109
+
110
+ Raises:
111
+ InvalidParameter: phrase_id input is None or empty!
112
+
113
+ Returns:
114
+ AsrPhraseManagerResult: The results of querying hot words.
115
+ """
116
+ if phrase_id is None or len(phrase_id) == 0:
117
+ raise InvalidParameter('phrase_id is empty!')
118
+
119
+ original_ft_sub_path = FineTune.SUB_PATH
120
+ FineTune.SUB_PATH = 'fine-tunes/outputs'
121
+ response = FineTune.get(job_id=phrase_id, **kwargs)
122
+ FineTune.SUB_PATH = original_ft_sub_path
123
+
124
+ if response.status_code != HTTPStatus.OK:
125
+ logger.error('Query phrase failed, ' + str(response))
126
+
127
+ return response
128
+
129
+ @classmethod
130
+ def list_phrases(cls,
131
+ page: int = 1,
132
+ page_size: int = 10,
133
+ **kwargs) -> DashScopeAPIResponse:
134
+ """List all information of phrases.
135
+
136
+ Args:
137
+ page (int): Page number, greater than 0, default value 1.
138
+ page_size (int): The paging size, greater than 0
139
+ and less than or equal to 100, default value 10.
140
+
141
+ Returns:
142
+ DashScopeAPIResponse: The results of listing hot words.
143
+ """
144
+ original_ft_sub_path = FineTune.SUB_PATH
145
+ FineTune.SUB_PATH = 'fine-tunes/outputs'
146
+ response = FineTune.list(page=page, page_size=page_size, **kwargs)
147
+ FineTune.SUB_PATH = original_ft_sub_path
148
+
149
+ if response.status_code != HTTPStatus.OK:
150
+ logger.error('List phrase failed, ' + str(response))
151
+
152
+ return response
153
+
154
+ @classmethod
155
+ def delete_phrases(cls, phrase_id: str, **kwargs) -> DashScopeAPIResponse:
156
+ """Delete the hot words by phrase_id.
157
+
158
+ Args:
159
+ phrase_id (str): The ID of phrases,
160
+ which created by create_phrases().
161
+
162
+ Raises:
163
+ InvalidParameter: phrase_id input is None or empty!
164
+
165
+ Returns:
166
+ DashScopeAPIResponse: The results of deleting hot words.
167
+ """
168
+ if phrase_id is None or len(phrase_id) == 0:
169
+ raise InvalidParameter('phrase_id is empty!')
170
+
171
+ original_ft_sub_path = FineTune.SUB_PATH
172
+ FineTune.SUB_PATH = 'fine-tunes/outputs'
173
+ response = FineTune.delete(job_id=phrase_id, **kwargs)
174
+ FineTune.SUB_PATH = original_ft_sub_path
175
+
176
+ if response.status_code != HTTPStatus.OK:
177
+ logger.error('Delete phrase failed, ' + str(response))
178
+
179
+ return response