dashscope 1.22.2__py3-none-any.whl → 1.23.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dashscope might be problematic. Click here for more details.

Files changed (87)
  1. dashscope/__init__.py +3 -1
  2. dashscope/aigc/__init__.py +2 -0
  3. dashscope/aigc/chat_completion.py +282 -0
  4. dashscope/aigc/code_generation.py +2 -0
  5. dashscope/aigc/conversation.py +2 -0
  6. dashscope/aigc/generation.py +2 -0
  7. dashscope/aigc/image_synthesis.py +2 -0
  8. dashscope/aigc/multimodal_conversation.py +2 -0
  9. dashscope/aigc/video_synthesis.py +2 -0
  10. dashscope/api_entities/aiohttp_request.py +3 -0
  11. dashscope/api_entities/api_request_data.py +3 -0
  12. dashscope/api_entities/api_request_factory.py +2 -0
  13. dashscope/api_entities/base_request.py +2 -0
  14. dashscope/api_entities/chat_completion_types.py +344 -0
  15. dashscope/api_entities/dashscope_response.py +63 -0
  16. dashscope/api_entities/http_request.py +3 -0
  17. dashscope/api_entities/websocket_request.py +3 -0
  18. dashscope/app/__init__.py +2 -0
  19. dashscope/app/application.py +17 -15
  20. dashscope/app/application_response.py +1 -1
  21. dashscope/assistants/__init__.py +2 -0
  22. dashscope/assistants/assistant_types.py +2 -0
  23. dashscope/assistants/assistants.py +2 -0
  24. dashscope/assistants/files.py +2 -0
  25. dashscope/audio/__init__.py +4 -2
  26. dashscope/audio/asr/__init__.py +2 -0
  27. dashscope/audio/asr/asr_phrase_manager.py +2 -0
  28. dashscope/audio/asr/recognition.py +5 -0
  29. dashscope/audio/asr/transcription.py +3 -0
  30. dashscope/audio/asr/translation_recognizer.py +2 -0
  31. dashscope/audio/asr/vocabulary.py +3 -0
  32. dashscope/audio/qwen_tts/__init__.py +5 -0
  33. dashscope/audio/qwen_tts/speech_synthesizer.py +77 -0
  34. dashscope/audio/tts/__init__.py +2 -0
  35. dashscope/audio/tts/speech_synthesizer.py +2 -0
  36. dashscope/audio/tts_v2/__init__.py +2 -0
  37. dashscope/audio/tts_v2/enrollment.py +3 -0
  38. dashscope/audio/tts_v2/speech_synthesizer.py +4 -1
  39. dashscope/client/base_api.py +4 -1
  40. dashscope/common/api_key.py +2 -0
  41. dashscope/common/base_type.py +2 -0
  42. dashscope/common/constants.py +2 -0
  43. dashscope/common/env.py +2 -0
  44. dashscope/common/error.py +3 -0
  45. dashscope/common/logging.py +2 -0
  46. dashscope/common/message_manager.py +2 -0
  47. dashscope/common/utils.py +3 -0
  48. dashscope/customize/customize_types.py +2 -0
  49. dashscope/customize/deployments.py +2 -0
  50. dashscope/customize/finetunes.py +2 -0
  51. dashscope/embeddings/__init__.py +2 -0
  52. dashscope/embeddings/batch_text_embedding.py +2 -0
  53. dashscope/embeddings/batch_text_embedding_response.py +4 -1
  54. dashscope/embeddings/multimodal_embedding.py +2 -0
  55. dashscope/embeddings/text_embedding.py +2 -0
  56. dashscope/files.py +2 -0
  57. dashscope/io/input_output.py +2 -0
  58. dashscope/model.py +2 -0
  59. dashscope/models.py +2 -0
  60. dashscope/nlp/understanding.py +2 -0
  61. dashscope/protocol/websocket.py +3 -0
  62. dashscope/rerank/text_rerank.py +2 -0
  63. dashscope/threads/__init__.py +2 -0
  64. dashscope/threads/messages/files.py +2 -0
  65. dashscope/threads/messages/messages.py +2 -0
  66. dashscope/threads/runs/runs.py +2 -0
  67. dashscope/threads/runs/steps.py +2 -0
  68. dashscope/threads/thread_types.py +2 -0
  69. dashscope/threads/threads.py +2 -0
  70. dashscope/tokenizers/__init__.py +2 -0
  71. dashscope/tokenizers/qwen_tokenizer.py +2 -0
  72. dashscope/tokenizers/tokenization.py +2 -0
  73. dashscope/tokenizers/tokenizer.py +2 -0
  74. dashscope/tokenizers/tokenizer_base.py +2 -0
  75. dashscope/utils/oss_utils.py +3 -0
  76. dashscope/version.py +3 -1
  77. {dashscope-1.22.2.dist-info → dashscope-1.23.1.dist-info}/LICENSE +2 -4
  78. {dashscope-1.22.2.dist-info → dashscope-1.23.1.dist-info}/METADATA +1 -1
  79. dashscope-1.23.1.dist-info/RECORD +95 -0
  80. dashscope/audio/asr/transcribe.py +0 -270
  81. dashscope/deployment.py +0 -163
  82. dashscope/file.py +0 -94
  83. dashscope/finetune.py +0 -175
  84. dashscope-1.22.2.dist-info/RECORD +0 -95
  85. {dashscope-1.22.2.dist-info → dashscope-1.23.1.dist-info}/WHEEL +0 -0
  86. {dashscope-1.22.2.dist-info → dashscope-1.23.1.dist-info}/entry_points.txt +0 -0
  87. {dashscope-1.22.2.dist-info → dashscope-1.23.1.dist-info}/top_level.txt +0 -0
dashscope/__init__.py CHANGED
@@ -1,3 +1,5 @@
1
+ # Copyright (c) Alibaba, Inc. and its affiliates.
2
+
1
3
  import logging
2
4
  from logging import NullHandler
3
5
 
@@ -5,8 +7,8 @@ from dashscope.aigc.code_generation import CodeGeneration
5
7
  from dashscope.aigc.conversation import Conversation, History, HistoryItem
6
8
  from dashscope.aigc.generation import AioGeneration, Generation
7
9
  from dashscope.aigc.image_synthesis import ImageSynthesis
8
- from dashscope.aigc.video_synthesis import VideoSynthesis
9
10
  from dashscope.aigc.multimodal_conversation import MultiModalConversation
11
+ from dashscope.aigc.video_synthesis import VideoSynthesis
10
12
  from dashscope.app.application import Application
11
13
  from dashscope.assistants import Assistant, AssistantList, Assistants
12
14
  from dashscope.assistants.assistant_types import AssistantFile, DeleteResponse
@@ -1,3 +1,5 @@
1
+ # Copyright (c) Alibaba, Inc. and its affiliates.
2
+
1
3
  from .conversation import Conversation, History, HistoryItem
2
4
  from .generation import Generation
3
5
  from .image_synthesis import ImageSynthesis
@@ -0,0 +1,282 @@
1
+ # Copyright (c) Alibaba, Inc. and its affiliates.
2
+
3
+ import json
4
+ from typing import Any, Dict, Generator, List, Union
5
+
6
+ import dashscope
7
+ from dashscope.aigc.generation import Generation
8
+ from dashscope.api_entities.chat_completion_types import (ChatCompletion,
9
+ ChatCompletionChunk)
10
+ from dashscope.api_entities.dashscope_response import (GenerationResponse,
11
+ Message)
12
+ from dashscope.client.base_api import BaseAioApi, CreateMixin
13
+ from dashscope.common.error import InputRequired, ModelRequired
14
+ from dashscope.common.utils import _get_task_group_and_task
15
+
16
+
17
class Completions(CreateMixin):
    """OpenAI-compatible chat completion interface.

    Posts requests to the DashScope ``chat/completions`` compatible
    endpoint and wraps results in OpenAI-style response objects.
    """
    SUB_PATH = ''

    @classmethod
    def create(
        cls,
        *,
        model: str,
        messages: List[Message],
        stream: bool = False,
        temperature: float = None,
        top_p: float = None,
        top_k: int = None,
        stop: Union[List[str], List[List[int]]] = None,
        max_tokens: int = None,
        repetition_penalty: float = None,
        api_key: str = None,
        workspace: str = None,
        extra_headers: Dict = None,
        extra_body: Dict = None,
        **kwargs
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """Call an openai compatible chat completion model service.

        Args:
            model (str): The requested model, such as qwen-long.
            messages (list): The generation messages, e.g.
                [{'role': 'user', 'content': 'The weather is fine today.'},
                 {'role': 'assistant', 'content': 'Suitable for outings'}].
            stream (bool, `optional`): Enable server-sent events
                (ref: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) # noqa E501
                the result will come back partially.
            temperature (float, `optional`): Controls the randomness and
                diversity of sampling. Higher values flatten the token
                probability distribution (more diverse output); lower values
                sharpen it (more deterministic output).
            top_p (float, `optional`): Nucleus sampling: the model considers
                only the tokens comprising the top_p probability mass
                (0.1 keeps only the top 10% mass).
            top_k (int, `optional`): Size of the sampling candidate set.
                0 disables the top_k policy so only top_p takes effect.
            stop (list[str] or list[list[int]], `optional`): Strings or
                token-id sequences that stop generation; the stop words or
                tokens are not included in the result.
            max_tokens (int, `optional`): Upper bound on the number of
                generated tokens; the model may produce fewer. Generally
                not recommended to set.
            repetition_penalty (float, `optional`): Penalizes repetition in
                the generated text; 1.0 means no penalty.
            api_key (str, optional): The api api_key, can be None,
                if None, will get by default rule.
            workspace (str, optional): The bailian workspace id.
            extra_headers (dict, optional): Extra HTTP headers, merged into
                the request headers.
            extra_body (dict, optional): Extra request-body fields, merged
                over the built-in ones.
            **kwargs:
                timeout: set request timeout.

        Raises:
            InputRequired: messages is None or empty.
            ModelRequired: model is None or empty.

        Returns:
            Union[ChatCompletion,
                Generator[ChatCompletionChunk, None, None]]: If
                stream is True, return Generator, otherwise ChatCompletion.
        """
        if not messages:
            raise InputRequired('Messages is required!')
        if not model:
            raise ModelRequired('Model is required!')
        data = {'model': model, 'messages': messages}
        if temperature is not None:
            data['temperature'] = temperature
        if top_p is not None:
            data['top_p'] = top_p
        if top_k is not None:
            data['top_k'] = top_k
        if stop is not None:
            data['stop'] = stop
        if max_tokens is not None:
            # BUG FIX: the original wrote ``data[max_tokens] = max_tokens``,
            # using the int value itself as the key, so the 'max_tokens'
            # field never reached the service.
            data['max_tokens'] = max_tokens
        if repetition_penalty is not None:
            data['repetition_penalty'] = repetition_penalty
        if extra_body:
            # extra_body entries intentionally override built-in fields.
            data = {**data, **extra_body}

        if extra_headers:
            # BUG FIX: the original conditional was inverted — when kwargs
            # was non-empty it was replaced by {'headers': extra_headers},
            # silently dropping caller options such as timeout. Always merge.
            kwargs = {**kwargs, 'headers': extra_headers}

        response = super().call(data=data,
                                path='chat/completions',
                                base_address=dashscope.base_compatible_api_url,
                                api_key=api_key,
                                flattened_output=True,
                                stream=stream,
                                workspace=workspace,
                                **kwargs)
        if stream:
            # Streaming responses arrive as (status, payload) pairs.
            return (ChatCompletionChunk(**item) for _, item in response)
        return ChatCompletion(**response)
145
+
146
+
147
class AioGeneration(BaseAioApi):
    # Async API for AI-Generated Content (AIGC) text-generation models.
    task = 'text-generation'

    class Models:
        # Deprecated aliases — prefer qwen_turbo / qwen_plus respectively.
        qwen_v1 = 'qwen-v1'
        qwen_plus_v1 = 'qwen-plus-v1'

        bailian_v1 = 'bailian-v1'
        dolly_12b_v2 = 'dolly-12b-v2'
        qwen_turbo = 'qwen-turbo'
        qwen_plus = 'qwen-plus'
        qwen_max = 'qwen-max'

    @classmethod
    async def call(
        cls,
        model: str,
        prompt: Any = None,
        history: list = None,
        api_key: str = None,
        messages: List[Message] = None,
        plugins: Union[str, Dict[str, Any]] = None,
        workspace: str = None,
        **kwargs
    ) -> Union[GenerationResponse, Generator[GenerationResponse, None, None]]:
        """Asynchronously call a text-generation model service.

        Args:
            model (str): The requested model, such as qwen-turbo.
            prompt (Any): The input prompt.
            history (list): Deprecated. Caller-provided history, e.g.
                [{'user': 'The weather is fine today.',
                  'bot': 'Suitable for outings'}]. Defaults to None.
            api_key (str, optional): The api key; resolved by the default
                rule when None.
            messages (list): The generation messages, e.g.
                [{'role': 'user', 'content': 'The weather is fine today.'},
                 {'role': 'assistant', 'content': 'Suitable for outings'}].
            plugins (Any): Plugin configuration — a config string or a dict,
                sent via the X-DashScope-Plugin request header.
            workspace (str): The dashscope workspace id.
            **kwargs: Service options forwarded with the request, including
                stream (bool): enable server-sent events, partial results
                    come back incrementally;
                temperature (float): sampling randomness/diversity control;
                top_p (float): nucleus-sampling probability mass;
                top_k (int): sampling candidate-set size, 0 disables top_k;
                enable_search (bool): enable web search (quark);
                customized_model_id (str): enterprise-specific model id
                    (bailian-v1);
                result_format (str): 'message' or 'text', default 'text';
                incremental_output (bool): streaming output mode control;
                stop (list[str] | list[list[int]]): stop words/token ids,
                    excluded from the result;
                max_tokens (int): upper bound on generated tokens;
                repetition_penalty (float): repetition penalty, 1.0 = none.

        Raises:
            InputRequired: neither prompt nor messages was provided.
            ModelRequired: model is None or empty.

        Returns:
            Union[GenerationResponse,
                Generator[GenerationResponse, None, None]]: an async
                generator when stream is True, otherwise a single
                GenerationResponse.
        """
        # Guard clauses: need at least one of prompt/messages, plus a model.
        if not prompt and not messages:
            raise InputRequired('prompt or messages is required!')
        if not model:
            raise ModelRequired('Model is required!')

        task_group, function = _get_task_group_and_task(__name__)

        # Plugin config rides along as a request header; dict configs are
        # serialized to JSON first.
        if plugins is not None:
            headers = kwargs.pop('headers', {})
            headers['X-DashScope-Plugin'] = (
                plugins if isinstance(plugins, str) else json.dumps(plugins))
            kwargs['headers'] = headers

        streaming = kwargs.get('stream', False)
        request_input, parameters = Generation._build_input_parameters(
            model, prompt, history, messages, **kwargs)
        response = await super().call(model=model,
                                      task_group=task_group,
                                      task=Generation.task,
                                      function=function,
                                      api_key=api_key,
                                      input=request_input,
                                      workspace=workspace,
                                      **parameters)
        if streaming:
            return (GenerationResponse.from_api_response(item)
                    async for item in response)
        return GenerationResponse.from_api_response(response)
@@ -1,3 +1,5 @@
1
+ # Copyright (c) Alibaba, Inc. and its affiliates.
2
+
1
3
  from typing import Generator, List, Union
2
4
 
3
5
  from dashscope.api_entities.dashscope_response import (DashScopeAPIResponse,
@@ -1,3 +1,5 @@
1
+ # Copyright (c) Alibaba, Inc. and its affiliates.
2
+
1
3
  import json
2
4
  from copy import deepcopy
3
5
  from http import HTTPStatus
@@ -1,3 +1,5 @@
1
+ # Copyright (c) Alibaba, Inc. and its affiliates.
2
+
1
3
  import copy
2
4
  import json
3
5
  from typing import Any, Dict, Generator, List, Union
@@ -1,3 +1,5 @@
1
+ # Copyright (c) Alibaba, Inc. and its affiliates.
2
+
1
3
  from typing import Any, Dict, List, Union
2
4
 
3
5
  from dashscope.api_entities.dashscope_response import (DashScopeAPIResponse,
@@ -1,3 +1,5 @@
1
+ # Copyright (c) Alibaba, Inc. and its affiliates.
2
+
1
3
  import copy
2
4
  from typing import Generator, List, Union
3
5
 
@@ -1,3 +1,5 @@
1
+ # Copyright (c) Alibaba, Inc. and its affiliates.
2
+
1
3
  from typing import Any, Dict, Union
2
4
 
3
5
  from dashscope.api_entities.dashscope_response import (DashScopeAPIResponse,
@@ -1,7 +1,10 @@
1
+ # Copyright (c) Alibaba, Inc. and its affiliates.
2
+
1
3
  import json
2
4
  from http import HTTPStatus
3
5
 
4
6
  import aiohttp
7
+
5
8
  from dashscope.api_entities.base_request import AioBaseRequest
6
9
  from dashscope.api_entities.dashscope_response import DashScopeAPIResponse
7
10
  from dashscope.common.constants import (DEFAULT_REQUEST_TIMEOUT_SECONDS,
@@ -1,7 +1,10 @@
1
+ # Copyright (c) Alibaba, Inc. and its affiliates.
2
+
1
3
  import json
2
4
  from urllib.parse import urlencode
3
5
 
4
6
  import aiohttp
7
+
5
8
  from dashscope.common.constants import ApiProtocol
6
9
  from dashscope.io.input_output import InputResolver
7
10
 
@@ -1,3 +1,5 @@
1
+ # Copyright (c) Alibaba, Inc. and its affiliates.
2
+
1
3
  from urllib.parse import urlencode
2
4
 
3
5
  import dashscope
@@ -1,3 +1,5 @@
1
+ # Copyright (c) Alibaba, Inc. and its affiliates.
2
+
1
3
  import os
2
4
  import platform
3
5
  from abc import ABC, abstractmethod