dashscope 1.8.0__py3-none-any.whl → 1.25.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. dashscope/__init__.py +61 -14
  2. dashscope/aigc/__init__.py +10 -3
  3. dashscope/aigc/chat_completion.py +282 -0
  4. dashscope/aigc/code_generation.py +145 -0
  5. dashscope/aigc/conversation.py +71 -12
  6. dashscope/aigc/generation.py +288 -16
  7. dashscope/aigc/image_synthesis.py +473 -31
  8. dashscope/aigc/multimodal_conversation.py +299 -14
  9. dashscope/aigc/video_synthesis.py +610 -0
  10. dashscope/api_entities/aiohttp_request.py +8 -5
  11. dashscope/api_entities/api_request_data.py +4 -2
  12. dashscope/api_entities/api_request_factory.py +68 -20
  13. dashscope/api_entities/base_request.py +20 -3
  14. dashscope/api_entities/chat_completion_types.py +344 -0
  15. dashscope/api_entities/dashscope_response.py +243 -15
  16. dashscope/api_entities/encryption.py +179 -0
  17. dashscope/api_entities/http_request.py +216 -62
  18. dashscope/api_entities/websocket_request.py +43 -34
  19. dashscope/app/__init__.py +5 -0
  20. dashscope/app/application.py +203 -0
  21. dashscope/app/application_response.py +246 -0
  22. dashscope/assistants/__init__.py +16 -0
  23. dashscope/assistants/assistant_types.py +175 -0
  24. dashscope/assistants/assistants.py +311 -0
  25. dashscope/assistants/files.py +197 -0
  26. dashscope/audio/__init__.py +4 -2
  27. dashscope/audio/asr/__init__.py +17 -1
  28. dashscope/audio/asr/asr_phrase_manager.py +203 -0
  29. dashscope/audio/asr/recognition.py +167 -27
  30. dashscope/audio/asr/transcription.py +107 -14
  31. dashscope/audio/asr/translation_recognizer.py +1006 -0
  32. dashscope/audio/asr/vocabulary.py +177 -0
  33. dashscope/audio/qwen_asr/__init__.py +7 -0
  34. dashscope/audio/qwen_asr/qwen_transcription.py +189 -0
  35. dashscope/audio/qwen_omni/__init__.py +11 -0
  36. dashscope/audio/qwen_omni/omni_realtime.py +524 -0
  37. dashscope/audio/qwen_tts/__init__.py +5 -0
  38. dashscope/audio/qwen_tts/speech_synthesizer.py +77 -0
  39. dashscope/audio/qwen_tts_realtime/__init__.py +10 -0
  40. dashscope/audio/qwen_tts_realtime/qwen_tts_realtime.py +355 -0
  41. dashscope/audio/tts/__init__.py +2 -0
  42. dashscope/audio/tts/speech_synthesizer.py +5 -0
  43. dashscope/audio/tts_v2/__init__.py +12 -0
  44. dashscope/audio/tts_v2/enrollment.py +179 -0
  45. dashscope/audio/tts_v2/speech_synthesizer.py +886 -0
  46. dashscope/cli.py +157 -37
  47. dashscope/client/base_api.py +652 -87
  48. dashscope/common/api_key.py +2 -0
  49. dashscope/common/base_type.py +135 -0
  50. dashscope/common/constants.py +13 -16
  51. dashscope/common/env.py +2 -0
  52. dashscope/common/error.py +58 -22
  53. dashscope/common/logging.py +2 -0
  54. dashscope/common/message_manager.py +2 -0
  55. dashscope/common/utils.py +276 -46
  56. dashscope/customize/__init__.py +0 -0
  57. dashscope/customize/customize_types.py +192 -0
  58. dashscope/customize/deployments.py +146 -0
  59. dashscope/customize/finetunes.py +234 -0
  60. dashscope/embeddings/__init__.py +5 -1
  61. dashscope/embeddings/batch_text_embedding.py +208 -0
  62. dashscope/embeddings/batch_text_embedding_response.py +65 -0
  63. dashscope/embeddings/multimodal_embedding.py +118 -10
  64. dashscope/embeddings/text_embedding.py +13 -1
  65. dashscope/{file.py → files.py} +19 -4
  66. dashscope/io/input_output.py +2 -0
  67. dashscope/model.py +11 -2
  68. dashscope/models.py +43 -0
  69. dashscope/multimodal/__init__.py +20 -0
  70. dashscope/multimodal/dialog_state.py +56 -0
  71. dashscope/multimodal/multimodal_constants.py +28 -0
  72. dashscope/multimodal/multimodal_dialog.py +648 -0
  73. dashscope/multimodal/multimodal_request_params.py +313 -0
  74. dashscope/multimodal/tingwu/__init__.py +10 -0
  75. dashscope/multimodal/tingwu/tingwu.py +80 -0
  76. dashscope/multimodal/tingwu/tingwu_realtime.py +579 -0
  77. dashscope/nlp/__init__.py +0 -0
  78. dashscope/nlp/understanding.py +64 -0
  79. dashscope/protocol/websocket.py +3 -0
  80. dashscope/rerank/__init__.py +0 -0
  81. dashscope/rerank/text_rerank.py +69 -0
  82. dashscope/resources/qwen.tiktoken +151643 -0
  83. dashscope/threads/__init__.py +26 -0
  84. dashscope/threads/messages/__init__.py +0 -0
  85. dashscope/threads/messages/files.py +113 -0
  86. dashscope/threads/messages/messages.py +220 -0
  87. dashscope/threads/runs/__init__.py +0 -0
  88. dashscope/threads/runs/runs.py +501 -0
  89. dashscope/threads/runs/steps.py +112 -0
  90. dashscope/threads/thread_types.py +665 -0
  91. dashscope/threads/threads.py +212 -0
  92. dashscope/tokenizers/__init__.py +7 -0
  93. dashscope/tokenizers/qwen_tokenizer.py +111 -0
  94. dashscope/tokenizers/tokenization.py +125 -0
  95. dashscope/tokenizers/tokenizer.py +45 -0
  96. dashscope/tokenizers/tokenizer_base.py +32 -0
  97. dashscope/utils/__init__.py +0 -0
  98. dashscope/utils/message_utils.py +838 -0
  99. dashscope/utils/oss_utils.py +243 -0
  100. dashscope/utils/param_utils.py +29 -0
  101. dashscope/version.py +3 -1
  102. {dashscope-1.8.0.dist-info → dashscope-1.25.6.dist-info}/METADATA +53 -50
  103. dashscope-1.25.6.dist-info/RECORD +112 -0
  104. {dashscope-1.8.0.dist-info → dashscope-1.25.6.dist-info}/WHEEL +1 -1
  105. {dashscope-1.8.0.dist-info → dashscope-1.25.6.dist-info}/entry_points.txt +0 -1
  106. {dashscope-1.8.0.dist-info → dashscope-1.25.6.dist-info/licenses}/LICENSE +2 -4
  107. dashscope/deployment.py +0 -129
  108. dashscope/finetune.py +0 -149
  109. dashscope-1.8.0.dist-info/RECORD +0 -49
  110. {dashscope-1.8.0.dist-info → dashscope-1.25.6.dist-info}/top_level.txt +0 -0
dashscope/__init__.py CHANGED
@@ -1,24 +1,40 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
 import logging
 from logging import NullHandler
 
+from dashscope.aigc.code_generation import CodeGeneration
 from dashscope.aigc.conversation import Conversation, History, HistoryItem
-from dashscope.aigc.generation import Generation
+from dashscope.aigc.generation import AioGeneration, Generation
 from dashscope.aigc.image_synthesis import ImageSynthesis
-from dashscope.aigc.multimodal_conversation import MultiModalConversation
+from dashscope.aigc.multimodal_conversation import MultiModalConversation, AioMultiModalConversation
+from dashscope.aigc.video_synthesis import VideoSynthesis
+from dashscope.app.application import Application
+from dashscope.assistants import Assistant, AssistantList, Assistants
+from dashscope.assistants.assistant_types import AssistantFile, DeleteResponse
 from dashscope.audio.asr.transcription import Transcription
 from dashscope.audio.tts.speech_synthesizer import SpeechSynthesizer
 from dashscope.common.api_key import save_api_key
 from dashscope.common.env import (api_key, api_key_file_path,
                                   base_http_api_url, base_websocket_api_url)
-from dashscope.deployment import Deployment
+from dashscope.customize.deployments import Deployments
+from dashscope.customize.finetunes import FineTunes
+from dashscope.embeddings.batch_text_embedding import BatchTextEmbedding
+from dashscope.embeddings.batch_text_embedding_response import \
+    BatchTextEmbeddingResponse
+from dashscope.embeddings.multimodal_embedding import (
+    MultiModalEmbedding, MultiModalEmbeddingItemAudio,
+    MultiModalEmbeddingItemImage, MultiModalEmbeddingItemText, AioMultiModalEmbedding)
 from dashscope.embeddings.text_embedding import TextEmbedding
-from dashscope.embeddings.multimodal_embedding import (MultiModalEmbedding,
-                                                       MultiModalEmbeddingItemAudio,
-                                                       MultiModalEmbeddingItemImage,
-                                                       MultiModalEmbeddingItemText)
-from dashscope.file import File
-from dashscope.finetune import FineTune
-from dashscope.model import Model
+from dashscope.files import Files
+from dashscope.models import Models
+from dashscope.nlp.understanding import Understanding
+from dashscope.rerank.text_rerank import TextReRank
+from dashscope.threads import (MessageFile, Messages, Run, RunList, Runs,
+                               RunStep, RunStepList, Steps, Thread,
+                               ThreadMessage, ThreadMessageList, Threads)
+from dashscope.tokenizers import (Tokenization, Tokenizer, get_tokenizer,
+                                  list_tokenizers)
 
 __all__ = [
     base_http_api_url,
@@ -26,23 +42,54 @@ __all__ = [
     api_key,
     api_key_file_path,
     save_api_key,
+    AioGeneration,
     Conversation,
     Generation,
     History,
     HistoryItem,
     ImageSynthesis,
     Transcription,
-    File,
-    Deployment,
-    FineTune,
-    Model,
+    Files,
+    Deployments,
+    FineTunes,
+    Models,
     TextEmbedding,
     MultiModalEmbedding,
+    AioMultiModalEmbedding,
     MultiModalEmbeddingItemAudio,
     MultiModalEmbeddingItemImage,
     MultiModalEmbeddingItemText,
     SpeechSynthesizer,
     MultiModalConversation,
+    AioMultiModalConversation,
+    BatchTextEmbedding,
+    BatchTextEmbeddingResponse,
+    Understanding,
+    CodeGeneration,
+    Tokenization,
+    Tokenizer,
+    get_tokenizer,
+    list_tokenizers,
+    Application,
+    TextReRank,
+    Assistants,
+    Threads,
+    Messages,
+    Runs,
+    Assistant,
+    ThreadMessage,
+    Run,
+    Steps,
+    AssistantList,
+    ThreadMessageList,
+    RunList,
+    RunStepList,
+    Thread,
+    DeleteResponse,
+    RunStep,
+    MessageFile,
+    AssistantFile,
+    VideoSynthesis,
 ]
 
 logging.getLogger(__name__).addHandler(NullHandler())
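The __init__.py changes above rename the 1.8.0 singular entry points to plural, collection-style managers (File → Files, Deployment → Deployments, FineTune → FineTunes, Model → Models) and export new ones such as Application, VideoSynthesis and TextReRank. A minimal migration sketch, assuming dashscope 1.25.6 is installed; the names come directly from the new __all__ above:

# 1.8.0 imports of File, Deployment, FineTune or Model must move to the
# plural names; the old modules were removed in this release.
from dashscope import Deployments, Files, FineTunes, Models

# A few of the entry points newly exported at top level in 1.25.6:
from dashscope import (Application, BatchTextEmbedding, TextReRank,
                       Understanding, VideoSynthesis)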
dashscope/aigc/__init__.py CHANGED
@@ -1,13 +1,20 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
 from .conversation import Conversation, History, HistoryItem
-from .generation import Generation
-from .image_synthesis import ImageSynthesis
-from .multimodal_conversation import MultiModalConversation
+from .generation import Generation, AioGeneration
+from .image_synthesis import ImageSynthesis, AioImageSynthesis
+from .multimodal_conversation import MultiModalConversation, AioMultiModalConversation
+from .video_synthesis import VideoSynthesis, AioVideoSynthesis
 
 __all__ = [
     Generation,
+    AioGeneration,
     Conversation,
     HistoryItem,
     History,
     ImageSynthesis,
+    AioImageSynthesis,
     MultiModalConversation,
+    AioMultiModalConversation,
+    VideoSynthesis,
+    AioVideoSynthesis,
 ]
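Each synchronous aigc class now ships with an Aio* counterpart for asyncio callers. A minimal sketch using AioGeneration, whose full signature appears in the chat_completion.py diff below; it assumes a valid DASHSCOPE_API_KEY in the environment and access to the qwen-turbo model:

import asyncio

from dashscope import AioGeneration


async def main():
    # Non-streaming call: awaiting returns a single GenerationResponse.
    response = await AioGeneration.call(
        model='qwen-turbo',
        messages=[{'role': 'user', 'content': 'The weather is fine today.'}],
        result_format='message')
    print(response)


asyncio.run(main())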
dashscope/aigc/chat_completion.py ADDED
@@ -0,0 +1,282 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+import json
+from typing import Any, Dict, Generator, List, Union
+
+import dashscope
+from dashscope.aigc.generation import Generation
+from dashscope.api_entities.chat_completion_types import (ChatCompletion,
+                                                          ChatCompletionChunk)
+from dashscope.api_entities.dashscope_response import (GenerationResponse,
+                                                       Message)
+from dashscope.client.base_api import BaseAioApi, CreateMixin
+from dashscope.common.error import InputRequired, ModelRequired
+from dashscope.common.utils import _get_task_group_and_task
+
+
+class Completions(CreateMixin):
+    """Support the OpenAI-compatible chat completion interface.
+
+    """
+    SUB_PATH = ''
+
+    @classmethod
+    def create(
+        cls,
+        *,
+        model: str,
+        messages: List[Message],
+        stream: bool = False,
+        temperature: float = None,
+        top_p: float = None,
+        top_k: int = None,
+        stop: Union[List[str], List[List[int]]] = None,
+        max_tokens: int = None,
+        repetition_penalty: float = None,
+        api_key: str = None,
+        workspace: str = None,
+        extra_headers: Dict = None,
+        extra_body: Dict = None,
+        **kwargs
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """Call an OpenAI-compatible chat completion model service.
+
+        Args:
+            model (str): The requested model, such as qwen-long.
+            messages (list): The generation messages.
+                examples:
+                    [{'role': 'user',
+                      'content': 'The weather is fine today.'},
+                     {'role': 'assistant', 'content': 'Suitable for outings'}]
+            stream (bool, `optional`): Enable server-sent events
+                (ref: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) # noqa E501
+                so that the result is returned incrementally [qwen-turbo, bailian-v1].
+            temperature (float, `optional`): Controls the degree of
+                randomness and diversity. Specifically, the temperature
+                value controls how much the probability distribution of
+                each candidate token is smoothed during generation.
+                A higher temperature flattens the distribution, allowing
+                more low-probability tokens to be selected, so the
+                generated results are more diverse; a lower temperature
+                sharpens the distribution, making it easier for
+                high-probability tokens to be selected, so the generated
+                results are more deterministic.
+            top_p (float, `optional`): A sampling strategy, called nucleus
+                sampling, where the model considers the tokens with top_p
+                probability mass. So 0.1 means only the tokens comprising
+                the top 10% probability mass are considered.
+            top_k (int, `optional`): The size of the candidate set sampled during generation. # noqa E501
+                For example, when the value is 50, only the 50 highest-scoring tokens # noqa E501
+                in a single generation step form the candidate set for random sampling. # noqa E501
+                The larger the value, the more random the output; # noqa E501
+                the smaller the value, the more deterministic the output. # noqa E501
+                The default value is 0, which means the top_k policy is # noqa E501
+                not enabled; in that case only the top_p policy takes effect. # noqa E501
+            stop (list[str] or list[list[int]], `optional`): Stops generation when # noqa E501
+                the configured strings or token ids are encountered; the result will not # noqa E501
+                include the stop words or tokens.
+            max_tokens (int, `optional`): The maximum number of tokens expected in the output. # noqa E501
+                Note that the length generated by the model will only be less than or equal to # noqa E501
+                max_tokens, not necessarily equal to it. If max_tokens is set too large, the # noqa E501
+                service directly reports that the length exceeds the limit. It is generally # noqa E501
+                not recommended to set this value.
+            repetition_penalty (float, `optional`): Controls repetition in the generated text. # noqa E501
+                Increasing repetition_penalty reduces duplication in the model output. # noqa E501
+                1.0 means no penalty.
+            api_key (str, optional): The API key. Can be None;
+                if None, it is resolved by the default rule.
+            workspace (str, optional): The Bailian workspace id.
+            **kwargs:
+                timeout: set the request timeout.
+        Raises:
+            InputRequired/ModelRequired: messages and model are required.
+
+        Returns:
+            Union[ChatCompletion,
+                  Generator[ChatCompletionChunk, None, None]]: If
+            stream is True, returns a Generator, otherwise a ChatCompletion.
+        """
+        if messages is None or not messages:
+            raise InputRequired('Messages is required!')
+        if model is None or not model:
+            raise ModelRequired('Model is required!')
+        data = {}
+        data['model'] = model
+        data['messages'] = messages
+        if temperature is not None:
+            data['temperature'] = temperature
+        if top_p is not None:
+            data['top_p'] = top_p
+        if top_k is not None:
+            data['top_k'] = top_k
+        if stop is not None:
+            data['stop'] = stop
+        if max_tokens is not None:
+            data['max_tokens'] = max_tokens
+        if repetition_penalty is not None:
+            data['repetition_penalty'] = repetition_penalty
+        if extra_body is not None and extra_body:
+            data = {**data, **extra_body}
+
+        if extra_headers is not None and extra_headers:
+            kwargs = {
+                'headers': extra_headers
+            } if not kwargs else {
+                **kwargs,
+                **{
+                    'headers': extra_headers
+                }
+            }
+
+        response = super().call(data=data,
+                                path='chat/completions',
+                                base_address=dashscope.base_compatible_api_url,
+                                api_key=api_key,
+                                flattened_output=True,
+                                stream=stream,
+                                workspace=workspace,
+                                **kwargs)
+        if stream:
+            return (ChatCompletionChunk(**item) for _, item in response)
+        else:
+            return ChatCompletion(**response)
+
+
+class AioGeneration(BaseAioApi):
+    task = 'text-generation'
+    """API for AI-Generated Content(AIGC) models.
+
+    """
+    class Models:
+        """@deprecated, use qwen_turbo instead"""
+        qwen_v1 = 'qwen-v1'
+        """@deprecated, use qwen_plus instead"""
+        qwen_plus_v1 = 'qwen-plus-v1'
+
+        bailian_v1 = 'bailian-v1'
+        dolly_12b_v2 = 'dolly-12b-v2'
+        qwen_turbo = 'qwen-turbo'
+        qwen_plus = 'qwen-plus'
+        qwen_max = 'qwen-max'
+
+    @classmethod
+    async def call(
+        cls,
+        model: str,
+        prompt: Any = None,
+        history: list = None,
+        api_key: str = None,
+        messages: List[Message] = None,
+        plugins: Union[str, Dict[str, Any]] = None,
+        workspace: str = None,
+        **kwargs
+    ) -> Union[GenerationResponse, Generator[GenerationResponse, None, None]]:
+        """Call generation model service.
+
+        Args:
+            model (str): The requested model, such as qwen-turbo.
+            prompt (Any): The input prompt.
+            history (list): The user-provided history, deprecated.
+                examples:
+                    [{'user': 'The weather is fine today.',
+                      'bot': 'Suitable for outings'}].
+                Defaults to None.
+            api_key (str, optional): The API key. Can be None;
+                if None, it is resolved by the default rule (TODO: api key doc).
+            messages (list): The generation messages.
+                examples:
+                    [{'role': 'user',
+                      'content': 'The weather is fine today.'},
+                     {'role': 'assistant', 'content': 'Suitable for outings'}]
+            plugins (Any): The plugin config; either a config str or a dict.
+            **kwargs:
+                stream (bool, `optional`): Enable server-sent events
+                    (ref: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) # noqa E501
+                    so that the result is returned incrementally [qwen-turbo, bailian-v1].
+                temperature (float, `optional`): Controls the degree of
+                    randomness and diversity. Specifically, the temperature
+                    value controls how much the probability distribution of
+                    each candidate token is smoothed during generation.
+                    A higher temperature flattens the distribution, allowing
+                    more low-probability tokens to be selected, so the
+                    generated results are more diverse; a lower temperature
+                    sharpens the distribution, making it easier for
+                    high-probability tokens to be selected, so the generated
+                    results are more deterministic; range (0, 2) [qwen-turbo, qwen-plus].
+                top_p (float, `optional`): A sampling strategy, called nucleus
+                    sampling, where the model considers the tokens with top_p
+                    probability mass. So 0.1 means only the tokens comprising
+                    the top 10% probability mass are
+                    considered [qwen-turbo, bailian-v1].
+                top_k (int, `optional`): The size of the candidate set sampled during generation. # noqa E501
+                    For example, when the value is 50, only the 50 highest-scoring tokens # noqa E501
+                    in a single generation step form the candidate set for random sampling. # noqa E501
+                    The larger the value, the more random the output; # noqa E501
+                    the smaller the value, the more deterministic the output. # noqa E501
+                    The default value is 0, which means the top_k policy is # noqa E501
+                    not enabled; in that case only the top_p policy takes effect. # noqa E501
+                enable_search (bool, `optional`): Whether to enable web search (Quark). # noqa E501
+                    Currently works best only on the first round of conversation.
+                    Defaults to False; supported model: [qwen-turbo].
+                customized_model_id (str, required): The enterprise-specific
+                    large model id, which needs to be generated in the
+                    operations console of the enterprise-specific
+                    large model product; supported model: [bailian-v1].
+                result_format (str, `optional`): [message|text] Sets the result format. # noqa E501
+                    Defaults to text.
+                incremental_output (bool, `optional`): Controls the streaming output mode. # noqa E501
+                    If true, each subsequent output excludes the previously emitted content, # noqa E501
+                    i.e. only the increment is returned; otherwise each output repeats the # noqa E501
+                    content emitted so far. Defaults to false.
+                stop (list[str] or list[list[int]], `optional`): Stops generation when # noqa E501
+                    the configured strings or token ids are encountered; the result will not # noqa E501
+                    include the stop words or tokens.
+                max_tokens (int, `optional`): The maximum number of tokens expected in the output. # noqa E501
+                    Note that the length generated by the model will only be less than or equal to # noqa E501
+                    max_tokens, not necessarily equal to it. If max_tokens is set too large, the # noqa E501
+                    service directly reports that the length exceeds the limit. It is generally # noqa E501
+                    not recommended to set this value.
+                repetition_penalty (float, `optional`): Controls repetition in the generated text. # noqa E501
+                    Increasing repetition_penalty reduces duplication in the model output. # noqa E501
+                    1.0 means no penalty.
+            workspace (str): The dashscope workspace id.
+        Raises:
+            InvalidInput: The history and auto_history are mutually exclusive.
+
+        Returns:
+            Union[GenerationResponse,
+                  Generator[GenerationResponse, None, None]]: If
+            stream is True, returns a Generator, otherwise a GenerationResponse.
+        """
+        if (prompt is None or not prompt) and (messages is None
+                                               or not messages):
+            raise InputRequired('prompt or messages is required!')
+        if model is None or not model:
+            raise ModelRequired('Model is required!')
+        task_group, function = _get_task_group_and_task(__name__)
+        if plugins is not None:
+            headers = kwargs.pop('headers', {})
+            if isinstance(plugins, str):
+                headers['X-DashScope-Plugin'] = plugins
+            else:
+                headers['X-DashScope-Plugin'] = json.dumps(plugins)
+            kwargs['headers'] = headers
+        input, parameters = Generation._build_input_parameters(
+            model, prompt, history, messages, **kwargs)
+        response = await super().call(model=model,
+                                      task_group=task_group,
+                                      task=Generation.task,
+                                      function=function,
+                                      api_key=api_key,
+                                      input=input,
+                                      workspace=workspace,
+                                      **parameters)
+        is_stream = kwargs.get('stream', False)
+        if is_stream:
+            return (GenerationResponse.from_api_response(rsp)
+                    async for rsp in response)
+        else:
+            return GenerationResponse.from_api_response(response)
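A usage sketch for the new OpenAI-compatible Completions.create shown above; it assumes a valid DASHSCOPE_API_KEY and that the chosen model (qwen-plus here) is served on the compatible-mode endpoint:

from dashscope.aigc.chat_completion import Completions

# Blocking call: returns a ChatCompletion object.
completion = Completions.create(
    model='qwen-plus',
    messages=[{'role': 'user', 'content': 'The weather is fine today.'}],
    temperature=0.7)
print(completion)

# Streaming call: returns a generator of ChatCompletionChunk objects.
for chunk in Completions.create(
        model='qwen-plus',
        messages=[{'role': 'user', 'content': 'Suggest an outing.'}],
        stream=True):
    print(chunk)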
dashscope/aigc/code_generation.py ADDED
@@ -0,0 +1,145 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+from typing import Generator, List, Union
+
+from dashscope.api_entities.dashscope_response import (DashScopeAPIResponse,
+                                                       DictMixin, Role)
+from dashscope.client.base_api import BaseApi
+from dashscope.common.constants import MESSAGE, SCENE
+from dashscope.common.error import InputRequired, ModelRequired
+from dashscope.common.utils import _get_task_group_and_task
+
+
+class MessageParam(DictMixin):
+    role: str
+
+    def __init__(self, role: str, **kwargs):
+        super().__init__(role=role, **kwargs)
+
+
+class UserRoleMessageParam(MessageParam):
+    content: str
+
+    def __init__(self, content: str, **kwargs):
+        super().__init__(role=Role.USER, content=content, **kwargs)
+
+
+class AttachmentRoleMessageParam(MessageParam):
+    meta: dict
+
+    def __init__(self, meta: dict, **kwargs):
+        super().__init__(role=Role.ATTACHMENT, meta=meta, **kwargs)
+
+
+class OtherRoleContentMessageParam(MessageParam):
+    content: str
+
+    def __init__(self, role: str, content: str, **kwargs):
+        super().__init__(role=role, content=content, **kwargs)
+
+
+class OtherRoleMetaMessageParam(MessageParam):
+    meta: dict
+
+    def __init__(self, role: str, meta: dict, **kwargs):
+        super().__init__(role=role, meta=meta, **kwargs)
+
+
+class CodeGeneration(BaseApi):
+    function = 'generation'
+    """API for AI-Generated Content(AIGC) models.
+
+    """
+    class Models:
+        tongyi_lingma_v1 = 'tongyi-lingma-v1'
+
+    class Scenes:
+        custom = 'custom'
+        nl2code = 'nl2code'
+        code2comment = 'code2comment'
+        code2explain = 'code2explain'
+        commit2msg = 'commit2msg'
+        unit_test = 'unittest'
+        code_qa = 'codeqa'
+        nl2sql = 'nl2sql'
+
+    @classmethod
+    def call(
+        cls,
+        model: str,
+        scene: str = None,
+        api_key: str = None,
+        message: List[MessageParam] = None,
+        workspace: str = None,
+        **kwargs
+    ) -> Union[DashScopeAPIResponse, Generator[DashScopeAPIResponse, None,
+                                               None]]:
+        """Call generation model service.
+
+        Args:
+            model (str): The requested model, such as tongyi-lingma-v1.
+            scene (str): Scene type, single choice, such as custom.
+                examples:
+                    custom: user-defined prompt
+                    nl2code: generate code from natural language
+                    code2comment: generate comments for code
+                    code2explain: explain code
+                    commit2msg: automatically generate a commit message
+                    unittest: generate unit tests
+                    codeqa: code Q&A
+                    nl2sql: generate SQL from natural language
+            api_key (str, optional): The API key. Can be None;
+                if None, it is resolved by the default rule (TODO: api key doc).
+            message (list): The generation messages.
+                scene == custom, examples:
+                    [{"role": "user", "content": "Generate a python function from the following description. The code computes the total size of all files under a given path."}] # noqa E501
+                scene == nl2code, examples:
+                    [{"role": "user", "content": "Compute the total size of all files under a given path"}, {"role": "attachment", "meta": {"language": "java"}}] # noqa E501
+                scene == code2comment, examples:
+                    [{"role": "user", "content": "1. Generate comments in Chinese\n2. Only return the code; no extra explanation of the function is needed\n"}, {"role": "attachment", "meta": {"code": "\t\t@Override\n\t\tpublic CancelExportTaskResponse cancelExportTask(\n\t\t\t\tCancelExportTask cancelExportTask) {\n\t\t\tAmazonEC2SkeletonInterface ec2Service = ServiceProvider.getInstance().getServiceImpl(AmazonEC2SkeletonInterface.class);\n\t\t\treturn ec2Service.cancelExportTask(cancelExportTask);\n\t\t}", "language": "java"}}] # noqa E501
+                scene == code2explain, examples:
+                    [{"role": "user", "content": "The explanation must be at least 200 characters"}, {"role": "attachment", "meta": {"code": "@Override\n public int getHeaderCacheSize()\n {\n return 0;\n }\n\n", "language": "java"}}] # noqa E501
+                scene == commit2msg, examples:
+                    [{"role": "attachment", "meta": {"diff_list": [{"diff": "--- src/com/siondream/core/PlatformResolver.java\n+++ src/com/siondream/core/PlatformResolver.java\n@@ -1,11 +1,8 @@\npackage com.siondream.core;\n-\n-import com.badlogic.gdx.files.FileHandle;\n\npublic interface PlatformResolver {\npublic void openURL(String url);\npublic void rateApp();\npublic void sendFeedback();\n-\tpublic FileHandle[] listFolder(String path);\n}\n", "old_file_path": "src/com/siondream/core/PlatformResolver.java", "new_file_path": "src/com/siondream/core/PlatformResolver.java"}]}}] # noqa E501
+                scene == unittest, examples:
+                    [{"role": "attachment", "meta": {"code": "public static <T> TimestampMap<T> parseTimestampMap(Class<T> typeClass, String input, DateTimeZone timeZone) throws IllegalArgumentException {\n if (typeClass == null) {\n throw new IllegalArgumentException(\"typeClass required\");\n }\n\n if (input == null) {\n return null;\n }\n\n TimestampMap result;\n\n typeClass = AttributeUtils.getStandardizedType(typeClass);\n if (typeClass.equals(String.class)) {\n result = new TimestampStringMap();\n } else if (typeClass.equals(Byte.class)) {\n result = new TimestampByteMap();\n } else if (typeClass.equals(Short.class)) {\n result = new TimestampShortMap();\n } else if (typeClass.equals(Integer.class)) {\n result = new TimestampIntegerMap();\n } else if (typeClass.equals(Long.class)) {\n result = new TimestampLongMap();\n } else if (typeClass.equals(Float.class)) {\n result = new TimestampFloatMap();\n } else if (typeClass.equals(Double.class)) {\n result = new TimestampDoubleMap();\n } else if (typeClass.equals(Boolean.class)) {\n result = new TimestampBooleanMap();\n } else if (typeClass.equals(Character.class)) {\n result = new TimestampCharMap();\n } else {\n throw new IllegalArgumentException(\"Unsupported type \" + typeClass.getClass().getCanonicalName());\n }\n\n if (input.equalsIgnoreCase(EMPTY_VALUE)) {\n return result;\n }\n\n StringReader reader = new StringReader(input + ' ');// Add 1 space so\n // reader.skip\n // function always\n // works when\n // necessary (end of\n // string not\n // reached).\n\n try {\n int r;\n char c;\n while ((r = reader.read()) != -1) {\n c = (char) r;\n switch (c) {\n case LEFT_BOUND_SQUARE_BRACKET:\n case LEFT_BOUND_BRACKET:\n parseTimestampAndValue(typeClass, reader, result, timeZone);\n break;\n default:\n // Ignore other chars outside of bounds\n }\n }\n } catch (IOException ex) {\n throw new RuntimeException(\"Unexpected expection while parsing timestamps\", ex);\n }\n\n return result;\n }", "language": "java"}}] # noqa E501
+                scene == codeqa, examples:
+                    [{"role": "user", "content": "I'm writing a small web server in Python, using BaseHTTPServer and a custom subclass of BaseHTTPServer.BaseHTTPRequestHandler. Is it possible to make this listen on more than one port?\nWhat I'm doing now:\nclass MyRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n def doGET\n [...]\n\nclass ThreadingHTTPServer(ThreadingMixIn, HTTPServer): \n pass\n\nserver = ThreadingHTTPServer(('localhost', 80), MyRequestHandler)\nserver.serve_forever()"}] # noqa E501
+                scene == nl2sql, examples:
+                    [{"role": "user", "content": "What is Xiao Ming's total score"}, {"role": "attachment", "meta": {"synonym_infos": {"student name": "name|full name", "student score": "score|points"}, "recall_infos": [{"content": "student_score.id='Xiao Ming'", "score": "0.83"}], "schema_infos": [{"table_id": "student_score", "table_desc": "student score table", "columns": [{"col_name": "id", "col_caption": "student id", "col_desc": "example values: 1,2,3", "col_type": "string"}, {"col_name": "name", "col_caption": "student name", "col_desc": "example values: Zhang San, Li Si, Xiao Ming", "col_type": "string"}, {"col_name": "score", "col_caption": "student score", "col_desc": "example values: 98,100,66", "col_type": "string"}]}]}}] # noqa E501
+            workspace (str): The dashscope workspace id.
+            **kwargs:
+                n (int, `optional`): The number of output results; currently only 1 is supported, and the default is 1. # noqa E501
+
+        Returns:
+            Union[DashScopeAPIResponse,
+                  Generator[DashScopeAPIResponse, None, None]]: If
+            stream is True, returns a Generator, otherwise a DashScopeAPIResponse.
+        """
+        if (scene is None or not scene) or (message is None or not message):
+            raise InputRequired('scene and message is required!')
+        if model is None or not model:
+            raise ModelRequired('Model is required!')
+        task_group, task = _get_task_group_and_task(__name__)
+        input, parameters = cls._build_input_parameters(
+            model, scene, message, **kwargs)
+        response = super().call(model=model,
+                                task_group=task_group,
+                                task=task,
+                                function=CodeGeneration.function,
+                                api_key=api_key,
+                                input=input,
+                                workspace=workspace,
+                                **parameters)
+
+        is_stream = kwargs.get('stream', False)
+        if is_stream:
+            return (rsp for rsp in response)
+        else:
+            return response
+
+    @classmethod
+    def _build_input_parameters(cls, model, scene, message, **kwargs):
+        parameters = {'n': kwargs.pop('n', 1)}
+        input = {SCENE: scene, MESSAGE: message}
+        return input, {**parameters, **kwargs}
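A sketch of the CodeGeneration entry point added above, using the model and scene constants from its Models and Scenes classes; it assumes a valid DASHSCOPE_API_KEY, and 'python' as the attachment language is illustrative (the docstring examples use java):

from dashscope import CodeGeneration

# nl2code: a natural-language description plus an attachment that pins
# the target language, mirroring the docstring examples above.
response = CodeGeneration.call(
    model=CodeGeneration.Models.tongyi_lingma_v1,
    scene=CodeGeneration.Scenes.nl2code,
    message=[
        {'role': 'user',
         'content': 'Compute the total size of all files under a given path'},
        {'role': 'attachment', 'meta': {'language': 'python'}},
    ])
print(response)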