dashscope 1.24.4__py3-none-any.whl → 1.24.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of dashscope might be problematic.

dashscope/__init__.py CHANGED
@@ -24,7 +24,7 @@ from dashscope.embeddings.batch_text_embedding_response import \
  BatchTextEmbeddingResponse
  from dashscope.embeddings.multimodal_embedding import (
  MultiModalEmbedding, MultiModalEmbeddingItemAudio,
- MultiModalEmbeddingItemImage, MultiModalEmbeddingItemText)
+ MultiModalEmbeddingItemImage, MultiModalEmbeddingItemText, AioMultiModalEmbedding)
  from dashscope.embeddings.text_embedding import TextEmbedding
  from dashscope.files import Files
  from dashscope.models import Models
@@ -55,6 +55,7 @@ __all__ = [
  Models,
  TextEmbedding,
  MultiModalEmbedding,
+ AioMultiModalEmbedding,
  MultiModalEmbeddingItemAudio,
  MultiModalEmbeddingItemImage,
  MultiModalEmbeddingItemText,

dashscope/aigc/__init__.py CHANGED
@@ -1,18 +1,20 @@
  # Copyright (c) Alibaba, Inc. and its affiliates.
-
  from .conversation import Conversation, History, HistoryItem
- from .generation import Generation
- from .image_synthesis import ImageSynthesis
+ from .generation import Generation, AioGeneration
+ from .image_synthesis import ImageSynthesis, AioImageSynthesis
  from .multimodal_conversation import MultiModalConversation, AioMultiModalConversation
- from .video_synthesis import VideoSynthesis
+ from .video_synthesis import VideoSynthesis, AioVideoSynthesis

  __all__ = [
  Generation,
+ AioGeneration,
  Conversation,
  HistoryItem,
  History,
  ImageSynthesis,
+ AioImageSynthesis,
  MultiModalConversation,
  AioMultiModalConversation,
  VideoSynthesis,
+ AioVideoSynthesis,
  ]
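
Note: with these export changes the async variants become importable alongside their synchronous counterparts. A minimal usage sketch follows; the AioGeneration.call arguments are an assumption based on the synchronous Generation.call and are not shown in this diff.

import asyncio
from dashscope.aigc import AioGeneration  # new export in 1.24.5

async def main():
    # Assumption: AioGeneration.call mirrors Generation.call(model=..., prompt=...).
    rsp = await AioGeneration.call(model='qwen-turbo', prompt='Say hi')
    print(rsp.status_code)

asyncio.run(main())
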

dashscope/aigc/multimodal_conversation.py CHANGED
@@ -24,9 +24,10 @@ class MultiModalConversation(BaseApi):
  def call(
  cls,
  model: str,
- messages: List,
+ messages: List = None,
  api_key: str = None,
  workspace: str = None,
+ text: str = None,
  **kwargs
  ) -> Union[MultiModalConversationResponse, Generator[
  MultiModalConversationResponse, None, None]]:
@@ -55,6 +56,7 @@ class MultiModalConversation(BaseApi):
  if None, will retrieve by rule [1].
  [1]: https://help.aliyun.com/zh/dashscope/developer-reference/api-key-settings. # noqa E501
  workspace (str): The dashscope workspace id.
+ text (str): The text to generate.
  **kwargs:
  stream(bool, `optional`): Enable server-sent events
  (ref: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) # noqa E501
@@ -68,8 +70,11 @@ class MultiModalConversation(BaseApi):
  tokens with top_p probability mass. So 0.1 means only
  the tokens comprising the top 10% probability mass are
  considered[qwen-turbo,bailian-v1].
+ voice(string, `optional`): The voice name of qwen tts, include 'Cherry'/'Ethan'/'Sunny'/'Dylan' and so on,
+ you can get the total voice list : https://help.aliyun.com/zh/model-studio/qwen-tts.
  top_k(float, `optional`):

+
  Raises:
  InvalidInput: The history and auto_history are mutually exclusive.

@@ -78,18 +83,24 @@ class MultiModalConversation(BaseApi):
  Generator[MultiModalConversationResponse, None, None]]: If
  stream is True, return Generator, otherwise MultiModalConversationResponse.
  """
- if (messages is None or not messages):
- raise InputRequired('prompt or messages is required!')
  if model is None or not model:
  raise ModelRequired('Model is required!')
  task_group, _ = _get_task_group_and_task(__name__)
- msg_copy = copy.deepcopy(messages)
- has_upload = cls._preprocess_messages(model, msg_copy, api_key)
- if has_upload:
- headers = kwargs.pop('headers', {})
- headers['X-DashScope-OssResourceResolve'] = 'enable'
- kwargs['headers'] = headers
- input = {'messages': msg_copy}
+ input = {}
+ msg_copy = None
+
+ if messages is not None and messages:
+ msg_copy = copy.deepcopy(messages)
+ has_upload = cls._preprocess_messages(model, msg_copy, api_key)
+ if has_upload:
+ headers = kwargs.pop('headers', {})
+ headers['X-DashScope-OssResourceResolve'] = 'enable'
+ kwargs['headers'] = headers
+
+ if text is not None and text:
+ input.update({'text': text})
+ if msg_copy is not None:
+ input.update({'messages': msg_copy})
  response = super().call(model=model,
  task_group=task_group,
  task=MultiModalConversation.task,
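
The new optional `text` parameter and the documented `voice` keyword let MultiModalConversation.call send a plain-text payload (the qwen-tts use case) instead of, or alongside, `messages`. A minimal sketch of the new call shape; the 'qwen-tts' model name is an assumption based on the docstring's URL, only the `text`/`voice` parameters themselves are confirmed by this diff.

from dashscope import MultiModalConversation

# Hypothetical TTS-style call using the parameters added in 1.24.5.
rsp = MultiModalConversation.call(
    model='qwen-tts',             # assumed model name, see the qwen-tts link in the docstring
    text='Hello from DashScope',  # new: plain text input, packed into input['text']
    voice='Cherry',               # new documented kwarg: voice name
)
print(rsp.status_code)
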
@@ -145,9 +156,10 @@ class AioMultiModalConversation(BaseAioApi):
  async def call(
  cls,
  model: str,
- messages: List,
+ messages: List = None,
  api_key: str = None,
  workspace: str = None,
+ text: str = None,
  **kwargs
  ) -> Union[MultiModalConversationResponse, Generator[
  MultiModalConversationResponse, None, None]]:
@@ -176,6 +188,7 @@ class AioMultiModalConversation(BaseAioApi):
  if None, will retrieve by rule [1].
  [1]: https://help.aliyun.com/zh/dashscope/developer-reference/api-key-settings. # noqa E501
  workspace (str): The dashscope workspace id.
+ text (str): The text to generate.
  **kwargs:
  stream(bool, `optional`): Enable server-sent events
  (ref: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) # noqa E501
@@ -189,6 +202,8 @@ class AioMultiModalConversation(BaseAioApi):
  tokens with top_p probability mass. So 0.1 means only
  the tokens comprising the top 10% probability mass are
  considered[qwen-turbo,bailian-v1].
+ voice(string, `optional`): The voice name of qwen tts, include 'Cherry'/'Ethan'/'Sunny'/'Dylan' and so on,
+ you can get the total voice list : https://help.aliyun.com/zh/model-studio/qwen-tts.
  top_k(float, `optional`):

  Raises:
@@ -199,18 +214,24 @@ class AioMultiModalConversation(BaseAioApi):
  Generator[MultiModalConversationResponse, None, None]]: If
  stream is True, return Generator, otherwise MultiModalConversationResponse.
  """
- if (messages is None or not messages):
- raise InputRequired('prompt or messages is required!')
  if model is None or not model:
  raise ModelRequired('Model is required!')
  task_group, _ = _get_task_group_and_task(__name__)
- msg_copy = copy.deepcopy(messages)
- has_upload = cls._preprocess_messages(model, msg_copy, api_key)
- if has_upload:
- headers = kwargs.pop('headers', {})
- headers['X-DashScope-OssResourceResolve'] = 'enable'
- kwargs['headers'] = headers
- input = {'messages': msg_copy}
+ input = {}
+ msg_copy = None
+
+ if messages is not None and messages:
+ msg_copy = copy.deepcopy(messages)
+ has_upload = cls._preprocess_messages(model, msg_copy, api_key)
+ if has_upload:
+ headers = kwargs.pop('headers', {})
+ headers['X-DashScope-OssResourceResolve'] = 'enable'
+ kwargs['headers'] = headers
+
+ if text is not None and text:
+ input.update({'text': text})
+ if msg_copy is not None:
+ input.update({'messages': msg_copy})
  response = await super().call(model=model,
  task_group=task_group,
  task=AioMultiModalConversation.task,
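
The async class mirrors the same logic, so the equivalent call is simply awaited. A brief sketch under the same assumptions as above:

import asyncio
from dashscope.aigc.multimodal_conversation import AioMultiModalConversation

async def main():
    rsp = await AioMultiModalConversation.call(model='qwen-tts',  # assumed model name
                                               text='Hello again',
                                               voice='Ethan')
    print(rsp.status_code)

asyncio.run(main())
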

dashscope/api_entities/dashscope_response.py CHANGED
@@ -152,6 +152,26 @@ class Choice(DictMixin):
  **kwargs)


+ @dataclass(init=False)
+ class Audio(DictMixin):
+ data: str
+ url: str
+ id: str
+ expires_at: int
+
+ def __init__(self,
+ data: str = None,
+ url: str = None,
+ id: str = None,
+ expires_at: int = None,
+ **kwargs):
+ super().__init__(data=data,
+ url=url,
+ id=id,
+ expires_at=expires_at,
+ **kwargs)
+
+
  @dataclass(init=False)
  class GenerationOutput(DictMixin):
  text: str
@@ -217,20 +237,25 @@ class GenerationResponse(DashScopeAPIResponse):
  @dataclass(init=False)
  class MultiModalConversationOutput(DictMixin):
  choices: List[Choice]
+ audio: Audio

  def __init__(self,
  text: str = None,
  finish_reason: str = None,
  choices: List[Choice] = None,
+ audio: Audio = None,
  **kwargs):
  chs = None
  if choices is not None:
  chs = []
  for choice in choices:
  chs.append(Choice(**choice))
+ if audio is not None:
+ audio = Audio(**audio)
  super().__init__(text=text,
  finish_reason=finish_reason,
  choices=chs,
+ audio=audio,
  **kwargs)


@@ -238,15 +263,18 @@ class MultiModalConversationOutput(DictMixin):
  class MultiModalConversationUsage(DictMixin):
  input_tokens: int
  output_tokens: int
+ characters: int

  # TODO add image usage info.

  def __init__(self,
  input_tokens: int = 0,
  output_tokens: int = 0,
+ characters: int = 0,
  **kwargs):
  super().__init__(input_tokens=input_tokens,
  output_tokens=output_tokens,
+ characters=characters,
  **kwargs)


@@ -378,7 +406,7 @@ class RecognitionResponse(DashScopeAPIResponse):
  """
  result = False
  if sentence is not None and 'end_time' in sentence and sentence[
- 'end_time'] is not None:
+ 'end_time'] is not None:
  result = True
  return result

@@ -445,8 +473,8 @@ class ImageSynthesisOutput(DictMixin):
  results: List[ImageSynthesisResult]

  def __init__(self,
- task_id: str = None,
- task_status: str = None,
+ task_id: str = None,
+ task_status: str = None,
  results: List[ImageSynthesisResult] = [],
  **kwargs):
  res = []
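
Together with the new Audio block on MultiModalConversationOutput and the characters counter on MultiModalConversationUsage, a TTS-style response now carries its synthesized audio and character usage. A sketch of reading those fields; the call itself and the attribute-style access are assumptions, while the fields data/url/id/expires_at and characters are what this diff adds.

from dashscope import MultiModalConversation

rsp = MultiModalConversation.call(model='qwen-tts', text='Read this aloud', voice='Dylan')
if rsp.status_code == 200:
    audio = rsp.output.audio        # new Audio(data, url, id, expires_at)
    print(audio.url, audio.expires_at)
    print(rsp.usage.characters)     # new character-based usage counter
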

dashscope/embeddings/multimodal_embedding.py CHANGED
@@ -5,7 +5,7 @@ from typing import List

  from dashscope.api_entities.dashscope_response import (DashScopeAPIResponse,
  DictMixin)
- from dashscope.client.base_api import BaseApi
+ from dashscope.client.base_api import BaseApi, BaseAioApi
  from dashscope.common.error import InputRequired, ModelRequired
  from dashscope.common.utils import _get_task_group_and_task
  from dashscope.utils.oss_utils import preprocess_message_element
@@ -111,3 +111,72 @@ class MultiModalEmbedding(BaseApi):
  if is_upload and not has_upload:
  has_upload = True
  return has_upload
+
+
+ class AioMultiModalEmbedding(BaseAioApi):
+ task = 'multimodal-embedding'
+
+ class Models:
+ multimodal_embedding_one_peace_v1 = 'multimodal-embedding-one-peace-v1'
+
+ @classmethod
+ async def call(cls,
+ model: str,
+ input: List[MultiModalEmbeddingItemBase],
+ api_key: str = None,
+ workspace: str = None,
+ **kwargs) -> DashScopeAPIResponse:
+ """Get embedding multimodal contents..
+
+ Args:
+ model (str): The embedding model name.
+ input (List[MultiModalEmbeddingElement]): The embedding elements,
+ every element include data, modal, factor field.
+ workspace (str): The dashscope workspace id.
+ **kwargs:
+ auto_truncation(bool, `optional`): Automatically truncate
+ audio longer than 15 seconds or text longer than 70 words.
+ Default to false(Too long input will result in failure).
+
+ Returns:
+ DashScopeAPIResponse: The embedding result.
+ """
+ if input is None or not input:
+ raise InputRequired('prompt is required!')
+ if model is None or not model:
+ raise ModelRequired('Model is required!')
+ embedding_input = {}
+ has_upload = cls._preprocess_message_inputs(model, input, api_key)
+ if has_upload:
+ headers = kwargs.pop('headers', {})
+ headers['X-DashScope-OssResourceResolve'] = 'enable'
+ kwargs['headers'] = headers
+ embedding_input['contents'] = input
+ kwargs.pop('stream', False) # not support streaming output.
+ task_group, function = _get_task_group_and_task(__name__)
+ response = await super().call(
+ model=model,
+ input=embedding_input,
+ task_group=task_group,
+ task=MultiModalEmbedding.task,
+ function=function,
+ api_key=api_key,
+ workspace=workspace,
+ **kwargs)
+ return response
+
+ @classmethod
+ def _preprocess_message_inputs(cls, model: str, input: List[dict],
+ api_key: str):
+ """preprocess following inputs
+ input = [{'factor': 1, 'text': 'hello'},
+ {'factor': 2, 'audio': ''},
+ {'factor': 3, 'image': ''}]
+ """
+ has_upload = False
+ for elem in input:
+ if not isinstance(elem, (int, float, bool, str, bytes, bytearray)):
+ is_upload = preprocess_message_element(model, elem, api_key)
+ if is_upload and not has_upload:
+ has_upload = True
+ return has_upload
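
A minimal sketch of the new async embedding entry point; the input dicts follow the shape shown in the _preprocess_message_inputs docstring, and the text content is made up.

import asyncio
from dashscope import AioMultiModalEmbedding  # re-exported at top level in 1.24.5

async def main():
    rsp = await AioMultiModalEmbedding.call(
        model=AioMultiModalEmbedding.Models.multimodal_embedding_one_peace_v1,
        input=[{'factor': 1, 'text': 'hello'}],  # dict shape from the docstring example
        auto_truncation=True,                    # documented kwarg
    )
    print(rsp.status_code)

asyncio.run(main())
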
dashscope/version.py CHANGED
@@ -1,3 +1,3 @@
  # Copyright (c) Alibaba, Inc. and its affiliates.

- __version__ = '1.24.4'
+ __version__ = '1.24.5'
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dashscope
- Version: 1.24.4
+ Version: 1.24.5
  Summary: dashscope client sdk library
  Home-page: https://dashscope.aliyun.com/
  Author: Alibaba Cloud
@@ -1,16 +1,16 @@
- dashscope/__init__.py,sha256=MJI4PJmnevfQA_GA30_L3hmsq49hooZpg25k6w39dts,3120
+ dashscope/__init__.py,sha256=96J137Im9Ii9uxfVOOYkZDJNZXF1sEbcH4-QXFr4xEw,3172
  dashscope/cli.py,sha256=64oGkevgX0RHPPmMg0sevXDgaFLQNA_0vdtjQ7Z2pHM,26492
  dashscope/files.py,sha256=vRDQygm3lOqBZR73o7KNHs1iTBVuvLncuwJNxIYjzAU,3981
  dashscope/model.py,sha256=B5v_BtYLPqj6raClejBgdKg6WTGwhH_f-20pvsQqmsk,1491
  dashscope/models.py,sha256=dE4mzXkl85G343qVylSGpURPRdA5pZSqXlx6PcxqC_Q,1275
- dashscope/version.py,sha256=-rY4TVBLCGXmd6f3VVXw5GRs4Fvfu8nGa-Yu2KG4ReM,74
- dashscope/aigc/__init__.py,sha256=m51CHEKL3WPq-s14OF-G1Uk3rLj6B6KrU55bbCKU-Ak,500
+ dashscope/version.py,sha256=2fvqw7bZLyWOIDvUb8DEkdi6y_VgyljhOeYdITEksWM,74
+ dashscope/aigc/__init__.py,sha256=kYvYEoRK-NUHyMWpBDNQBz4fVA__uOhHRK2kDTBaWgk,617
  dashscope/aigc/chat_completion.py,sha256=ONlyyssIbfaKKcFo7cEKhHx5OCF2XX810HFzIExW1ho,14813
  dashscope/aigc/code_generation.py,sha256=p_mxDKJLQMW0IjFD46JRlZuEZCRESSVKEfLlAevBtqw,10936
  dashscope/aigc/conversation.py,sha256=95xEEY4ThZJysj5zy3aMw7ql9KLJVfD_1iHv9QZ17Ew,14282
  dashscope/aigc/generation.py,sha256=xMcMu16rICTdjZiD_sPqYV_Ltdp4ewGzzfC7JD9VApY,17948
  dashscope/aigc/image_synthesis.py,sha256=Itx9h5brEwC-d3Mj_ntDHGd4qaitqDg9DeGHMJouhMk,28178
- dashscope/aigc/multimodal_conversation.py,sha256=Kjg8Gtfhl_Ok8WVwD-AeT-VBN9hh6E74TfkCxkL5wbY,10821
+ dashscope/aigc/multimodal_conversation.py,sha256=BXpUthyGxJHQs18-m_ZzAw6MI5nSM4_NVMUfTDSC1-k,11682
  dashscope/aigc/video_synthesis.py,sha256=RSPjar5-YiF9xclRmf9H7-5QbRxLcsNXO4zS7oTKi2I,24137
  dashscope/api_entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  dashscope/api_entities/aiohttp_request.py,sha256=1L7XdIJ9L65cQmX8x9JCR4t5hNIMDrbiWADfKKp9yfo,10280
@@ -18,7 +18,7 @@ dashscope/api_entities/api_request_data.py,sha256=04rpYPNK1HkT3iTPJmZpquH621xcBb
  dashscope/api_entities/api_request_factory.py,sha256=ynpbFmxSne4dJkv5m40Vlwt4hJSxQPprAuUgMSQIQDg,5639
  dashscope/api_entities/base_request.py,sha256=W2SzrSAGFS6V8DErfSrayQtSL0T4iO7BrC8flr7nt1w,977
  dashscope/api_entities/chat_completion_types.py,sha256=1WMWPszhM3HaJBVz-ZXx-El4D8-RfVUL3ym65xsDRLk,11435
- dashscope/api_entities/dashscope_response.py,sha256=slc7o9jNS5yzv2giEPzz9CDOW6X797nkAocgZ1r84aU,22089
+ dashscope/api_entities/dashscope_response.py,sha256=31guU41ePkLyFsVVN-1WODXdOHiURzRyxxhrUmX9dGM,22835
  dashscope/api_entities/encryption.py,sha256=rUCZx3wwVvS5oyKXEeWgyWPxM8Y5d4AaVdgxLhizBqA,5517
  dashscope/api_entities/http_request.py,sha256=MTxYsbkK8oYWDp8ZPjrkdY9YbnQ9SEIy87riyJidMXo,16484
  dashscope/api_entities/websocket_request.py,sha256=PS0FU854-HjTbKa68f4GHa7-noFRMzKySJGfPkrrBjw,16146
@@ -65,7 +65,7 @@ dashscope/customize/finetunes.py,sha256=AL_kGTJXMvM2ej-EKsLLd1dUphPQdVTefFVCSVH-
  dashscope/embeddings/__init__.py,sha256=XQ7vKr8oZM2CmdOduE53BWy6_Qpn9xUPkma64yw8Gws,291
  dashscope/embeddings/batch_text_embedding.py,sha256=lVhvTS8McYfXuqt_8CmmhA6bPqD0nrGv965kjYG_j0E,8842
  dashscope/embeddings/batch_text_embedding_response.py,sha256=ZfkJMUq8GRsFA6XUTsiAsIySqGJH-VPi2P9Ba1KTU-s,2056
- dashscope/embeddings/multimodal_embedding.py,sha256=NwjQsdkKgUz51ozGjqFDzVlLcZjY0m1JNdH1EyAY0a4,4109
+ dashscope/embeddings/multimodal_embedding.py,sha256=PEF7DmtE5cbrXw4k3WQcfmsBKaAY3CTIoei3SyhOl34,6774
  dashscope/embeddings/text_embedding.py,sha256=2MPEyMB99xueDbvFg9kKAe8bgHMDEaFLaFa6GzDWDHg,2108
  dashscope/io/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  dashscope/io/input_output.py,sha256=0aXrRJFo1ZqYm_AJWR_w88O4-Btn9np2zUhrrUdBdfw,3992
@@ -100,9 +100,9 @@ dashscope/tokenizers/tokenizer.py,sha256=3FQVDvMNkCW9ccYeJdjrd_PIMMD3Xv7aNZkaYOE
  dashscope/tokenizers/tokenizer_base.py,sha256=5EJIFuizMWESEmLmbd38yJnfeHmPnzZPwsO4aOGjpl4,707
  dashscope/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  dashscope/utils/oss_utils.py,sha256=aZIHlMN2JOfVw6kp0SVrMw_N1MfoTcR_-wiRbJ7DgHw,7501
- dashscope-1.24.4.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- dashscope-1.24.4.dist-info/METADATA,sha256=uGVIdzKXASvLK5vUwMAFzIOS_qCj1RIGo_XCqvrjloQ,7146
- dashscope-1.24.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dashscope-1.24.4.dist-info/entry_points.txt,sha256=e9C3sOf9zDYL0O5ROEGX6FT8w-QK_kaGRWmPZDHAFys,49
- dashscope-1.24.4.dist-info/top_level.txt,sha256=woqavFJK9zas5xTqynmALqOtlafghjsk63Xk86powTU,10
- dashscope-1.24.4.dist-info/RECORD,,
+ dashscope-1.24.5.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ dashscope-1.24.5.dist-info/METADATA,sha256=eRxoK1TphMD4hn-vUM3aPMtaCsvCzstnDe-QPH9A4Q0,7146
+ dashscope-1.24.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dashscope-1.24.5.dist-info/entry_points.txt,sha256=e9C3sOf9zDYL0O5ROEGX6FT8w-QK_kaGRWmPZDHAFys,49
+ dashscope-1.24.5.dist-info/top_level.txt,sha256=woqavFJK9zas5xTqynmALqOtlafghjsk63Xk86powTU,10
+ dashscope-1.24.5.dist-info/RECORD,,