unique_toolkit 0.6.0__py3-none-any.whl → 0.6.2__py3-none-any.whl
This diff compares the content of two publicly released versions of the package as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
- unique_toolkit/chat/functions.py +146 -2
- unique_toolkit/chat/service.py +215 -7
- unique_toolkit/evaluators/config.py +1 -1
- unique_toolkit/evaluators/context_relevancy/constants.py +1 -1
- unique_toolkit/language_model/functions.py +18 -151
- unique_toolkit/language_model/infos.py +146 -207
- unique_toolkit/language_model/schemas.py +46 -24
- unique_toolkit/language_model/service.py +0 -220
- {unique_toolkit-0.6.0.dist-info → unique_toolkit-0.6.2.dist-info}/METADATA +13 -3
- {unique_toolkit-0.6.0.dist-info → unique_toolkit-0.6.2.dist-info}/RECORD +12 -12
- {unique_toolkit-0.6.0.dist-info → unique_toolkit-0.6.2.dist-info}/LICENSE +0 -0
- {unique_toolkit-0.6.0.dist-info → unique_toolkit-0.6.2.dist-info}/WHEEL +0 -0
unique_toolkit/language_model/service.py +0 -220

@@ -6,7 +6,6 @@ from typing_extensions import deprecated
 
 from unique_toolkit._common.validate_required_values import validate_required_values
 from unique_toolkit.app.schemas import BaseEvent, ChatEvent, Event
-from unique_toolkit.content.schemas import ContentChunk
 from unique_toolkit.language_model.constants import (
     DEFAULT_COMPLETE_TEMPERATURE,
     DEFAULT_COMPLETE_TIMEOUT,
@@ -15,14 +14,11 @@ from unique_toolkit.language_model.constants import (
 from unique_toolkit.language_model.functions import (
     complete,
     complete_async,
-    stream_complete_to_chat,
-    stream_complete_to_chat_async,
 )
 from unique_toolkit.language_model.infos import LanguageModelName
 from unique_toolkit.language_model.schemas import (
     LanguageModelMessages,
     LanguageModelResponse,
-    LanguageModelStreamResponse,
     LanguageModelTool,
 )
 
@@ -36,10 +32,8 @@ class LanguageModelService:
     Args:
         company_id (str | None, optional): The company identifier. Defaults to None.
         user_id (str | None, optional): The user identifier. Defaults to None.
-        assistant_message_id (str | None, optional): The assistant message identifier. Defaults to None.
        chat_id (str | None, optional): The chat identifier. Defaults to None.
        assistant_id (str | None, optional): The assistant identifier. Defaults to None.
-        user_message_id (str | None, optional): The user message identifier. Defaults to None.
    """
 
    def __init__(
@@ -47,16 +41,12 @@
         event: Event | BaseEvent | None = None,
         company_id: str | None = None,
         user_id: str | None = None,
-        assistant_message_id: str | None = None,
         chat_id: str | None = None,
         assistant_id: str | None = None,
-        user_message_id: str | None = None,
     ):
         self._event = event
         self.company_id = company_id
         self.user_id = user_id
-        self.assistant_message_id = assistant_message_id
-        self.user_message_id = user_message_id
         self.chat_id = chat_id
         self.assistant_id = assistant_id
 
@@ -64,8 +54,6 @@ class LanguageModelService:
             self.company_id = event.company_id
             self.user_id = event.user_id
             if isinstance(event, (ChatEvent, Event)):
-                self.assistant_message_id = event.payload.assistant_message.id
-                self.user_message_id = event.payload.user_message.id
                 self.chat_id = event.payload.chat_id
                 self.assistant_id = event.payload.assistant_id
 
@@ -167,211 +155,3 @@ class LanguageModelService:
             structured_output_model=structured_output_model,
             structured_output_enforce_schema=structured_output_enforce_schema,
         )
-
-    @deprecated("Use stream_complete_to_chat instead")
-    def stream_complete(
-        self,
-        messages: LanguageModelMessages,
-        model_name: LanguageModelName | str,
-        content_chunks: list[ContentChunk] = [],
-        debug_info: dict = {},
-        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
-        timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-        tools: Optional[list[LanguageModelTool]] = None,
-        start_text: Optional[str] = None,
-        other_options: Optional[dict] = None,
-    ) -> LanguageModelStreamResponse:
-        """
-        Streams a completion in the chat session synchronously.
-        """
-        [
-            company_id,
-            user_id,
-            assistant_message_id,
-            user_message_id,
-            chat_id,
-            assistant_id,
-        ] = validate_required_values(
-            [
-                self.company_id,
-                self.user_id,
-                self.assistant_message_id,
-                self.user_message_id,
-                self.chat_id,
-                self.assistant_id,
-            ]
-        )
-
-        return stream_complete_to_chat(
-            company_id=company_id,
-            user_id=user_id,
-            assistant_message_id=assistant_message_id,
-            user_message_id=user_message_id,
-            chat_id=chat_id,
-            assistant_id=assistant_id,
-            messages=messages,
-            model_name=model_name,
-            content_chunks=content_chunks,
-            debug_info=debug_info,
-            temperature=temperature,
-            timeout=timeout,
-            tools=tools,
-            start_text=start_text,
-            other_options=other_options,
-        )
-
-    def stream_complete_to_chat(
-        self,
-        messages: LanguageModelMessages,
-        model_name: LanguageModelName | str,
-        content_chunks: list[ContentChunk] = [],
-        debug_info: dict = {},
-        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
-        timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-        tools: Optional[list[LanguageModelTool]] = None,
-        start_text: Optional[str] = None,
-        other_options: Optional[dict] = None,
-    ) -> LanguageModelStreamResponse:
-        """
-        Streams a completion in the chat session synchronously.
-        """
-        [
-            company_id,
-            user_id,
-            assistant_message_id,
-            user_message_id,
-            chat_id,
-            assistant_id,
-        ] = validate_required_values(
-            [
-                self.company_id,
-                self.user_id,
-                self.assistant_message_id,
-                self.user_message_id,
-                self.chat_id,
-                self.assistant_id,
-            ]
-        )
-
-        return stream_complete_to_chat(
-            company_id=company_id,
-            user_id=user_id,
-            assistant_message_id=assistant_message_id,
-            user_message_id=user_message_id,
-            chat_id=chat_id,
-            assistant_id=assistant_id,
-            messages=messages,
-            model_name=model_name,
-            content_chunks=content_chunks,
-            debug_info=debug_info,
-            temperature=temperature,
-            timeout=timeout,
-            tools=tools,
-            start_text=start_text,
-            other_options=other_options,
-        )
-
-    @deprecated("Use stream_complete_to_chat_async instead")
-    async def stream_complete_async(
-        self,
-        messages: LanguageModelMessages,
-        model_name: LanguageModelName | str,
-        content_chunks: list[ContentChunk] = [],
-        debug_info: dict = {},
-        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
-        timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-        tools: Optional[list[LanguageModelTool]] = None,
-        start_text: Optional[str] = None,
-        other_options: Optional[dict] = None,
-    ) -> LanguageModelStreamResponse:
-        """
-        Streams a completion in the chat session asynchronously.
-        """
-
-        [
-            company_id,
-            user_id,
-            assistant_message_id,
-            user_message_id,
-            chat_id,
-            assistant_id,
-        ] = validate_required_values(
-            [
-                self.company_id,
-                self.user_id,
-                self.assistant_message_id,
-                self.user_message_id,
-                self.chat_id,
-                self.assistant_id,
-            ]
-        )
-
-        return await stream_complete_to_chat_async(
-            company_id=company_id,
-            user_id=user_id,
-            assistant_message_id=assistant_message_id,
-            user_message_id=user_message_id,
-            chat_id=chat_id,
-            assistant_id=assistant_id,
-            messages=messages,
-            model_name=model_name,
-            content_chunks=content_chunks,
-            debug_info=debug_info,
-            temperature=temperature,
-            timeout=timeout,
-            tools=tools,
-            start_text=start_text,
-            other_options=other_options,
-        )
-
-    async def stream_complete_to_chat_async(
-        self,
-        messages: LanguageModelMessages,
-        model_name: LanguageModelName | str,
-        content_chunks: list[ContentChunk] = [],
-        debug_info: dict = {},
-        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
-        timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-        tools: Optional[list[LanguageModelTool]] = None,
-        start_text: Optional[str] = None,
-        other_options: Optional[dict] = None,
-    ) -> LanguageModelStreamResponse:
-        """
-        Streams a completion in the chat session asynchronously.
-        """
-
-        [
-            company_id,
-            user_id,
-            assistant_message_id,
-            user_message_id,
-            chat_id,
-            assistant_id,
-        ] = validate_required_values(
-            [
-                self.company_id,
-                self.user_id,
-                self.assistant_message_id,
-                self.user_message_id,
-                self.chat_id,
-                self.assistant_id,
-            ]
-        )
-
-        return await stream_complete_to_chat_async(
-            company_id=company_id,
-            user_id=user_id,
-            assistant_message_id=assistant_message_id,
-            user_message_id=user_message_id,
-            chat_id=chat_id,
-            assistant_id=assistant_id,
-            messages=messages,
-            model_name=model_name,
-            content_chunks=content_chunks,
-            debug_info=debug_info,
-            temperature=temperature,
-            timeout=timeout,
-            tools=tools,
-            start_text=start_text,
-            other_options=other_options,
-        )
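The streaming methods removed above were relocated rather than dropped: the 0.6.1 changelog entry further down moves them to `ChatService.stream_complete` and `ChatService.stream_complete_async`. A minimal migration sketch, assuming `ChatService` can be constructed from the incoming `ChatEvent` (as the 0.6.0 entry describes for the domain services) and that its `stream_complete` keeps the message and model parameters shown in the removed code:

```python
# Hypothetical migration sketch, not taken from the package source: the method
# names and import paths follow the changelog and README text in this diff, but
# the exact ChatService constructor and stream_complete signature are assumptions.
from unique_toolkit.app.schemas import ChatEvent
from unique_toolkit.chat.service import ChatService
from unique_toolkit.language_model.infos import LanguageModelName
from unique_toolkit.language_model.schemas import LanguageModelMessages


def stream_answer(event: ChatEvent):
    # Before 0.6.1 this call lived on LanguageModelService, which also had to
    # carry assistant_message_id and user_message_id; those ids belong to the
    # chat domain, so the streaming call now goes through ChatService.
    chat_service = ChatService(event=event)  # assumed: services init from events (0.6.0)
    messages = LanguageModelMessages([])  # assumed: wraps a plain list of messages
    return chat_service.stream_complete(
        messages=messages,
        model_name=LanguageModelName.AZURE_GPT_35_TURBO_0125,
    )
```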
{unique_toolkit-0.6.0.dist-info → unique_toolkit-0.6.2.dist-info}/METADATA +13 -3

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: unique_toolkit
-Version: 0.6.0
+Version: 0.6.2
 Summary:
 License: Proprietary
 Author: Martin Fadler
@@ -55,7 +55,7 @@ The `unique_toolkit.app` module encompasses functions for initializing and secur
 The `unique_toolkit.chat` module encompasses all chat related functionality.
 
 - `functions.py` comprises the functions to manage and load the chat history and interact with the chat ui, e.g., creating a new assistant message.
-- `service.py` comprises the ChatService and provides an interface to manage and load the chat history and interact with the chat ui, e.g., creating a new assistant message.
+- `service.py` comprises the ChatService and provides an interface to manage and load the chat history and interact with the chat ui, e.g., creating a new assistant message and stream complete.
 - `schemas.py` comprises all relevant schemas, e.g., ChatMessage, used in the ChatService.
 - `utils.py` comprises utility functions to use and convert ChatMessage objects in assistants, e.g., convert_chat_history_to_injectable_string converts the chat history to a string that can be injected into a prompt.
 
@@ -83,7 +83,7 @@ Unique platform.
 
 - `infos.py` comprises the information on all language models deployed through the Unique platform. We recommend to use the LanguageModel class, initialized with the LanguageModelName, e.g., LanguageModel(LanguageModelName.AZURE_GPT_35_TURBO_0125) to get the information on the specific language model like the name, version, token limits or retirement date.
 - `functions.py` comprises the functions to complete and stream complete to chat.
-- `service.py` comprises the LanguageModelService and provides an interface to interact with the language models, e.g., complete
+- `service.py` comprises the LanguageModelService and provides an interface to interact with the language models, e.g., complete.
 - `schemas.py` comprises all relevant schemas, e.g., LanguageModelResponse, used in the LanguageModelService.
 - `utils.py` comprises utility functions to parse the output of the language model, e.g., convert_string_to_json finds and parses the last json object in a string.
 
@@ -111,6 +111,16 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.6.2] - 2025-02-25
+- Deprecate `LanguageModel` in favor of `LanguageModelInfo`
+- `LanguageModelTokenLimits` properties become mandatory, initialization allows
+  - init with `token_limit` and `fraction_input` or `input_token_limit` and `output_token_limit`
+  - only `input_token_limit` and `output_token_limit` are members of model
+
+## [0.6.1] - 2025-02-25
+- [BREAKING] `LanguageModelService.stream_complete` and `LanguageModelService.stream_complete_async` are now moved to `ChatService.stream_complete` and `ChatService.stream_complete_async`. Correspondingly `assistant_message_id` and `user_message_id` are removed from `LanguageModelService`.
+- Add `create_user_message` and `create_user_message_async` to `ChatService` (similar to `create_assistant_message` and `create_assistant_message_async`)
+
 ## [0.6.0] - 2025-02-21
 - make for each domain, its base functionality accessible from `functions.py`
 - make it possible to instantiate the domain services directly from different event types, inhereted from common `BaseEvent`
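The 0.6.2 entry above names two ways to initialize `LanguageModelTokenLimits`. A small sketch of both paths, assuming the class lives in `language_model/schemas.py` (documented above as the home of the language-model schemas) and using placeholder numbers:

```python
# Illustration of the two initialization options listed in the 0.6.2 changelog;
# the import location and the derived-attribute behaviour are assumptions based
# on the changelog wording, not on the package source.
from unique_toolkit.language_model.schemas import LanguageModelTokenLimits

# Option 1: give a total token budget plus the fraction reserved for input.
limits_from_fraction = LanguageModelTokenLimits(
    token_limit=128_000,   # placeholder total budget
    fraction_input=0.75,   # placeholder share reserved for the prompt
)

# Option 2: give the input and output limits directly.
limits_explicit = LanguageModelTokenLimits(
    input_token_limit=96_000,
    output_token_limit=32_000,
)

# Per the changelog, only input_token_limit and output_token_limit remain
# members of the model, whichever option was used to construct it.
print(limits_from_fraction.input_token_limit, limits_from_fraction.output_token_limit)
print(limits_explicit.input_token_limit, limits_explicit.output_token_limit)
```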
{unique_toolkit-0.6.0.dist-info → unique_toolkit-0.6.2.dist-info}/RECORD +12 -12

@@ -13,9 +13,9 @@ unique_toolkit/app/schemas.py,sha256=hPOh5xLNNWgWVIkdrj6ZHYaGz0cTV-5Kv7OQHOaUgV8
 unique_toolkit/app/verification.py,sha256=mffa6wm0i4hJbwzofePrkaia46xumMzECwQ0T3eKAx0,1929
 unique_toolkit/chat/__init__.py,sha256=LRs2G-JTVuci4lbtHTkVUiNcZcSR6uqqfnAyo7af6nY,619
 unique_toolkit/chat/constants.py,sha256=05kq6zjqUVB2d6_P7s-90nbljpB3ryxwCI-CAz0r2O4,83
-unique_toolkit/chat/functions.py,sha256=
+unique_toolkit/chat/functions.py,sha256=J9Cmgkhj9bBxZja3ggkSp48af_LPU4Dfi9Sbc_WhhNY,27204
 unique_toolkit/chat/schemas.py,sha256=MNcGAXjK1K8zOODeMFz3FHVQL5sIBQXRwkr_2hFkG8k,2672
-unique_toolkit/chat/service.py,sha256=
+unique_toolkit/chat/service.py,sha256=SoFeaOi0BTexhiyX6busui_7JjhlRu30YNiKdgwV3JQ,29127
 unique_toolkit/chat/state.py,sha256=Cjgwv_2vhDFbV69xxsn7SefhaoIAEqLx3ferdVFCnOg,1445
 unique_toolkit/chat/utils.py,sha256=ihm-wQykBWhB4liR3LnwPVPt_qGW6ETq21Mw4HY0THE,854
 unique_toolkit/content/__init__.py,sha256=EdJg_A_7loEtCQf4cah3QARQreJx6pdz89Rm96YbMVg,940
@@ -31,9 +31,9 @@ unique_toolkit/embedding/schemas.py,sha256=1GvKCaSk4jixzVQ2PKq8yDqwGEVY_hWclYtoA
 unique_toolkit/embedding/service.py,sha256=sCMKeFjwNrWYQic1UUW2c1jnhjRQLcDYfsBgxmR70sY,2697
 unique_toolkit/embedding/utils.py,sha256=v86lo__bCJbxZBQ3OcLu5SuwT6NbFfWlcq8iyk6BuzQ,279
 unique_toolkit/evaluators/__init__.py,sha256=3Rfpnowm7MUXHWmeU4UV4s_3Hk-sw3V20oBwQCYlejQ,50
-unique_toolkit/evaluators/config.py,sha256=
+unique_toolkit/evaluators/config.py,sha256=iYiBi7M6u5MG9nVgpxl9dKfoS4j72stA6Hl-MQHmYp8,1056
 unique_toolkit/evaluators/constants.py,sha256=1oI93jsh0R_TjX_8OenliiiywVe3vTooSnaMqtq6R18,27
-unique_toolkit/evaluators/context_relevancy/constants.py,sha256=
+unique_toolkit/evaluators/context_relevancy/constants.py,sha256=9mAGc23e5XjTYOBfeuZVbaqOyYrvRoXYjfUnsBOVShU,1126
 unique_toolkit/evaluators/context_relevancy/prompts.py,sha256=gTlWP7fDuxhrXhCYNCqXMbCey_DalZMdi5l-a6RHgk0,713
 unique_toolkit/evaluators/context_relevancy/service.py,sha256=9hzdMuF4A4T97-3X3zcXgrDISLn1bleZ6tTL1bHa9dQ,1722
 unique_toolkit/evaluators/context_relevancy/utils.py,sha256=E9ljdRNbwYlx04fQDLvgF4SwxvlTJT0vE328PlUF6KA,5191
@@ -47,18 +47,18 @@ unique_toolkit/evaluators/schemas.py,sha256=Jaue6Uhx75X1CyHKWj8sT3RE1JZXTqoLtfLt
 unique_toolkit/language_model/__init__.py,sha256=jWko_vQj48wjnpTtlkg8iNdef0SMI3FN2kGywXRTMzg,1880
 unique_toolkit/language_model/builder.py,sha256=nsRqWO_2dgFehK5CgtqR5aqXgYUU0QL6mR0lALPrQXM,1898
 unique_toolkit/language_model/constants.py,sha256=B-topqW0r83dkC_25DeQfnPk3n53qzIHUCBS7YJ0-1U,119
-unique_toolkit/language_model/functions.py,sha256=
-unique_toolkit/language_model/infos.py,sha256
+unique_toolkit/language_model/functions.py,sha256=I5jHhHsKoq7GwEQyTrM8LXB2n_6dvMAk7UklenjuHSY,7945
+unique_toolkit/language_model/infos.py,sha256=-axWHj55mp6tZfX_3i-FSkfh8e9fwORXWMfi9xQ_UjA,12232
 unique_toolkit/language_model/prompt.py,sha256=JSawaLjQg3VR-E2fK8engFyJnNdk21zaO8pPIodzN4Q,3991
-unique_toolkit/language_model/schemas.py,sha256=
-unique_toolkit/language_model/service.py,sha256=
+unique_toolkit/language_model/schemas.py,sha256=76FtgWc0qtk9fpEwoecA59WxG__aR7_kYyWJooHIaF8,8297
+unique_toolkit/language_model/service.py,sha256=GupYD4uDZjy1TfVQW3jichmgQwiSgQCj350FtL4O0W4,5569
 unique_toolkit/language_model/utils.py,sha256=bPQ4l6_YO71w-zaIPanUUmtbXC1_hCvLK0tAFc3VCRc,1902
 unique_toolkit/short_term_memory/__init__.py,sha256=2mI3AUrffgH7Yt-xS57EGqnHf7jnn6xquoKEhJqk3Wg,185
 unique_toolkit/short_term_memory/constants.py,sha256=698CL6-wjup2MvU19RxSmQk3gX7aqW_OOpZB7sbz_Xg,34
 unique_toolkit/short_term_memory/functions.py,sha256=3WiK-xatY5nh4Dr5zlDUye1k3E6kr41RiscwtTplw5k,4484
 unique_toolkit/short_term_memory/schemas.py,sha256=OhfcXyF6ACdwIXW45sKzjtZX_gkcJs8FEZXcgQTNenw,1406
 unique_toolkit/short_term_memory/service.py,sha256=gdsVzoNqTXmLoBR_-p_lJlZDBo8L7Cr5EKchTNVJg1Q,5233
-unique_toolkit-0.6.
-unique_toolkit-0.6.
-unique_toolkit-0.6.
-unique_toolkit-0.6.
+unique_toolkit-0.6.2.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
+unique_toolkit-0.6.2.dist-info/METADATA,sha256=X3vk5KZ3npmcio6kiyCcOKhw2WE7g4zLCkA-urhZsSA,19409
+unique_toolkit-0.6.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+unique_toolkit-0.6.2.dist-info/RECORD,,
{unique_toolkit-0.6.0.dist-info → unique_toolkit-0.6.2.dist-info}/LICENSE: File without changes
{unique_toolkit-0.6.0.dist-info → unique_toolkit-0.6.2.dist-info}/WHEEL: File without changes