unique_toolkit 0.0.2__py3-none-any.whl → 0.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- unique_toolkit/app/init_logging.py +31 -0
- unique_toolkit/app/init_sdk.py +41 -0
- unique_toolkit/app/performance/async_executor.py +186 -0
- unique_toolkit/app/performance/async_wrapper.py +28 -0
- unique_toolkit/app/schemas.py +54 -0
- unique_toolkit/app/verification.py +58 -0
- unique_toolkit/chat/schemas.py +30 -0
- unique_toolkit/chat/service.py +380 -0
- unique_toolkit/chat/state.py +60 -0
- unique_toolkit/chat/utils.py +25 -0
- unique_toolkit/content/schemas.py +90 -0
- unique_toolkit/content/service.py +356 -0
- unique_toolkit/content/utils.py +188 -0
- unique_toolkit/embedding/schemas.py +5 -0
- unique_toolkit/embedding/service.py +89 -0
- unique_toolkit/language_model/infos.py +305 -0
- unique_toolkit/language_model/schemas.py +168 -0
- unique_toolkit/language_model/service.py +261 -0
- unique_toolkit/language_model/utils.py +44 -0
- unique_toolkit-0.5.1.dist-info/METADATA +138 -0
- unique_toolkit-0.5.1.dist-info/RECORD +24 -0
- unique_toolkit-0.0.2.dist-info/METADATA +0 -33
- unique_toolkit-0.0.2.dist-info/RECORD +0 -5
- {unique_toolkit-0.0.2.dist-info → unique_toolkit-0.5.1.dist-info}/LICENSE +0 -0
- {unique_toolkit-0.0.2.dist-info → unique_toolkit-0.5.1.dist-info}/WHEEL +0 -0
@@ -0,0 +1,261 @@
|
|
1
|
+
import logging
|
2
|
+
from typing import Optional, cast
|
3
|
+
|
4
|
+
import unique_sdk
|
5
|
+
|
6
|
+
from unique_toolkit.app.performance.async_wrapper import async_warning, to_async
|
7
|
+
from unique_toolkit.chat.state import ChatState
|
8
|
+
from unique_toolkit.content.schemas import ContentChunk
|
9
|
+
from unique_toolkit.language_model.infos import LanguageModelName
|
10
|
+
from unique_toolkit.language_model.schemas import (
|
11
|
+
LanguageModelMessages,
|
12
|
+
LanguageModelResponse,
|
13
|
+
LanguageModelStreamResponse,
|
14
|
+
LanguageModelTool,
|
15
|
+
)
|
16
|
+
|
17
|
+
|
18
|
+
class LanguageModelService:
    """Service to interact with language models deployed on the Unique platform.

    Wraps the ``unique_sdk`` completion endpoints with synchronous and
    asynchronous (decorated) variants, both with and without streaming.
    """

    def __init__(self, state: ChatState, logger: Optional[logging.Logger] = None):
        """
        Args:
            state (ChatState): Chat state carrying user, company and chat identifiers.
            logger (Optional[logging.Logger]): Logger to use. Defaults to the
                module-level logger.
        """
        self.state = state
        self.logger = logger or logging.getLogger(__name__)

    _DEFAULT_COMPLETE_TIMEOUT = 240_000  # milliseconds
    _DEFAULT_COMPLETE_TEMPERATURE = 0.0

    def complete(
        self,
        messages: LanguageModelMessages,
        model_name: LanguageModelName,
        temperature: float = _DEFAULT_COMPLETE_TEMPERATURE,
        timeout: int = _DEFAULT_COMPLETE_TIMEOUT,
        tools: Optional[list[LanguageModelTool]] = None,
    ) -> LanguageModelResponse:
        """
        Calls the completion endpoint synchronously without streaming the response.

        Args:
            messages (LanguageModelMessages): The LanguageModelMessages obj to complete.
            model_name (LanguageModelName): The model name.
            temperature (float): The temperature value. Defaults to 0.
            timeout (int): The timeout value in milliseconds. Defaults to 240_000.
            tools (Optional[list[LanguageModelTool]]): The tools to use. Defaults to None.

        Returns:
            LanguageModelResponse: The LanguageModelResponse object.
        """
        return self._trigger_complete(
            messages=messages,
            model_name=model_name,
            temperature=temperature,
            timeout=timeout,
            tools=tools,
        )

    @to_async
    @async_warning
    def async_complete(
        self,
        messages: LanguageModelMessages,
        model_name: LanguageModelName,
        temperature: float = _DEFAULT_COMPLETE_TEMPERATURE,
        timeout: int = _DEFAULT_COMPLETE_TIMEOUT,
        tools: Optional[list[LanguageModelTool]] = None,
    ):
        """
        Calls the completion endpoint asynchronously without streaming the response.

        Args:
            messages (LanguageModelMessages): The messages to complete.
            model_name (LanguageModelName): The model name.
            temperature (float): The temperature value. Defaults to 0.
            timeout (int): The timeout value in milliseconds. Defaults to 240_000.
            tools (Optional[list[LanguageModelTool]]): The tools to use. Defaults to None.

        Returns:
            LanguageModelResponse: The LanguageModelResponse object.
        """
        return self._trigger_complete(
            messages=messages,
            model_name=model_name,
            temperature=temperature,
            timeout=timeout,
            tools=tools,
        )

    def stream_complete(
        self,
        messages: LanguageModelMessages,
        model_name: LanguageModelName,
        content_chunks: Optional[list[ContentChunk]] = None,
        debug_info: Optional[dict] = None,
        temperature: float = _DEFAULT_COMPLETE_TEMPERATURE,
        timeout: int = _DEFAULT_COMPLETE_TIMEOUT,
        tools: Optional[list[LanguageModelTool]] = None,
        start_text: Optional[str] = None,
    ):
        """
        Streams a completion in the chat session synchronously.

        Args:
            messages (LanguageModelMessages): The LanguageModelMessages object to stream.
            content_chunks (Optional[list[ContentChunk]]): The ContentChunks objects.
                Defaults to an empty list.
            model_name (LanguageModelName): The language model to use for the completion.
            debug_info (Optional[dict]): The debug information. Defaults to an empty dict.
            temperature (float): The temperature value. Defaults to 0.
            timeout (int): The timeout value in milliseconds. Defaults to 240_000.
            tools (Optional[list[LanguageModelTool]]): The tools to use. Defaults to None.
            start_text (Optional[str]): The start text. Defaults to None.

        Returns:
            The LanguageModelStreamResponse object once the stream has finished.
        """
        # None defaults instead of mutable [] / {} defaults; normalize here.
        return self._trigger_stream_complete(
            messages=messages,
            content_chunks=content_chunks if content_chunks is not None else [],
            model_name=model_name,
            debug_info=debug_info if debug_info is not None else {},
            timeout=timeout,
            temperature=temperature,
            tools=tools,
            start_text=start_text,
        )

    @to_async
    @async_warning
    def async_stream_complete(
        self,
        messages: LanguageModelMessages,
        model_name: LanguageModelName,
        content_chunks: Optional[list[ContentChunk]] = None,
        debug_info: Optional[dict] = None,
        temperature: float = _DEFAULT_COMPLETE_TEMPERATURE,
        timeout: int = _DEFAULT_COMPLETE_TIMEOUT,
        tools: Optional[list[LanguageModelTool]] = None,
        start_text: Optional[str] = None,
    ):
        """
        Streams a completion in the chat session asynchronously.

        Args:
            messages (LanguageModelMessages): The LanguageModelMessages object to stream.
            content_chunks (Optional[list[ContentChunk]]): The content chunks.
                Defaults to an empty list.
            model_name (LanguageModelName): The language model to use for the completion.
            debug_info (Optional[dict]): The debug information. Defaults to an empty dict.
            temperature (float): The temperature value. Defaults to 0.
            timeout (int): The timeout value in milliseconds. Defaults to 240_000.
            tools (Optional[list[LanguageModelTool]]): The tools to use. Defaults to None.
            start_text (Optional[str]): The start text. Defaults to None.

        Returns:
            The LanguageModelStreamResponse object once the stream has finished.
        """
        return self._trigger_stream_complete(
            messages=messages,
            content_chunks=content_chunks if content_chunks is not None else [],
            model_name=model_name,
            debug_info=debug_info if debug_info is not None else {},
            timeout=timeout,
            temperature=temperature,
            tools=tools,
            start_text=start_text,
        )

    def _trigger_complete(
        self,
        messages: LanguageModelMessages,
        model_name: LanguageModelName,
        temperature: float,
        timeout: int,
        tools: Optional[list[LanguageModelTool]] = None,
    ) -> LanguageModelResponse:
        """Performs the non-streaming SDK completion call and wraps the result."""
        options = self._add_tools_to_options({}, tools)
        # Serialize the pydantic messages object into plain dicts for the SDK.
        message_dicts = messages.model_dump(exclude_none=True)
        try:
            response = unique_sdk.ChatCompletion.create(
                company_id=self.state.company_id,
                # TODO change or extend types in unique_sdk
                model=model_name.name,  # type: ignore
                messages=cast(
                    list[unique_sdk.Integrated.ChatCompletionRequestMessage],
                    message_dicts,
                ),
                timeout=timeout,
                temperature=temperature,
                options=options,  # type: ignore
            )
        except Exception as e:
            # Lazy %-formatting; bare raise preserves the original traceback.
            self.logger.error("Error completing: %s", e)
            raise

        return LanguageModelResponse(**response)

    def _trigger_stream_complete(
        self,
        messages: LanguageModelMessages,
        model_name: LanguageModelName,
        content_chunks: list[ContentChunk],
        debug_info: dict,
        timeout: int,
        temperature: float,
        tools: Optional[list[LanguageModelTool]] = None,
        start_text: Optional[str] = None,
    ) -> LanguageModelStreamResponse:
        """Performs the streaming SDK completion call and wraps the result."""
        options = self._add_tools_to_options({}, tools)
        # Map toolkit content chunks onto the SDK's camelCase search results.
        search_context = [
            unique_sdk.Integrated.SearchResult(
                id=chunk.id,
                chunkId=chunk.chunk_id,
                key=chunk.key,
                title=chunk.title,
                url=chunk.url,
                startPage=chunk.start_page,
                endPage=chunk.end_page,
                order=chunk.order,
                object=chunk.object,
            )  # type: ignore
            for chunk in content_chunks
        ]

        message_dicts = messages.model_dump(exclude_none=True)

        try:
            response = unique_sdk.Integrated.chat_stream_completion(
                user_id=self.state.user_id,
                company_id=self.state.company_id,
                assistantMessageId=self.state.assistant_message_id,  # type: ignore
                userMessageId=self.state.user_message_id,  # type: ignore
                messages=cast(
                    list[unique_sdk.Integrated.ChatCompletionRequestMessage],
                    message_dicts,
                ),
                chatId=self.state.chat_id,
                searchContext=search_context,
                # TODO change or extend types in unique_sdk
                model=model_name.name,  # type: ignore
                timeout=timeout,
                temperature=temperature,
                assistantId=self.state.assistant_id,
                debugInfo=debug_info,
                options=options,  # type: ignore
                startText=start_text,
            )
        except Exception as e:
            self.logger.error("Error streaming completion: %s", e)
            raise

        return LanguageModelStreamResponse(**response)

    @staticmethod
    def _add_tools_to_options(
        options: dict, tools: Optional[list[LanguageModelTool]]
    ) -> dict:
        """Adds function-tool definitions to the SDK options dict.

        Mutates and returns *options*; a no-op when *tools* is falsy.
        """
        if tools:
            options["tools"] = [
                {
                    "type": "function",
                    "function": tool.model_dump(exclude_none=True),
                }
                for tool in tools
            ]
        return options
|
@@ -0,0 +1,44 @@
|
|
1
|
+
import json as j
|
2
|
+
import re
|
3
|
+
|
4
|
+
|
5
|
+
def convert_string_to_json(string: str):
    """
    Removes any json tags and converts string to json.

    Extracts the last JSON object found in the string (via
    ``find_last_json_object``) and parses it.

    Args:
        string: The string to convert to json.

    Returns:
        dict: The json object.

    Raises:
        ValueError: If the string cannot be converted to json.
    """
    cleaned_result = find_last_json_object(string)
    if not cleaned_result:
        raise ValueError("Could not find a valid json object in the string.")
    try:
        parsed = j.loads(cleaned_result)
    except j.JSONDecodeError as err:
        # Chain the original decode error so the root cause stays visible.
        raise ValueError("Could not convert the string to JSON.") from err
    return parsed
|
26
|
+
|
27
|
+
|
28
|
+
def find_last_json_object(text: str) -> str | None:
|
29
|
+
"""
|
30
|
+
Finds the last json object in a string.
|
31
|
+
|
32
|
+
Args:
|
33
|
+
text: The text to search for the last json object.
|
34
|
+
|
35
|
+
Returns:
|
36
|
+
str | None: The last json object as a string or None if no json object was found.
|
37
|
+
"""
|
38
|
+
|
39
|
+
pattern = r"\{(?:[^{}]|\{[^{}]*\})*\}"
|
40
|
+
matches = re.findall(pattern, text)
|
41
|
+
if matches:
|
42
|
+
return matches[-1]
|
43
|
+
else:
|
44
|
+
return None
|
@@ -0,0 +1,138 @@
|
|
1
|
+
Metadata-Version: 2.1
|
2
|
+
Name: unique_toolkit
|
3
|
+
Version: 0.5.1
|
4
|
+
Summary:
|
5
|
+
License: MIT
|
6
|
+
Author: Martin Fadler
|
7
|
+
Author-email: martin.fadler@unique.ch
|
8
|
+
Requires-Python: >=3.11,<4.0
|
9
|
+
Classifier: License :: OSI Approved :: MIT License
|
10
|
+
Classifier: Programming Language :: Python :: 3
|
11
|
+
Classifier: Programming Language :: Python :: 3.11
|
12
|
+
Classifier: Programming Language :: Python :: 3.12
|
13
|
+
Requires-Dist: numpy (>=2.0.1,<3.0.0)
|
14
|
+
Requires-Dist: pydantic (>=2.8.2,<3.0.0)
|
15
|
+
Requires-Dist: pyhumps (>=3.8.0,<4.0.0)
|
16
|
+
Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
|
17
|
+
Requires-Dist: regex (>=2024.5.15,<2025.0.0)
|
18
|
+
Requires-Dist: requests (>=2.32.3,<3.0.0)
|
19
|
+
Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
|
20
|
+
Requires-Dist: typing-extensions (>=4.9.0,<5.0.0)
|
21
|
+
Requires-Dist: unique-sdk (>=0.8.10,<0.9.0)
|
22
|
+
Description-Content-Type: text/markdown
|
23
|
+
|
24
|
+
# Unique Toolkit
|
25
|
+
|
26
|
+
This package provides high-level abstractions and methods on top of `unique_sdk` to ease application development for the Unique Platform.
|
27
|
+
|
28
|
+
The Toolkit is structured along the following domains:
|
29
|
+
- `unique_toolkit.chat`
|
30
|
+
- `unique_toolkit.content`
|
31
|
+
- `unique_toolkit.embedding`
|
32
|
+
- `unique_toolkit.language_model`
|
33
|
+
|
34
|
+
Each domain comprises a service class (in `service.py`) which encapsulates the basic functionalities to interact with the domain entities, the schemas
|
35
|
+
(in `schemas.py`) used in the service and required for interacting with the service functions, utility functions (in `utils.py`) which give additional
|
36
|
+
functionality to interact with the domain entities (all domains except embedding) and other domain specific functionalities which are explained in the respective domain documentation.
|
37
|
+
|
38
|
+
In addition, the `unique_toolkit.app` module provides functions to initialize apps that interact with the Unique platform. It also includes some utility functions to run async tasks in parallel (async webserver and app implementation required).
|
39
|
+
|
40
|
+
## Changelog
|
41
|
+
|
42
|
+
See the [CHANGELOG.md](https://github.com/Unique-AG/ai/blob/main/unique_toolkit/CHANGELOG.md) file for details on changes and version history.
|
43
|
+
|
44
|
+
# Domains
|
45
|
+
|
46
|
+
## App
|
47
|
+
|
48
|
+
The `unique_toolkit.app` module encompasses functions for initializing and securing apps that will interact with the Unique platform.
|
49
|
+
|
50
|
+
- `init_logging.py` can be used to initialize the logger either with the unique dictConfig or any other dictConfig.
|
51
|
+
- `init_sdk.py` can be used to initialize the sdk using the correct env variables and retrieving the endpoint secret.
|
52
|
+
- `schemas.py` contains the Event schema which can be used to parse and validate the unique.chat.external-module.chosen event.
|
53
|
+
- `verification.py` can be used to verify the endpoint secret and construct the event.
|
54
|
+
|
55
|
+
## Chat
|
56
|
+
|
57
|
+
The `unique_toolkit.chat` module encompasses all chat related functionality.
|
58
|
+
|
59
|
+
- `state.py` comprises the ChatState which is used to store the current state of the chat interaction and the user information.
|
60
|
+
- `service.py` comprises the ChatService and provides an interface to manage and load the chat history and interact with the chat ui, e.g., creating a new assistant message.
|
61
|
+
- `schemas.py` comprises all relevant schemas, e.g., ChatMessage, used in the ChatService.
|
62
|
+
- `utils.py` comprises utility functions to use and convert ChatMessage objects in assistants, e.g., convert_chat_history_to_injectable_string converts the chat history to a string that can be injected into a prompt.
|
63
|
+
|
64
|
+
## Content
|
65
|
+
|
66
|
+
The `unique_toolkit.content` module encompasses all content related functionality. Content can be any type of textual data that is stored in the Knowledgebase on the Unique platform. During the ingestion of the content, the content is parsed, split in chunks, indexed, and stored in the database.
|
67
|
+
|
68
|
+
- `service.py` comprises the ContentService and provides an interface to interact with the content, e.g., search content, search content chunks, upload and download content.
|
69
|
+
- `schemas.py` comprises all relevant schemas, e.g., Content and ContentChunk, used in the ContentService.
|
70
|
+
- `utils.py` comprises utility functions to manipulate Content and ContentChunk objects, e.g., sort_content_chunks and merge_content_chunks.
|
71
|
+
|
72
|
+
## Embedding
|
73
|
+
|
74
|
+
The `unique_toolkit.embedding` module encompasses all embedding related functionality. Embeddings are used to represent textual data in a high-dimensional space. The embeddings can be used to calculate the similarity between two texts, for instance.
|
75
|
+
|
76
|
+
- `service.py` encompasses the EmbeddingService and provides an interface to interact with the embeddings, e.g., embed text and calculate the similarity between two texts.
|
77
|
+
- `schemas.py` comprises all relevant schemas, e.g., Embeddings, used in the EmbeddingService.
|
78
|
+
|
79
|
+
## Language Model
|
80
|
+
|
81
|
+
The `unique_toolkit.language_model` module encompasses all language model related functionality and information on the different language models deployed through the
|
82
|
+
Unique platform.
|
83
|
+
|
84
|
+
- `infos.py` comprises the information on all language models deployed through the Unique platform. We recommend to use the LanguageModel class, initialized with the LanguageModelName, e.g., LanguageModel(LanguageModelName.AZURE_GPT_35_TURBO_16K) to get the information on the specific language model like the name, version, token limits or retirement date.
|
85
|
+
- `service.py` comprises the LanguageModelService and provides an interface to interact with the language models, e.g., complete or stream_complete.
|
86
|
+
- `schemas.py` comprises all relevant schemas, e.g., LanguageModelResponse, used in the LanguageModelService.
|
87
|
+
- `utils.py` comprises utility functions to parse the output of the language model, e.g., convert_string_to_json finds and parses the last json object in a string.
|
88
|
+
|
89
|
+
# Development instructions
|
90
|
+
|
91
|
+
1. Install poetry on your system (through `brew` or `pipx`).
|
92
|
+
|
93
|
+
2. Install `pyenv` and install python 3.11. `pyenv` is recommended as otherwise poetry uses the python version used to install itself and not the user preferred python version.
|
94
|
+
|
95
|
+
3. If you then run `python --version` in your terminal, you should be able to see python version as specified in `.python-version`.
|
96
|
+
|
97
|
+
4. Then finally run `poetry install` to install the package and all dependencies.
|
98
|
+
# Changelog
|
99
|
+
|
100
|
+
All notable changes to this project will be documented in this file.
|
101
|
+
|
102
|
+
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
103
|
+
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
104
|
+
|
105
|
+
## [0.5.1] - 2024-07-23
|
106
|
+
- correct documentation
|
107
|
+
|
108
|
+
## [0.5.0] - 2024-07-23
|
109
|
+
### Added
|
110
|
+
- Added `unique_toolkit.app` module with the following components:
|
111
|
+
- `init_logging.py` for initializing the logger.
|
112
|
+
- `init_sdk.py` for initializing the SDK with environment variables.
|
113
|
+
- `schemas.py` containing the Event schema.
|
114
|
+
- `verification.py` for verifying the endpoint secret and constructing the event.
|
115
|
+
|
116
|
+
- Added `unique_toolkit.chat` module with the following components:
|
117
|
+
- `state.py` containing the `ChatState` class.
|
118
|
+
- `service.py` containing the `ChatService` class for managing chat interactions.
|
119
|
+
- `schemas.py` containing relevant schemas such as `ChatMessage`.
|
120
|
+
- `utils.py` with utility functions for chat interactions.
|
121
|
+
|
122
|
+
- Added `unique_toolkit.content` module with the following components:
|
123
|
+
- `service.py` containing the `ContentService` class for interacting with content.
|
124
|
+
- `schemas.py` containing relevant schemas such as `Content` and `ContentChunk`.
|
125
|
+
- `utils.py` with utility functions for manipulating content objects.
|
126
|
+
|
127
|
+
- Added `unique_toolkit.embedding` module with the following components:
|
128
|
+
- `service.py` containing the `EmbeddingService` class for working with embeddings.
|
129
|
+
- `schemas.py` containing relevant schemas such as `Embeddings`.
|
130
|
+
|
131
|
+
- Added `unique_toolkit.language_model` module with the following components:
|
132
|
+
- `infos.py` containing information on language models deployed on the Unique platform.
|
133
|
+
- `service.py` containing the `LanguageModelService` class for interacting with language models.
|
134
|
+
- `schemas.py` containing relevant schemas such as `LanguageModelResponse`.
|
135
|
+
- `utils.py` with utility functions for parsing language model output.
|
136
|
+
|
137
|
+
## [0.0.2] - 2024-07-10
|
138
|
+
- Initial release of `unique_toolkit`.
|
@@ -0,0 +1,24 @@
|
|
1
|
+
unique_toolkit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
2
|
+
unique_toolkit/app/init_logging.py,sha256=Sh26SRxOj8i8dzobKhYha2lLrkrMTHfB1V4jR3h23gQ,678
|
3
|
+
unique_toolkit/app/init_sdk.py,sha256=Nv4Now4pMfM0AgRhbtatLpm_39rKxn0WmRLwmPhRl-8,1285
|
4
|
+
unique_toolkit/app/performance/async_executor.py,sha256=ImekDRWkFeXuIYFnCpSJAqE0RP-liK-5R8_buZGqYaE,6435
|
5
|
+
unique_toolkit/app/performance/async_wrapper.py,sha256=Xli5zH9RFdbBJnlBQS_kP7TGqXhPNou5WKst4FLWzGo,770
|
6
|
+
unique_toolkit/app/schemas.py,sha256=2PYnMmH3eBzmlIhHHir44WN5YHGYlUWEpdY7fyQkK8E,1098
|
7
|
+
unique_toolkit/app/verification.py,sha256=UZqTHg3PX_QxMjeLH_BVBYoMVqMnMpeMoqvyTBKDqj8,1996
|
8
|
+
unique_toolkit/chat/schemas.py,sha256=aEXf0tX1ljHjb73jy1pilUKivrvle54iDLiyQ093u7c,794
|
9
|
+
unique_toolkit/chat/service.py,sha256=zxMoOMKlLJNwtRAO8GCpX9tGwVUsbgHWEPWztGe6htw,12300
|
10
|
+
unique_toolkit/chat/state.py,sha256=IljPSqbzQq9TMD9cnbkII8k4eTtkyYOGzRe-DuR-Nic,1831
|
11
|
+
unique_toolkit/chat/utils.py,sha256=ihm-wQykBWhB4liR3LnwPVPt_qGW6ETq21Mw4HY0THE,854
|
12
|
+
unique_toolkit/content/schemas.py,sha256=u3b0R6bIEjbI4zGMoBRi-bdZy_SeRKtMRi8PKW6yMZ4,1969
|
13
|
+
unique_toolkit/content/service.py,sha256=Y7MsKoQnuDAp3cAtNawzmH_rzfSKOaib8jjA3HsflTs,11372
|
14
|
+
unique_toolkit/content/utils.py,sha256=x3ABo8ZCRm3YJAQwDtrr82z77DmW4Mei7KCIITjP0fk,6897
|
15
|
+
unique_toolkit/embedding/schemas.py,sha256=1GvKCaSk4jixzVQ2PKq8yDqwGEVY_hWclYtoAr6CC2g,96
|
16
|
+
unique_toolkit/embedding/service.py,sha256=IjBag3koSSeoQTo9836t0K5tkTbu0otvytVWnYQJHw4,2403
|
17
|
+
unique_toolkit/language_model/infos.py,sha256=NhAkeW7PyusSIHCMvwRikLlzGG4tOXSLf_Fnq7V9rNE,8881
|
18
|
+
unique_toolkit/language_model/schemas.py,sha256=kTGSGT3ygrH3guQELOWpxN4MTgEPuudi-CTvRu-zCcI,4377
|
19
|
+
unique_toolkit/language_model/service.py,sha256=VHixKmi6-BP-StDQdomS-J-EWKIhHvDjNCi5zrP3mpM,9749
|
20
|
+
unique_toolkit/language_model/utils.py,sha256=WBPj1XKkDgxy_-T8HCZvsfkkSzj_1w4UZzNmyvdbBLY,1081
|
21
|
+
unique_toolkit-0.5.1.dist-info/LICENSE,sha256=bIeCWCYuoUU_MzNdg48-ubJSVm7qxakaRbzTiJ5uxrs,1065
|
22
|
+
unique_toolkit-0.5.1.dist-info/METADATA,sha256=4_lwdj3XE4JxtY2fPtHdZKboLC8jUwQyLFPSKMqMjXY,8062
|
23
|
+
unique_toolkit-0.5.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
|
24
|
+
unique_toolkit-0.5.1.dist-info/RECORD,,
|
@@ -1,33 +0,0 @@
|
|
1
|
-
Metadata-Version: 2.1
|
2
|
-
Name: unique_toolkit
|
3
|
-
Version: 0.0.2
|
4
|
-
Summary:
|
5
|
-
License: MIT
|
6
|
-
Author: Unique Data Science
|
7
|
-
Author-email: datascience@unique.ch
|
8
|
-
Requires-Python: >=3.11,<4.0
|
9
|
-
Classifier: License :: OSI Approved :: MIT License
|
10
|
-
Classifier: Programming Language :: Python :: 3
|
11
|
-
Classifier: Programming Language :: Python :: 3.11
|
12
|
-
Classifier: Programming Language :: Python :: 3.12
|
13
|
-
Requires-Dist: pydantic (>=2.8.2,<3.0.0)
|
14
|
-
Requires-Dist: pyhumps (>=3.8.0,<4.0.0)
|
15
|
-
Requires-Dist: typing-extensions (>=4.9.0,<5.0.0)
|
16
|
-
Requires-Dist: unique-sdk (>=0.8.10,<0.9.0)
|
17
|
-
Description-Content-Type: text/markdown
|
18
|
-
|
19
|
-
# Unique Tool Kit
|
20
|
-
|
21
|
-
This package provides highlevel abstractions and methods on top of `unique_sdk` to ease application development for the Unique Platform.
|
22
|
-
|
23
|
-
|
24
|
-
# Development instructions
|
25
|
-
|
26
|
-
1. Install poetry on your system (through `brew` or `pipx`).
|
27
|
-
|
28
|
-
2. Install `pyenv` and install python 3.11. `pyenv` is recommended as otherwise poetry uses the python version used to install itself and not the user preferred python version.
|
29
|
-
|
30
|
-
3. If you then run `python --version` in your terminal, you should be able to see python version as specified in `.python-version`.
|
31
|
-
|
32
|
-
4. Then finally run `poetry install` to install the package and all dependencies.
|
33
|
-
|
@@ -1,5 +0,0 @@
|
|
1
|
-
unique_toolkit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
2
|
-
unique_toolkit-0.0.2.dist-info/LICENSE,sha256=bIeCWCYuoUU_MzNdg48-ubJSVm7qxakaRbzTiJ5uxrs,1065
|
3
|
-
unique_toolkit-0.0.2.dist-info/METADATA,sha256=caF_T0teqWqTgctUU9qI2F2CtxlU3TKYuBQyGlEzkwk,1233
|
4
|
-
unique_toolkit-0.0.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
|
5
|
-
unique_toolkit-0.0.2.dist-info/RECORD,,
|
File without changes
|
File without changes
|