unique_toolkit 0.5.55__py3-none-any.whl → 0.6.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this release of unique_toolkit has been flagged as potentially problematic.
- unique_toolkit/_common/validate_required_values.py +21 -0
- unique_toolkit/app/__init__.py +20 -0
- unique_toolkit/app/schemas.py +73 -7
- unique_toolkit/chat/__init__.py +5 -4
- unique_toolkit/chat/constants.py +3 -0
- unique_toolkit/chat/functions.py +805 -0
- unique_toolkit/chat/schemas.py +11 -11
- unique_toolkit/chat/service.py +483 -432
- unique_toolkit/content/__init__.py +1 -0
- unique_toolkit/content/constants.py +2 -0
- unique_toolkit/content/functions.py +475 -0
- unique_toolkit/content/service.py +163 -315
- unique_toolkit/content/utils.py +32 -0
- unique_toolkit/embedding/__init__.py +3 -0
- unique_toolkit/embedding/constants.py +2 -0
- unique_toolkit/embedding/functions.py +79 -0
- unique_toolkit/embedding/service.py +47 -34
- unique_toolkit/evaluators/__init__.py +1 -0
- unique_toolkit/evaluators/constants.py +1 -0
- unique_toolkit/evaluators/context_relevancy/constants.py +3 -3
- unique_toolkit/evaluators/context_relevancy/utils.py +5 -2
- unique_toolkit/evaluators/hallucination/utils.py +2 -1
- unique_toolkit/language_model/__init__.py +1 -0
- unique_toolkit/language_model/constants.py +4 -0
- unique_toolkit/language_model/functions.py +229 -0
- unique_toolkit/language_model/service.py +76 -343
- unique_toolkit/short_term_memory/__init__.py +5 -0
- unique_toolkit/short_term_memory/constants.py +1 -0
- unique_toolkit/short_term_memory/functions.py +175 -0
- unique_toolkit/short_term_memory/service.py +153 -27
- {unique_toolkit-0.5.55.dist-info → unique_toolkit-0.6.1.dist-info}/METADATA +38 -8
- unique_toolkit-0.6.1.dist-info/RECORD +64 -0
- unique_toolkit-0.5.55.dist-info/RECORD +0 -50
- {unique_toolkit-0.5.55.dist-info → unique_toolkit-0.6.1.dist-info}/LICENSE +0 -0
- {unique_toolkit-0.5.55.dist-info → unique_toolkit-0.6.1.dist-info}/WHEEL +0 -0
unique_toolkit/language_model/service.py

@@ -1,35 +1,74 @@
 import logging
-from typing import Optional, Type, cast
+from typing import Optional, Type
 
-import unique_sdk
 from pydantic import BaseModel
-
-
-from unique_toolkit.
-from unique_toolkit.
+from typing_extensions import deprecated
+
+from unique_toolkit._common.validate_required_values import validate_required_values
+from unique_toolkit.app.schemas import BaseEvent, ChatEvent, Event
+from unique_toolkit.language_model.constants import (
+    DEFAULT_COMPLETE_TEMPERATURE,
+    DEFAULT_COMPLETE_TIMEOUT,
+    DOMAIN_NAME,
+)
+from unique_toolkit.language_model.functions import (
+    complete,
+    complete_async,
+)
 from unique_toolkit.language_model.infos import LanguageModelName
 from unique_toolkit.language_model.schemas import (
     LanguageModelMessages,
     LanguageModelResponse,
-    LanguageModelStreamResponse,
     LanguageModelTool,
 )
 
+logger = logging.getLogger(f"toolkit.{DOMAIN_NAME}.{__name__}")
+
 
-class LanguageModelService(BaseService):
+class LanguageModelService:
     """
     Provides methods to interact with the Language Model by generating responses.
 
-
-
-
+    Args:
+        company_id (str | None, optional): The company identifier. Defaults to None.
+        user_id (str | None, optional): The user identifier. Defaults to None.
+        chat_id (str | None, optional): The chat identifier. Defaults to None.
+        assistant_id (str | None, optional): The assistant identifier. Defaults to None.
     """
 
-    def __init__(
-
+    def __init__(
+        self,
+        event: Event | BaseEvent | None = None,
+        company_id: str | None = None,
+        user_id: str | None = None,
+        chat_id: str | None = None,
+        assistant_id: str | None = None,
+    ):
+        self._event = event
+        self.company_id = company_id
+        self.user_id = user_id
+        self.chat_id = chat_id
+        self.assistant_id = assistant_id
+
+        if event:
+            self.company_id = event.company_id
+            self.user_id = event.user_id
+            if isinstance(event, (ChatEvent, Event)):
+                self.chat_id = event.payload.chat_id
+                self.assistant_id = event.payload.assistant_id
+
+    @property
+    @deprecated(
+        "The event property is deprecated and will be removed in a future version."
+    )
+    def event(self) -> Event | BaseEvent | None:
+        """
+        Get the event object (deprecated).
 
-
-
+        Returns:
+            Event | BaseEvent | None: The event object.
+        """
+        return self._event
 
     def complete(
         self,
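The hunk above replaces the old event-only constructor: LanguageModelService can now be built either from an event or from explicit identifiers, and the event property is kept only as a deprecated accessor. A minimal usage sketch (the identifier strings are placeholders):

from unique_toolkit.language_model.service import LanguageModelService

# Explicit identifiers; all four are optional and default to None.
service = LanguageModelService(company_id="company_123", user_id="user_123")

# Alternatively, pass event=<an Event/ChatEvent/BaseEvent>; company_id and
# user_id are then copied from the event, and chat_id/assistant_id from its
# payload, overriding any explicitly passed values.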
@@ -41,116 +80,57 @@ class LanguageModelService(BaseService):
         structured_output_model: Optional[Type[BaseModel]] = None,
         structured_output_enforce_schema: bool = False,
         other_options: Optional[dict] = None,
-    ):
+    ) -> LanguageModelResponse:
         """
         Calls the completion endpoint synchronously without streaming the response.
-
-        Args:
-            messages (LanguageModelMessages): The LanguageModelMessages obj to complete.
-            model_name (LanguageModelName | str): The model name.
-            temperature (float): The temperature value. Defaults to 0.
-            timeout (int): The timeout value in milliseconds. Defaults to 240_000.
-            tools (Optional[list[LanguageModelTool]]): The tools to use. Defaults to None.
-            structured_output_model (Optional[Type[BaseModel]]): The structured output model. Defaults to None.
-            structured_output_enforce_schema (bool): Whether to enforce the schema. Defaults to False.
-            other_options (Optional[dict]): The other options to use. Defaults to None.
-
-        Returns:
-            LanguageModelResponse: The LanguageModelResponse object.
         """
-
+        [company_id] = validate_required_values([self.company_id])
+
+        return complete(
+            company_id=company_id,
             messages=messages,
             model_name=model_name,
             temperature=temperature,
+            timeout=timeout,
             tools=tools,
             other_options=other_options,
             structured_output_model=structured_output_model,
             structured_output_enforce_schema=structured_output_enforce_schema,
         )
 
-
-
-                company_id=self.event.company_id,
-                model=model,
-                messages=cast(
-                    list[unique_sdk.Integrated.ChatCompletionRequestMessage],
-                    messages_dict,
-                ),
-                timeout=timeout,
-                options=options,  # type: ignore
-            )
-            return LanguageModelResponse(**response)
-        except Exception as e:
-            self.logger.error(f"Error completing: {e}")
-            raise e
-
-    @classmethod
-    async def complete_async_util(
-        cls,
-        company_id: str,
+    async def complete_async(
+        self,
         messages: LanguageModelMessages,
         model_name: LanguageModelName | str,
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
         timeout: int = DEFAULT_COMPLETE_TIMEOUT,
         tools: Optional[list[LanguageModelTool]] = None,
-        other_options: Optional[dict] = None,
         structured_output_model: Optional[Type[BaseModel]] = None,
         structured_output_enforce_schema: bool = False,
-
+        other_options: Optional[dict] = None,
     ) -> LanguageModelResponse:
         """
         Calls the completion endpoint asynchronously without streaming the response.
-
-        This method sends a request to the completion endpoint using the provided messages, model name,
-        temperature, timeout, and optional tools. It returns a `LanguageModelResponse` object containing
-        the completed result.
-
-        Args:
-            company_id (str): The company ID associated with the request.
-            messages (LanguageModelMessages): The messages to complete.
-            model_name (LanguageModelName | str): The model name to use for the completion.
-            temperature (float): The temperature setting for the completion. Defaults to 0.
-            timeout (int): The timeout value in milliseconds for the request. Defaults to 240_000.
-            tools (Optional[list[LanguageModelTool]]): Optional list of tools to include in the request.
-            other_options (Optional[dict]): The other options to use. Defaults to None.
-            structured_output_model (Optional[Type[BaseModel]]): The structured output model. Defaults to None.
-            structured_output_enforce_schema (bool): Whether to enforce the schema. Defaults to False.
-            logger (Optional[logging.Logger], optional): The logger used to log errors. Defaults to the logger for the current module.
-
-        Returns:
-            LanguageModelResponse: The response object containing the completed result.
-
-        Raises:
-            Exception: If an error occurs during the request, an exception is raised and logged.
         """
-
+        [company_id] = validate_required_values([self.company_id])
+
+        return await complete_async(
+            company_id=company_id,
             messages=messages,
             model_name=model_name,
             temperature=temperature,
+            timeout=timeout,
             tools=tools,
             other_options=other_options,
             structured_output_model=structured_output_model,
             structured_output_enforce_schema=structured_output_enforce_schema,
         )
 
-
-
-
-
-
-                    list[unique_sdk.Integrated.ChatCompletionRequestMessage],
-                    messages_dict,
-                ),
-                timeout=timeout,
-                options=options,  # type: ignore
-            )
-            return LanguageModelResponse(**response)
-        except Exception as e:
-            logger.error(f"Error completing: {e}")  # type: ignore
-            raise e
-
-    async def complete_async(
-        self,
+    @classmethod
+    @deprecated("Use complete_async of language_model.functions instead")
+    async def complete_async_util(
+        cls,
+        company_id: str,
         messages: LanguageModelMessages,
         model_name: LanguageModelName | str,
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
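As this hunk shows, complete() and complete_async() are now thin wrappers: each validates that company_id is set via validate_required_values() and delegates to the module-level functions in language_model.functions. A hedged sketch of the two equivalent call styles, reusing the placeholder service from above (LanguageModelMessages([]) is assumed to accept a root list and stands in for real messages; the model name is a placeholder):

from unique_toolkit.language_model.functions import complete
from unique_toolkit.language_model.schemas import LanguageModelMessages

messages = LanguageModelMessages([])  # placeholder; build real messages here

# Through the service, which supplies company_id from its own state:
response = service.complete(messages=messages, model_name="gpt-4o")

# Or through the module-level function, passing company_id explicitly:
response = complete(
    company_id="company_123",  # placeholder
    messages=messages,
    model_name="gpt-4o",  # placeholder
)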
@@ -162,263 +142,16 @@ class LanguageModelService(BaseService):
     ) -> LanguageModelResponse:
         """
         Calls the completion endpoint asynchronously without streaming the response.
-
-        This method utilizes the class method `complete_async_util` to perform the asynchronous completion
-        request using the provided messages, model name, temperature, timeout, and optional tools. It
-        returns a `LanguageModelResponse` object containing the result of the completion.
-
-        Args:
-            messages (LanguageModelMessages): The messages to complete.
-            model_name (LanguageModelName | str): The model name to use for the completion.
-            temperature (float): The temperature setting for the completion. Defaults to 0.0.
-            timeout (int): The timeout value in milliseconds for the request. Defaults to 240,000.
-            tools (Optional[list[LanguageModelTool]]): Optional list of tools to include in the request.
-            structured_output_model (Optional[Type[BaseModel]]): The structured output model. Defaults to None.
-            structured_output_enforce_schema (bool): Whether to enforce the schema. Defaults to False.
-            other_options (Optional[dict]): The other options to use. Defaults to None.
-        Returns:
-            LanguageModelResponse: The response object containing the completed result.
-
-        Raises:
-            Exception: If an error occurs during the completion request.
         """
-
-
+
+        return await complete_async(
+            company_id=company_id,
             messages=messages,
             model_name=model_name,
             temperature=temperature,
             timeout=timeout,
             tools=tools,
             other_options=other_options,
-            logger=self.logger,
             structured_output_model=structured_output_model,
             structured_output_enforce_schema=structured_output_enforce_schema,
         )
-
-    def stream_complete(
-        self,
-        messages: LanguageModelMessages,
-        model_name: LanguageModelName | str,
-        content_chunks: list[ContentChunk] = [],
-        debug_info: dict = {},
-        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
-        timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-        tools: Optional[list[LanguageModelTool]] = None,
-        start_text: Optional[str] = None,
-        other_options: Optional[dict] = None,
-    ):
-        """
-        Streams a completion in the chat session synchronously.
-
-        Args:
-            messages (LanguageModelMessages): The LanguageModelMessages object to stream.
-            content_chunks (list[ContentChunk]): The ContentChunks objects.
-            model_name (LanguageModelName | str): The language model to use for the completion.
-            debug_info (dict): The debug information. Defaults to {}.
-            temperature (float): The temperature value. Defaults to 0.25.
-            timeout (int): The timeout value in milliseconds. Defaults to 240_000.
-            tools (Optional[list[LanguageModelTool]]): The tools to use. Defaults to None.
-            start_text (Optional[str]): The start text. Defaults to None.
-            other_options (Optional[dict]): The other options to use. Defaults to None.
-        Returns:
-            The LanguageModelStreamResponse object once the stream has finished.
-        """
-        options, model, messages_dict, search_context = (
-            self._prepare_completion_params_util(
-                messages=messages,
-                model_name=model_name,
-                temperature=temperature,
-                tools=tools,
-                other_options=other_options,
-                content_chunks=content_chunks,
-            )
-        )
-
-        try:
-            response = unique_sdk.Integrated.chat_stream_completion(
-                user_id=self.event.user_id,
-                company_id=self.event.company_id,
-                assistantMessageId=self.event.payload.assistant_message.id,
-                userMessageId=self.event.payload.user_message.id,
-                messages=cast(
-                    list[unique_sdk.Integrated.ChatCompletionRequestMessage],
-                    messages_dict,
-                ),
-                chatId=self.event.payload.chat_id,
-                searchContext=search_context,
-                model=model,
-                timeout=timeout,
-                assistantId=self.event.payload.assistant_id,
-                debugInfo=debug_info,
-                options=options,  # type: ignore
-                startText=start_text,
-            )
-            return LanguageModelStreamResponse(**response)
-        except Exception as e:
-            self.logger.error(f"Error streaming completion: {e}")
-            raise e
-
-    async def stream_complete_async(
-        self,
-        messages: LanguageModelMessages,
-        model_name: LanguageModelName | str,
-        content_chunks: list[ContentChunk] = [],
-        debug_info: dict = {},
-        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
-        timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-        tools: Optional[list[LanguageModelTool]] = None,
-        start_text: Optional[str] = None,
-        other_options: Optional[dict] = None,
-    ):
-        """
-        Streams a completion in the chat session asynchronously.
-
-        Args:
-            messages (LanguageModelMessages): The LanguageModelMessages object to stream.
-            content_chunks (list[ContentChunk]): The content chunks.
-            model_name (LanguageModelName | str): The language model to use for the completion.
-            debug_info (dict): The debug information. Defaults to {}.
-            temperature (float): The temperature value. Defaults to 0.25.
-            timeout (int): The timeout value in milliseconds. Defaults to 240_000.
-            tools (Optional[list[LanguageModelTool]]): The tools to use. Defaults to None.
-            start_text (Optional[str]): The start text. Defaults to None.
-            other_options (Optional[dict]): The other options to use. Defaults to None.
-        Returns:
-            The LanguageModelStreamResponse object once the stream has finished.
-        """
-        options, model, messages_dict, search_context = (
-            self._prepare_completion_params_util(
-                messages=messages,
-                model_name=model_name,
-                temperature=temperature,
-                tools=tools,
-                other_options=other_options,
-                content_chunks=content_chunks,
-            )
-        )
-
-        try:
-            response = await unique_sdk.Integrated.chat_stream_completion_async(
-                user_id=self.event.user_id,
-                company_id=self.event.company_id,
-                assistantMessageId=self.event.payload.assistant_message.id,
-                userMessageId=self.event.payload.user_message.id,
-                messages=cast(
-                    list[unique_sdk.Integrated.ChatCompletionRequestMessage],
-                    messages_dict,
-                ),
-                chatId=self.event.payload.chat_id,
-                searchContext=search_context,
-                model=model,
-                timeout=timeout,
-                assistantId=self.event.payload.assistant_id,
-                debugInfo=debug_info,
-                options=options,  # type: ignore
-                startText=start_text,
-            )
-            return LanguageModelStreamResponse(**response)
-        except Exception as e:
-            self.logger.error(f"Error streaming completion: {e}")
-            raise e
-
-    @staticmethod
-    def _to_search_context(chunks: list[ContentChunk]) -> dict | None:
-        if not chunks:
-            return None
-        return [
-            unique_sdk.Integrated.SearchResult(
-                id=chunk.id,
-                chunkId=chunk.chunk_id,
-                key=chunk.key,
-                title=chunk.title,
-                url=chunk.url,
-                startPage=chunk.start_page,
-                endPage=chunk.end_page,
-                order=chunk.order,
-                object=chunk.object,
-            )  # type: ignore
-            for chunk in chunks
-        ]
-
-    @staticmethod
-    def _add_tools_to_options(
-        options: dict, tools: Optional[list[LanguageModelTool]]
-    ) -> dict:
-        if tools:
-            options["tools"] = [
-                {
-                    "type": "function",
-                    "function": tool.model_dump(exclude_none=True),
-                }
-                for tool in tools
-            ]
-        return options
-
-    @staticmethod
-    def _add_response_format_to_options(
-        options: dict,
-        structured_output_model: Type[BaseModel],
-        structured_output_enforce_schema: bool = False,
-    ) -> dict:
-        options["responseFormat"] = {
-            "type": "json_schema",
-            "json_schema": {
-                "name": structured_output_model.__name__,
-                "strict": structured_output_enforce_schema,
-                "schema": structured_output_model.model_json_schema(),
-            },
-        }
-        return options
-
-    @classmethod
-    def _prepare_completion_params_util(
-        cls,
-        messages: LanguageModelMessages,
-        model_name: LanguageModelName | str,
-        temperature: float,
-        tools: Optional[list[LanguageModelTool]] = None,
-        other_options: Optional[dict] = None,
-        content_chunks: Optional[list[ContentChunk]] = None,
-        structured_output_model: Optional[Type[BaseModel]] = None,
-        structured_output_enforce_schema: bool = False,
-    ) -> tuple[dict, str, dict, Optional[dict]]:
-        """
-        Prepares common parameters for completion requests.
-
-        Returns:
-            tuple containing:
-                - options (dict): Combined options including tools and temperature
-                - model (str): Resolved model name
-                - messages_dict (dict): Processed messages
-                - search_context (Optional[dict]): Processed content chunks if provided
-        """
-
-        options = cls._add_tools_to_options({}, tools)
-
-        if structured_output_model:
-            options = cls._add_response_format_to_options(
-                options, structured_output_model, structured_output_enforce_schema
-            )
-
-        options["temperature"] = temperature
-
-        if other_options:
-            options.update(other_options)
-
-        model = (
-            model_name.name if isinstance(model_name, LanguageModelName) else model_name
-        )
-
-        # Different methods need different message dump parameters
-        messages_dict = messages.model_dump(
-            exclude_none=True,
-            by_alias=content_chunks is not None,  # Use by_alias for streaming methods
-        )
-
-        search_context = (
-            LanguageModelService._to_search_context(content_chunks)
-            if content_chunks is not None
-            else None
-        )
-
-        return options, model, messages_dict, search_context
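This last hunk strips the service down: the streaming methods and the private option-building helpers are gone, and complete_async_util survives only as a deprecated classmethod whose decorator points callers at complete_async in language_model.functions. A migration sketch under the same placeholder assumptions as above:

import asyncio

from unique_toolkit.language_model.functions import complete_async
from unique_toolkit.language_model.schemas import LanguageModelMessages

async def main() -> None:
    # Previously: await LanguageModelService.complete_async_util(company_id=..., ...)
    # Now, per the @deprecated hint in the diff:
    response = await complete_async(
        company_id="company_123",  # placeholder
        messages=LanguageModelMessages([]),  # placeholder messages
        model_name="gpt-4o",  # placeholder
    )

asyncio.run(main())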
unique_toolkit/short_term_memory/constants.py

@@ -0,0 +1 @@
+DOMAIN_NAME = "short_term_memory"
unique_toolkit/short_term_memory/functions.py

@@ -0,0 +1,175 @@
+import json
+import logging
+
+import unique_sdk
+
+from unique_toolkit.short_term_memory.constants import DOMAIN_NAME
+from unique_toolkit.short_term_memory.schemas import ShortTermMemory
+
+logger = logging.getLogger(f"toolkit.{DOMAIN_NAME}.{__name__}")
+
+
+async def find_latest_memory_async(
+    user_id: str,
+    company_id: str,
+    key: str,
+    chat_id: str | None = None,
+    message_id: str | None = None,
+) -> ShortTermMemory:
+    """
+    Find the latest short term memory asynchronously.
+
+    Args:
+        user_id (str): The user ID.
+        company_id (str): The company ID.
+        key (str): The key.
+        chat_id (str | None): The chat ID.
+        message_id (str | None): The message ID.
+
+    Returns:
+        ShortTermMemory: The latest short term memory.
+
+    Raises:
+        Exception: If an error occurs.
+    """
+    try:
+        logger.info("Finding latest short term memory")
+        stm = await unique_sdk.ShortTermMemory.find_latest_async(
+            user_id=user_id,
+            company_id=company_id,
+            memoryName=key,
+            chatId=chat_id,
+            messageId=message_id,
+        )
+        return ShortTermMemory(**stm)
+    except Exception as e:
+        logger.error(f"Error finding latest short term memory: {e}")
+        raise e
+
+
+def find_latest_memory(
+    user_id: str,
+    company_id: str,
+    key: str,
+    chat_id: str | None = None,
+    message_id: str | None = None,
+) -> ShortTermMemory:
+    """
+    Find the latest short term memory.
+
+    Args:
+        user_id (str): The user ID.
+        company_id (str): The company ID.
+        key (str): The key.
+        chat_id (str | None): The chat ID.
+        message_id (str | None): The message ID.
+
+    Returns:
+        ShortTermMemory: The latest short term memory.
+
+    Raises:
+        Exception: If an error occurs.
+    """
+    try:
+        logger.info("Finding latest short term memory")
+        stm = unique_sdk.ShortTermMemory.find_latest(
+            user_id=user_id,
+            company_id=company_id,
+            memoryName=key,
+            chatId=chat_id,
+            messageId=message_id,
+        )
+        return ShortTermMemory(**stm)
+    except Exception as e:
+        logger.error(f"Error finding latest short term memory: {e}")
+        raise e
+
+
+async def create_memory_async(
+    user_id: str,
+    company_id: str,
+    key: str,
+    value: str | dict,
+    chat_id: str | None = None,
+    message_id: str | None = None,
+):
+    """
+    Create a short term memory asynchronously.
+
+    Args:
+        user_id (str): The user ID.
+        company_id (str): The company ID.
+        key (str): The key.
+        value (str | dict): The value.
+        chat_id (str | None): The chat ID.
+        message_id (str | None): The message ID.
+
+    Returns:
+        ShortTermMemory: The created short term memory.
+
+    Raises:
+        Exception: If an error occurs.
+    """
+
+    if isinstance(value, dict):
+        value = json.dumps(value)
+
+    try:
+        logger.info("Creating short term memory")
+        stm = await unique_sdk.ShortTermMemory.create_async(
+            user_id=user_id,
+            company_id=company_id,
+            memoryName=key,
+            chatId=chat_id,
+            messageId=message_id,
+            data=value,
+        )
+        return ShortTermMemory(**stm)
+    except Exception as e:
+        logger.error(f"Error creating short term memory: {e}")
+        raise e
+
+
+def create_memory(
+    user_id: str,
+    company_id: str,
+    key: str,
+    value: str | dict,
+    chat_id: str | None = None,
+    message_id: str | None = None,
+):
+    """
+    Create a short term memory.
+
+    Args:
+        user_id (str): The user ID.
+        company_id (str): The company ID.
+        key (str): The key.
+        value (str | dict): The value.
+        chat_id (str | None): The chat ID.
+        message_id (str | None): The message ID.
+
+    Returns:
+        ShortTermMemory: The created short term memory.
+
+    Raises:
+        Exception: If an error occurs.
+    """
+
+    if isinstance(value, dict):
+        value = json.dumps(value)
+
+    try:
+        logger.info("Creating short term memory")
+        stm = unique_sdk.ShortTermMemory.create(
+            user_id=user_id,
+            company_id=company_id,
+            memoryName=key,
+            chatId=chat_id,
+            messageId=message_id,
+            data=value,
+        )
+        return ShortTermMemory(**stm)
+    except Exception as e:
+        logger.error(f"Error creating short term memory: {e}")
+        raise e
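The new module mirrors each operation as a sync/async pair, serializes dict values with json.dumps before the SDK call, and scopes a memory by chat_id or message_id. A short usage sketch with placeholder identifiers:

from unique_toolkit.short_term_memory.functions import create_memory, find_latest_memory

# Store a dict under a named key; dicts are JSON-serialized before the SDK call.
create_memory(
    user_id="user_123",  # placeholder
    company_id="company_123",  # placeholder
    key="search_results",  # placeholder memory name
    value={"query": "revenue 2023"},
    chat_id="chat_123",  # placeholder; message_id can scope the memory instead
)

# Read the most recent value stored under the same key and scope.
stm = find_latest_memory(
    user_id="user_123",
    company_id="company_123",
    key="search_results",
    chat_id="chat_123",
)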