unique_toolkit 0.7.13__py3-none-any.whl → 0.7.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- unique_toolkit/_common/validators.py +54 -5
- unique_toolkit/app/schemas.py +15 -20
- unique_toolkit/chat/service.py +313 -65
- unique_toolkit/content/functions.py +12 -3
- unique_toolkit/content/service.py +5 -0
- unique_toolkit/evaluators/config.py +9 -18
- unique_toolkit/evaluators/context_relevancy/constants.py +4 -2
- unique_toolkit/evaluators/context_relevancy/utils.py +32 -18
- unique_toolkit/evaluators/hallucination/constants.py +2 -2
- unique_toolkit/evaluators/hallucination/utils.py +40 -30
- unique_toolkit/language_model/functions.py +23 -19
- unique_toolkit/language_model/infos.py +6 -4
- unique_toolkit/language_model/schemas.py +116 -32
- unique_toolkit/protocols/support.py +28 -0
- {unique_toolkit-0.7.13.dist-info → unique_toolkit-0.7.17.dist-info}/METADATA +18 -2
- {unique_toolkit-0.7.13.dist-info → unique_toolkit-0.7.17.dist-info}/RECORD +18 -17
- {unique_toolkit-0.7.13.dist-info → unique_toolkit-0.7.17.dist-info}/LICENSE +0 -0
- {unique_toolkit-0.7.13.dist-info → unique_toolkit-0.7.17.dist-info}/WHEEL +0 -0
@@ -1,7 +1,7 @@
|
|
1
1
|
import json
|
2
2
|
import math
|
3
3
|
from enum import StrEnum
|
4
|
-
from typing import Any,
|
4
|
+
from typing import Any, Self
|
5
5
|
from uuid import uuid4
|
6
6
|
|
7
7
|
from humps import camelize
|
@@ -27,6 +27,8 @@ model_config = ConfigDict(
|
|
27
27
|
)
|
28
28
|
|
29
29
|
|
30
|
+
# Equivalent to
|
31
|
+
# from openai.types.chat.chat_completion_role import ChatCompletionRole
|
30
32
|
class LanguageModelMessageRole(StrEnum):
|
31
33
|
USER = "user"
|
32
34
|
SYSTEM = "system"
|
@@ -34,12 +36,32 @@ class LanguageModelMessageRole(StrEnum):
|
|
34
36
|
TOOL = "tool"
|
35
37
|
|
36
38
|
|
39
|
+
# This is tailored to the unique backend
|
40
|
+
class LanguageModelStreamResponseMessage(BaseModel):
|
41
|
+
model_config = model_config
|
42
|
+
|
43
|
+
id: str
|
44
|
+
previous_message_id: (
|
45
|
+
str | None
|
46
|
+
) # Stream response can return a null previous_message_id if an assistant message is manually added
|
47
|
+
role: LanguageModelMessageRole
|
48
|
+
text: str
|
49
|
+
original_text: str | None = None
|
50
|
+
references: list[dict[str, list | dict | str | int | float | bool]] = [] # type: ignore
|
51
|
+
|
52
|
+
# TODO make sdk return role in lowercase
|
53
|
+
# Currently needed as sdk returns role in uppercase
|
54
|
+
@field_validator("role", mode="before")
|
55
|
+
def set_role(cls, value: str):
|
56
|
+
return value.lower()
|
57
|
+
|
58
|
+
|
37
59
|
class LanguageModelFunction(BaseModel):
|
38
60
|
model_config = model_config
|
39
61
|
|
40
62
|
id: str | None = None
|
41
63
|
name: str
|
42
|
-
arguments:
|
64
|
+
arguments: dict[str, Any] | str | None = None # type: ignore
|
43
65
|
|
44
66
|
@field_validator("arguments", mode="before")
|
45
67
|
def set_arguments(cls, value):
|
@@ -62,6 +84,14 @@ class LanguageModelFunction(BaseModel):
|
|
62
84
|
return seralization
|
63
85
|
|
64
86
|
|
87
|
+
# This is tailored to the unique backend
|
88
|
+
class LanguageModelStreamResponse(BaseModel):
|
89
|
+
model_config = model_config
|
90
|
+
|
91
|
+
message: LanguageModelStreamResponseMessage
|
92
|
+
tool_calls: list[LanguageModelFunction] | None = None
|
93
|
+
|
94
|
+
|
65
95
|
class LanguageModelFunctionCall(BaseModel):
|
66
96
|
model_config = model_config
|
67
97
|
|
@@ -69,6 +99,8 @@ class LanguageModelFunctionCall(BaseModel):
|
|
69
99
|
type: str | None = None
|
70
100
|
function: LanguageModelFunction
|
71
101
|
|
102
|
+
# TODO: Circular reference of types
|
103
|
+
@deprecated("Use LanguageModelAssistantMessage.from_functions instead.")
|
72
104
|
@staticmethod
|
73
105
|
def create_assistant_message_from_tool_calls(
|
74
106
|
tool_calls: list[LanguageModelFunction],
|
@@ -93,8 +125,7 @@ class LanguageModelMessage(BaseModel):
|
|
93
125
|
content: str | list[dict] | None = None
|
94
126
|
|
95
127
|
def __str__(self):
|
96
|
-
|
97
|
-
message = ""
|
128
|
+
message = ""
|
98
129
|
if isinstance(self.content, str):
|
99
130
|
message = self.content
|
100
131
|
elif isinstance(self.content, list):
|
@@ -103,6 +134,8 @@ class LanguageModelMessage(BaseModel):
|
|
103
134
|
return format_message(self.role.capitalize(), message=message, num_tabs=1)
|
104
135
|
|
105
136
|
|
137
|
+
# Equivalent to
|
138
|
+
# from openai.types.chat.chat_completion_system_message_param import ChatCompletionSystemMessageParam
|
106
139
|
class LanguageModelSystemMessage(LanguageModelMessage):
|
107
140
|
role: LanguageModelMessageRole = LanguageModelMessageRole.SYSTEM
|
108
141
|
|
@@ -111,6 +144,10 @@ class LanguageModelSystemMessage(LanguageModelMessage):
|
|
111
144
|
return LanguageModelMessageRole.SYSTEM
|
112
145
|
|
113
146
|
|
147
|
+
# Equivalent to
|
148
|
+
# from openai.types.chat.chat_completion_user_message_param import ChatCompletionUserMessageParam
|
149
|
+
|
150
|
+
|
114
151
|
class LanguageModelUserMessage(LanguageModelMessage):
|
115
152
|
role: LanguageModelMessageRole = LanguageModelMessageRole.USER
|
116
153
|
|
@@ -119,6 +156,8 @@ class LanguageModelUserMessage(LanguageModelMessage):
|
|
119
156
|
return LanguageModelMessageRole.USER
|
120
157
|
|
121
158
|
|
159
|
+
# Equivalent to
|
160
|
+
# from openai.types.chat.chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam
|
122
161
|
class LanguageModelAssistantMessage(LanguageModelMessage):
|
123
162
|
role: LanguageModelMessageRole = LanguageModelMessageRole.ASSISTANT
|
124
163
|
parsed: dict | None = None
|
@@ -129,6 +168,47 @@ class LanguageModelAssistantMessage(LanguageModelMessage):
|
|
129
168
|
def set_role(cls, value):
|
130
169
|
return LanguageModelMessageRole.ASSISTANT
|
131
170
|
|
171
|
+
@classmethod
|
172
|
+
def from_functions(
|
173
|
+
cls,
|
174
|
+
tool_calls: list[LanguageModelFunction],
|
175
|
+
):
|
176
|
+
return cls(
|
177
|
+
content="",
|
178
|
+
tool_calls=[
|
179
|
+
LanguageModelFunctionCall(
|
180
|
+
id=tool_call.id,
|
181
|
+
type="function",
|
182
|
+
function=tool_call,
|
183
|
+
)
|
184
|
+
for tool_call in tool_calls
|
185
|
+
],
|
186
|
+
)
|
187
|
+
|
188
|
+
@classmethod
|
189
|
+
def from_stream_response(cls, response: LanguageModelStreamResponse):
|
190
|
+
tool_calls = [
|
191
|
+
LanguageModelFunctionCall(
|
192
|
+
id=None,
|
193
|
+
type=None,
|
194
|
+
function=f,
|
195
|
+
)
|
196
|
+
for f in response.tool_calls or []
|
197
|
+
]
|
198
|
+
|
199
|
+
tool_calls = tool_calls if len(tool_calls) > 0 else None
|
200
|
+
|
201
|
+
return cls(
|
202
|
+
content=response.message.text,
|
203
|
+
parsed=None,
|
204
|
+
refusal=None,
|
205
|
+
tool_calls=tool_calls,
|
206
|
+
)
|
207
|
+
|
208
|
+
|
209
|
+
# Equivalent to
|
210
|
+
# from openai.types.chat.chat_completion_tool_message_param import ChatCompletionToolMessageParam
|
211
|
+
|
132
212
|
|
133
213
|
class LanguageModelToolMessage(LanguageModelMessage):
|
134
214
|
role: LanguageModelMessageRole = LanguageModelMessageRole.TOOL
|
@@ -147,6 +227,11 @@ class LanguageModelToolMessage(LanguageModelMessage):
|
|
147
227
|
return LanguageModelMessageRole.TOOL
|
148
228
|
|
149
229
|
|
230
|
+
# Equivalent implementation for list of
|
231
|
+
# from openai.types.chat.chat_completion_tool_message_param import ChatCompletionToolMessageParam
|
232
|
+
# with the addition of the builder
|
233
|
+
|
234
|
+
|
150
235
|
class LanguageModelMessages(RootModel):
|
151
236
|
root: list[
|
152
237
|
LanguageModelMessage
|
@@ -174,6 +259,11 @@ class LanguageModelMessages(RootModel):
|
|
174
259
|
return builder
|
175
260
|
|
176
261
|
|
262
|
+
# This seems similar to
|
263
|
+
# from openai.types.completion_choice import CompletionChoice
|
264
|
+
# but is missing multiple attributes and uses message instead of text
|
265
|
+
|
266
|
+
|
177
267
|
class LanguageModelCompletionChoice(BaseModel):
|
178
268
|
model_config = model_config
|
179
269
|
|
@@ -182,38 +272,26 @@ class LanguageModelCompletionChoice(BaseModel):
|
|
182
272
|
finish_reason: str
|
183
273
|
|
184
274
|
|
275
|
+
# This seems similar to
|
276
|
+
# from openai.types.completion import Completion
|
277
|
+
# but is missing multiple attributes
|
185
278
|
class LanguageModelResponse(BaseModel):
|
186
279
|
model_config = model_config
|
187
280
|
|
188
281
|
choices: list[LanguageModelCompletionChoice]
|
189
282
|
|
283
|
+
@classmethod
|
284
|
+
def from_stream_response(cls, response: LanguageModelStreamResponse):
|
285
|
+
choice = LanguageModelCompletionChoice(
|
286
|
+
index=0,
|
287
|
+
message=LanguageModelAssistantMessage.from_stream_response(response),
|
288
|
+
finish_reason="",
|
289
|
+
)
|
190
290
|
|
191
|
-
|
192
|
-
model_config = model_config
|
193
|
-
|
194
|
-
id: str
|
195
|
-
previous_message_id: (
|
196
|
-
str | None
|
197
|
-
) # Stream response can return a null previous_message_id if an assistant message is manually added
|
198
|
-
role: LanguageModelMessageRole
|
199
|
-
text: str
|
200
|
-
original_text: str | None = None
|
201
|
-
references: list[dict[str, list | dict | str | int | float | bool]] = [] # type: ignore
|
202
|
-
|
203
|
-
# TODO make sdk return role in lowercase
|
204
|
-
# Currently needed as sdk returns role in uppercase
|
205
|
-
@field_validator("role", mode="before")
|
206
|
-
def set_role(cls, value: str):
|
207
|
-
return value.lower()
|
208
|
-
|
209
|
-
|
210
|
-
class LanguageModelStreamResponse(BaseModel):
|
211
|
-
model_config = model_config
|
212
|
-
|
213
|
-
message: LanguageModelStreamResponseMessage
|
214
|
-
tool_calls: Optional[list[LanguageModelFunction]] = None
|
291
|
+
return cls(choices=[choice])
|
215
292
|
|
216
293
|
|
294
|
+
# This is tailored for unique and only used in language model info
|
217
295
|
class LanguageModelTokenLimits(BaseModel):
|
218
296
|
token_limit_input: int
|
219
297
|
token_limit_output: int
|
@@ -255,29 +333,35 @@ class LanguageModelTokenLimits(BaseModel):
|
|
255
333
|
|
256
334
|
data["token_limit_input"] = math.floor(fraction_input * token_limit)
|
257
335
|
data["token_limit_output"] = math.floor(
|
258
|
-
(1 - fraction_input) * token_limit
|
336
|
+
(1 - fraction_input) * token_limit,
|
259
337
|
)
|
260
338
|
data["_fraction_adaptpable"] = True
|
261
339
|
return data
|
262
340
|
|
263
341
|
raise ValueError(
|
264
|
-
'Either "token_limit_input" and "token_limit_output" must be provided together, or "token_limit" must be provided.'
|
342
|
+
'Either "token_limit_input" and "token_limit_output" must be provided together, or "token_limit" must be provided.',
|
265
343
|
)
|
266
344
|
|
267
345
|
|
346
|
+
# This is more restrictive than what openai allows
|
268
347
|
class LanguageModelToolParameterProperty(BaseModel):
|
269
348
|
type: str
|
270
349
|
description: str
|
271
|
-
enum:
|
272
|
-
items:
|
350
|
+
enum: list[Any] | None = None
|
351
|
+
items: Self | None = None
|
273
352
|
|
274
353
|
|
354
|
+
# Looks most like
|
355
|
+
# from openai.types.shared.function_parameters import FunctionParameters
|
275
356
|
class LanguageModelToolParameters(BaseModel):
|
276
357
|
type: str = "object"
|
277
358
|
properties: dict[str, LanguageModelToolParameterProperty]
|
278
359
|
required: list[str]
|
279
360
|
|
280
361
|
|
362
|
+
# Looks most like
|
363
|
+
# from openai.types.shared_params.function_definition import FunctionDefinition
|
364
|
+
# but returns parameter is not known
|
281
365
|
class LanguageModelTool(BaseModel):
|
282
366
|
name: str = Field(
|
283
367
|
...,
|
@@ -0,0 +1,28 @@
|
|
1
|
+
from typing import Protocol
|
2
|
+
|
3
|
+
from unique_toolkit.language_model import (
|
4
|
+
LanguageModelMessages,
|
5
|
+
LanguageModelName,
|
6
|
+
LanguageModelResponse,
|
7
|
+
LanguageModelTool,
|
8
|
+
)
|
9
|
+
from unique_toolkit.language_model.constants import (
|
10
|
+
DEFAULT_COMPLETE_TEMPERATURE,
|
11
|
+
DEFAULT_COMPLETE_TIMEOUT,
|
12
|
+
)
|
13
|
+
|
14
|
+
# As soon as we have multiple, remember
|
15
|
+
# https://pypi.org/project/typing-protocol-intersection/
|
16
|
+
# to generate combinations of protocols without inheritance
|
17
|
+
|
18
|
+
|
19
|
+
class SupportsComplete(Protocol):
|
20
|
+
def complete(
|
21
|
+
self,
|
22
|
+
messages: LanguageModelMessages,
|
23
|
+
model_name: LanguageModelName | str,
|
24
|
+
temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
|
25
|
+
timeout: int = DEFAULT_COMPLETE_TIMEOUT,
|
26
|
+
tools: list[LanguageModelTool] | None = None,
|
27
|
+
**kwargs,
|
28
|
+
) -> LanguageModelResponse: ...
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: unique_toolkit
|
3
|
-
Version: 0.7.
|
3
|
+
Version: 0.7.17
|
4
4
|
Summary:
|
5
5
|
License: Proprietary
|
6
6
|
Author: Martin Fadler
|
@@ -17,7 +17,7 @@ Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
|
|
17
17
|
Requires-Dist: regex (>=2024.5.15,<2025.0.0)
|
18
18
|
Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
|
19
19
|
Requires-Dist: typing-extensions (>=4.9.0,<5.0.0)
|
20
|
-
Requires-Dist: unique-sdk (>=0.9.
|
20
|
+
Requires-Dist: unique-sdk (>=0.9.26,<0.10.0)
|
21
21
|
Description-Content-Type: text/markdown
|
22
22
|
|
23
23
|
# Unique Toolkit
|
@@ -111,6 +111,22 @@ All notable changes to this project will be documented in this file.
|
|
111
111
|
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
112
112
|
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
113
113
|
|
114
|
+
## [0.7.17] - 2025-05-16
|
115
|
+
- Change inheritance hierarchy of events for easier deprecation
|
116
|
+
|
117
|
+
## [0.7.16] - 2025-05-16
|
118
|
+
- Add classmethods to create LanguageModelAssistantMessage from functions and stream response
|
119
|
+
- Add completion like method to chat
|
120
|
+
- Add protocol for completion like method
|
121
|
+
|
122
|
+
## [0.7.15] - 2025-05-13
|
123
|
+
- Add the possibility to specify ingestionConfig when creating or updating a Content.
|
124
|
+
|
125
|
+
## [0.7.14] - 2025-05-08
|
126
|
+
- Fix bug not selecting the correct llm
|
127
|
+
- Add LMI type for flexible init of LanguageModelInfo
|
128
|
+
- Replace LanguageModel with LanguageModelInfo in hallucination check
|
129
|
+
|
114
130
|
## [0.7.13] - 2025-05-07
|
115
131
|
- Adding litellm models `litellm:anthropic-claude-3-7-sonnet`, `litellm:anthropic-claude-3-7-sonnet-thinking`, `litellm:gemini-2-0-flash`, `gemini-2-5-flash-preview-04-17` , `litellm:gemini-2-5-pro-exp-03-25`
|
116
132
|
|
@@ -3,26 +3,26 @@ unique_toolkit/_common/_base_service.py,sha256=S8H0rAebx7GsOldA7xInLp3aQJt9yEPDQ
|
|
3
3
|
unique_toolkit/_common/_time_utils.py,sha256=ztmTovTvr-3w71Ns2VwXC65OKUUh-sQlzbHdKTQWm-w,135
|
4
4
|
unique_toolkit/_common/exception.py,sha256=caQIE1btsQnpKCHqL2cgWUSbHup06enQu_Pt7uGUTTE,727
|
5
5
|
unique_toolkit/_common/validate_required_values.py,sha256=Y_M1ub9gIKP9qZ45F6Zq3ZHtuIqhmOjl8Z2Vd3avg8w,588
|
6
|
-
unique_toolkit/_common/validators.py,sha256=
|
6
|
+
unique_toolkit/_common/validators.py,sha256=l7-hWyRTZ3aF_e73oTQFZdz93s06VhNWVpkERbg2a64,1569
|
7
7
|
unique_toolkit/app/__init__.py,sha256=jgwWfu27U911kZE1yRq920ZULGLAQGycD3222YxUvsY,1182
|
8
8
|
unique_toolkit/app/init_logging.py,sha256=Sh26SRxOj8i8dzobKhYha2lLrkrMTHfB1V4jR3h23gQ,678
|
9
9
|
unique_toolkit/app/init_sdk.py,sha256=Nv4Now4pMfM0AgRhbtatLpm_39rKxn0WmRLwmPhRl-8,1285
|
10
10
|
unique_toolkit/app/performance/async_tasks.py,sha256=H0l3OAcosLwNHZ8d2pd-Di4wHIXfclEvagi5kfqLFPA,1941
|
11
11
|
unique_toolkit/app/performance/async_wrapper.py,sha256=yVVcRDkcdyfjsxro-N29SBvi-7773wnfDplef6-y8xw,1077
|
12
|
-
unique_toolkit/app/schemas.py,sha256=
|
12
|
+
unique_toolkit/app/schemas.py,sha256=fNPRQPrpJjYrtkkXPR7sNFjP0AYPZtKe3H1YZkXd2QQ,3275
|
13
13
|
unique_toolkit/app/verification.py,sha256=GxFFwcJMy25fCA_Xe89wKW7bgqOu8PAs5y8QpHF0GSc,3861
|
14
14
|
unique_toolkit/chat/__init__.py,sha256=LRs2G-JTVuci4lbtHTkVUiNcZcSR6uqqfnAyo7af6nY,619
|
15
15
|
unique_toolkit/chat/constants.py,sha256=05kq6zjqUVB2d6_P7s-90nbljpB3ryxwCI-CAz0r2O4,83
|
16
16
|
unique_toolkit/chat/functions.py,sha256=J9Cmgkhj9bBxZja3ggkSp48af_LPU4Dfi9Sbc_WhhNY,27204
|
17
17
|
unique_toolkit/chat/schemas.py,sha256=MNcGAXjK1K8zOODeMFz3FHVQL5sIBQXRwkr_2hFkG8k,2672
|
18
|
-
unique_toolkit/chat/service.py,sha256=
|
18
|
+
unique_toolkit/chat/service.py,sha256=C8L5Alc9BKmXau5kcbQWKBjg1OGc5fmtO0F9xooxSCw,40641
|
19
19
|
unique_toolkit/chat/state.py,sha256=Cjgwv_2vhDFbV69xxsn7SefhaoIAEqLx3ferdVFCnOg,1445
|
20
20
|
unique_toolkit/chat/utils.py,sha256=ihm-wQykBWhB4liR3LnwPVPt_qGW6ETq21Mw4HY0THE,854
|
21
21
|
unique_toolkit/content/__init__.py,sha256=EdJg_A_7loEtCQf4cah3QARQreJx6pdz89Rm96YbMVg,940
|
22
22
|
unique_toolkit/content/constants.py,sha256=1iy4Y67xobl5VTnJB6SxSyuoBWbdLl9244xfVMUZi5o,60
|
23
|
-
unique_toolkit/content/functions.py,sha256=
|
23
|
+
unique_toolkit/content/functions.py,sha256=imNINvUW_-ejPBT8yPKuL9THdDplfjeKvnK9_EuFlqk,17497
|
24
24
|
unique_toolkit/content/schemas.py,sha256=zks_Pkki2VhxICJJgHZyc-LPmRuj5dLbw3pgcUT7SW8,2362
|
25
|
-
unique_toolkit/content/service.py,sha256=
|
25
|
+
unique_toolkit/content/service.py,sha256=JDqlCJc-z-VQOmEvCIA8VcWSNOSuVo3lFetJs257H7A,18842
|
26
26
|
unique_toolkit/content/utils.py,sha256=GUVPrkZfMoAj4MRoBs5BD_7vSuLZTZx69hyWzYFrI50,7747
|
27
27
|
unique_toolkit/embedding/__init__.py,sha256=uUyzjonPvuDCYsvXCIt7ErQXopLggpzX-MEQd3_e2kE,250
|
28
28
|
unique_toolkit/embedding/constants.py,sha256=Lj8-Lcy1FvuC31PM9Exq7vaFuxQV4pEI1huUMFX-J2M,52
|
@@ -31,34 +31,35 @@ unique_toolkit/embedding/schemas.py,sha256=1GvKCaSk4jixzVQ2PKq8yDqwGEVY_hWclYtoA
|
|
31
31
|
unique_toolkit/embedding/service.py,sha256=ptwNNe2ji7FGqAb5VayedrB9T5b1T00XABwYtgvlGO8,4076
|
32
32
|
unique_toolkit/embedding/utils.py,sha256=v86lo__bCJbxZBQ3OcLu5SuwT6NbFfWlcq8iyk6BuzQ,279
|
33
33
|
unique_toolkit/evaluators/__init__.py,sha256=3Rfpnowm7MUXHWmeU4UV4s_3Hk-sw3V20oBwQCYlejQ,50
|
34
|
-
unique_toolkit/evaluators/config.py,sha256=
|
34
|
+
unique_toolkit/evaluators/config.py,sha256=_DIXToJ-hGNpDAdWa7Q6GMjAsxiC_DquLF-SS5s9rTE,717
|
35
35
|
unique_toolkit/evaluators/constants.py,sha256=1oI93jsh0R_TjX_8OenliiiywVe3vTooSnaMqtq6R18,27
|
36
|
-
unique_toolkit/evaluators/context_relevancy/constants.py,sha256=
|
36
|
+
unique_toolkit/evaluators/context_relevancy/constants.py,sha256=QG2x32LzV42kAkeWTPuLvOX9NlTSxJlsAgDyxomUBmY,1158
|
37
37
|
unique_toolkit/evaluators/context_relevancy/prompts.py,sha256=gTlWP7fDuxhrXhCYNCqXMbCey_DalZMdi5l-a6RHgk0,713
|
38
38
|
unique_toolkit/evaluators/context_relevancy/service.py,sha256=9hzdMuF4A4T97-3X3zcXgrDISLn1bleZ6tTL1bHa9dQ,1722
|
39
|
-
unique_toolkit/evaluators/context_relevancy/utils.py,sha256=
|
39
|
+
unique_toolkit/evaluators/context_relevancy/utils.py,sha256=qwTkKah6S2hkEGOHxVdQ6RvV6OcjKj4eyd09TcJZlho,5813
|
40
40
|
unique_toolkit/evaluators/exception.py,sha256=7lcVbCyoN4Md1chNJDFxpUYyWbVrcr9dcc3TxWykJTc,115
|
41
|
-
unique_toolkit/evaluators/hallucination/constants.py,sha256=
|
41
|
+
unique_toolkit/evaluators/hallucination/constants.py,sha256=KDhmSlRBnUkfEAFQLaD80rKtj6p-ZJ3L98hqNmNL7xI,1458
|
42
42
|
unique_toolkit/evaluators/hallucination/prompts.py,sha256=9yCpO_WGLDvYfPWKL1VuRA-jt0P_-A-qvLUOmuv-Nks,3320
|
43
43
|
unique_toolkit/evaluators/hallucination/service.py,sha256=k8qro5Lw4Ak58m4HYp3G4HPLIaexeFySIIVvW6fAdeA,2408
|
44
|
-
unique_toolkit/evaluators/hallucination/utils.py,sha256=
|
44
|
+
unique_toolkit/evaluators/hallucination/utils.py,sha256=gO2AOzDQwVTev2_5vDKgJ9A6A9e0himJyAta_wglVG8,8326
|
45
45
|
unique_toolkit/evaluators/output_parser.py,sha256=eI72qkzK1dZyUvnfP2SOAQCGBj_-PwX5wy_aLPMsJMY,883
|
46
46
|
unique_toolkit/evaluators/schemas.py,sha256=Jaue6Uhx75X1CyHKWj8sT3RE1JZXTqoLtfLt2xQNCX8,2507
|
47
47
|
unique_toolkit/language_model/__init__.py,sha256=jWko_vQj48wjnpTtlkg8iNdef0SMI3FN2kGywXRTMzg,1880
|
48
48
|
unique_toolkit/language_model/builder.py,sha256=aIAXWWUoB5G-HONJiAt3MdRGd4jdP8nA-HYX2D2WlSI,3048
|
49
49
|
unique_toolkit/language_model/constants.py,sha256=B-topqW0r83dkC_25DeQfnPk3n53qzIHUCBS7YJ0-1U,119
|
50
|
-
unique_toolkit/language_model/functions.py,sha256=
|
51
|
-
unique_toolkit/language_model/infos.py,sha256=
|
50
|
+
unique_toolkit/language_model/functions.py,sha256=0oSkG4xpbxeaVTJide6g-zunBrsBRuvp7UQlKVbjpSk,7949
|
51
|
+
unique_toolkit/language_model/infos.py,sha256=qPf4Xlanet8jf0apZ6-qxS_6zmDd6p9D40it2TqmF3w,25910
|
52
52
|
unique_toolkit/language_model/prompt.py,sha256=JSawaLjQg3VR-E2fK8engFyJnNdk21zaO8pPIodzN4Q,3991
|
53
|
-
unique_toolkit/language_model/schemas.py,sha256=
|
53
|
+
unique_toolkit/language_model/schemas.py,sha256=Wc_OeML0AYPTfIC1BObwumsunq23h12qVzi4hVlaZPE,11389
|
54
54
|
unique_toolkit/language_model/service.py,sha256=FUf-HTKNslrMAh8qFMco_ZpP-N0t_iAFWK3juldoUe8,8343
|
55
55
|
unique_toolkit/language_model/utils.py,sha256=bPQ4l6_YO71w-zaIPanUUmtbXC1_hCvLK0tAFc3VCRc,1902
|
56
|
+
unique_toolkit/protocols/support.py,sha256=iSSoERUZGLbmY2DGBqGeFTCRtH3ClhzAUutqNxwYgKs,823
|
56
57
|
unique_toolkit/short_term_memory/__init__.py,sha256=2mI3AUrffgH7Yt-xS57EGqnHf7jnn6xquoKEhJqk3Wg,185
|
57
58
|
unique_toolkit/short_term_memory/constants.py,sha256=698CL6-wjup2MvU19RxSmQk3gX7aqW_OOpZB7sbz_Xg,34
|
58
59
|
unique_toolkit/short_term_memory/functions.py,sha256=3WiK-xatY5nh4Dr5zlDUye1k3E6kr41RiscwtTplw5k,4484
|
59
60
|
unique_toolkit/short_term_memory/schemas.py,sha256=OhfcXyF6ACdwIXW45sKzjtZX_gkcJs8FEZXcgQTNenw,1406
|
60
61
|
unique_toolkit/short_term_memory/service.py,sha256=vEKFxP1SScPrFniso492fVthWR1sosdFibhiNF3zRvI,8081
|
61
|
-
unique_toolkit-0.7.
|
62
|
-
unique_toolkit-0.7.
|
63
|
-
unique_toolkit-0.7.
|
64
|
-
unique_toolkit-0.7.
|
62
|
+
unique_toolkit-0.7.17.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
|
63
|
+
unique_toolkit-0.7.17.dist-info/METADATA,sha256=1s2gtPKoW7K5BT6aAVwh96O-1K8-2d-uEXKXzaO2fLo,22573
|
64
|
+
unique_toolkit-0.7.17.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
|
65
|
+
unique_toolkit-0.7.17.dist-info/RECORD,,
|
File without changes
|
File without changes
|