unique_toolkit 0.7.7__py3-none-any.whl → 1.23.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of unique_toolkit might be problematic.
- unique_toolkit/__init__.py +28 -1
- unique_toolkit/_common/api_calling/human_verification_manager.py +343 -0
- unique_toolkit/_common/base_model_type_attribute.py +303 -0
- unique_toolkit/_common/chunk_relevancy_sorter/config.py +49 -0
- unique_toolkit/_common/chunk_relevancy_sorter/exception.py +5 -0
- unique_toolkit/_common/chunk_relevancy_sorter/schemas.py +46 -0
- unique_toolkit/_common/chunk_relevancy_sorter/service.py +374 -0
- unique_toolkit/_common/chunk_relevancy_sorter/tests/test_service.py +275 -0
- unique_toolkit/_common/default_language_model.py +12 -0
- unique_toolkit/_common/docx_generator/__init__.py +7 -0
- unique_toolkit/_common/docx_generator/config.py +12 -0
- unique_toolkit/_common/docx_generator/schemas.py +80 -0
- unique_toolkit/_common/docx_generator/service.py +252 -0
- unique_toolkit/_common/docx_generator/template/Doc Template.docx +0 -0
- unique_toolkit/_common/endpoint_builder.py +305 -0
- unique_toolkit/_common/endpoint_requestor.py +430 -0
- unique_toolkit/_common/exception.py +24 -0
- unique_toolkit/_common/feature_flags/schema.py +9 -0
- unique_toolkit/_common/pydantic/rjsf_tags.py +936 -0
- unique_toolkit/_common/pydantic_helpers.py +154 -0
- unique_toolkit/_common/referencing.py +53 -0
- unique_toolkit/_common/string_utilities.py +140 -0
- unique_toolkit/_common/tests/test_referencing.py +521 -0
- unique_toolkit/_common/tests/test_string_utilities.py +506 -0
- unique_toolkit/_common/token/image_token_counting.py +67 -0
- unique_toolkit/_common/token/token_counting.py +204 -0
- unique_toolkit/_common/utils/__init__.py +1 -0
- unique_toolkit/_common/utils/files.py +43 -0
- unique_toolkit/_common/utils/structured_output/__init__.py +1 -0
- unique_toolkit/_common/utils/structured_output/schema.py +5 -0
- unique_toolkit/_common/utils/write_configuration.py +51 -0
- unique_toolkit/_common/validators.py +101 -4
- unique_toolkit/agentic/__init__.py +1 -0
- unique_toolkit/agentic/debug_info_manager/debug_info_manager.py +28 -0
- unique_toolkit/agentic/debug_info_manager/test/test_debug_info_manager.py +278 -0
- unique_toolkit/agentic/evaluation/config.py +36 -0
- unique_toolkit/{evaluators → agentic/evaluation}/context_relevancy/prompts.py +25 -0
- unique_toolkit/agentic/evaluation/context_relevancy/schema.py +80 -0
- unique_toolkit/agentic/evaluation/context_relevancy/service.py +273 -0
- unique_toolkit/agentic/evaluation/evaluation_manager.py +218 -0
- unique_toolkit/agentic/evaluation/hallucination/constants.py +61 -0
- unique_toolkit/agentic/evaluation/hallucination/hallucination_evaluation.py +111 -0
- unique_toolkit/{evaluators → agentic/evaluation}/hallucination/prompts.py +1 -1
- unique_toolkit/{evaluators → agentic/evaluation}/hallucination/service.py +16 -15
- unique_toolkit/{evaluators → agentic/evaluation}/hallucination/utils.py +30 -20
- unique_toolkit/{evaluators → agentic/evaluation}/output_parser.py +20 -2
- unique_toolkit/{evaluators → agentic/evaluation}/schemas.py +27 -7
- unique_toolkit/agentic/evaluation/tests/test_context_relevancy_service.py +253 -0
- unique_toolkit/agentic/evaluation/tests/test_output_parser.py +87 -0
- unique_toolkit/agentic/history_manager/history_construction_with_contents.py +297 -0
- unique_toolkit/agentic/history_manager/history_manager.py +242 -0
- unique_toolkit/agentic/history_manager/loop_token_reducer.py +484 -0
- unique_toolkit/agentic/history_manager/utils.py +96 -0
- unique_toolkit/agentic/postprocessor/postprocessor_manager.py +212 -0
- unique_toolkit/agentic/reference_manager/reference_manager.py +103 -0
- unique_toolkit/agentic/responses_api/__init__.py +19 -0
- unique_toolkit/agentic/responses_api/postprocessors/code_display.py +63 -0
- unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +145 -0
- unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
- unique_toolkit/agentic/short_term_memory_manager/persistent_short_term_memory_manager.py +141 -0
- unique_toolkit/agentic/thinking_manager/thinking_manager.py +103 -0
- unique_toolkit/agentic/tools/__init__.py +1 -0
- unique_toolkit/agentic/tools/a2a/__init__.py +36 -0
- unique_toolkit/agentic/tools/a2a/config.py +17 -0
- unique_toolkit/agentic/tools/a2a/evaluation/__init__.py +15 -0
- unique_toolkit/agentic/tools/a2a/evaluation/_utils.py +66 -0
- unique_toolkit/agentic/tools/a2a/evaluation/config.py +55 -0
- unique_toolkit/agentic/tools/a2a/evaluation/evaluator.py +260 -0
- unique_toolkit/agentic/tools/a2a/evaluation/summarization_user_message.j2 +9 -0
- unique_toolkit/agentic/tools/a2a/manager.py +55 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/__init__.py +21 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/_display_utils.py +185 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/_ref_utils.py +73 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/config.py +45 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/display.py +180 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/references.py +101 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display_utils.py +1335 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/test/test_ref_utils.py +603 -0
- unique_toolkit/agentic/tools/a2a/prompts.py +46 -0
- unique_toolkit/agentic/tools/a2a/response_watcher/__init__.py +6 -0
- unique_toolkit/agentic/tools/a2a/response_watcher/service.py +91 -0
- unique_toolkit/agentic/tools/a2a/tool/__init__.py +4 -0
- unique_toolkit/agentic/tools/a2a/tool/_memory.py +26 -0
- unique_toolkit/agentic/tools/a2a/tool/_schema.py +9 -0
- unique_toolkit/agentic/tools/a2a/tool/config.py +73 -0
- unique_toolkit/agentic/tools/a2a/tool/service.py +306 -0
- unique_toolkit/agentic/tools/agent_chunks_hanlder.py +65 -0
- unique_toolkit/agentic/tools/config.py +167 -0
- unique_toolkit/agentic/tools/factory.py +44 -0
- unique_toolkit/agentic/tools/mcp/__init__.py +4 -0
- unique_toolkit/agentic/tools/mcp/manager.py +71 -0
- unique_toolkit/agentic/tools/mcp/models.py +28 -0
- unique_toolkit/agentic/tools/mcp/tool_wrapper.py +234 -0
- unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
- unique_toolkit/agentic/tools/openai_builtin/base.py +30 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +57 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +230 -0
- unique_toolkit/agentic/tools/openai_builtin/manager.py +62 -0
- unique_toolkit/agentic/tools/schemas.py +141 -0
- unique_toolkit/agentic/tools/test/test_mcp_manager.py +536 -0
- unique_toolkit/agentic/tools/test/test_tool_progress_reporter.py +445 -0
- unique_toolkit/agentic/tools/tool.py +183 -0
- unique_toolkit/agentic/tools/tool_manager.py +523 -0
- unique_toolkit/agentic/tools/tool_progress_reporter.py +285 -0
- unique_toolkit/agentic/tools/utils/__init__.py +19 -0
- unique_toolkit/agentic/tools/utils/execution/__init__.py +1 -0
- unique_toolkit/agentic/tools/utils/execution/execution.py +286 -0
- unique_toolkit/agentic/tools/utils/source_handling/__init__.py +0 -0
- unique_toolkit/agentic/tools/utils/source_handling/schema.py +21 -0
- unique_toolkit/agentic/tools/utils/source_handling/source_formatting.py +207 -0
- unique_toolkit/agentic/tools/utils/source_handling/tests/test_source_formatting.py +216 -0
- unique_toolkit/app/__init__.py +6 -0
- unique_toolkit/app/dev_util.py +180 -0
- unique_toolkit/app/init_sdk.py +32 -1
- unique_toolkit/app/schemas.py +198 -31
- unique_toolkit/app/unique_settings.py +367 -0
- unique_toolkit/chat/__init__.py +8 -1
- unique_toolkit/chat/deprecated/service.py +232 -0
- unique_toolkit/chat/functions.py +642 -77
- unique_toolkit/chat/rendering.py +34 -0
- unique_toolkit/chat/responses_api.py +461 -0
- unique_toolkit/chat/schemas.py +133 -2
- unique_toolkit/chat/service.py +115 -767
- unique_toolkit/content/functions.py +153 -4
- unique_toolkit/content/schemas.py +122 -15
- unique_toolkit/content/service.py +278 -44
- unique_toolkit/content/smart_rules.py +301 -0
- unique_toolkit/content/utils.py +8 -3
- unique_toolkit/embedding/service.py +102 -11
- unique_toolkit/framework_utilities/__init__.py +1 -0
- unique_toolkit/framework_utilities/langchain/client.py +71 -0
- unique_toolkit/framework_utilities/langchain/history.py +19 -0
- unique_toolkit/framework_utilities/openai/__init__.py +6 -0
- unique_toolkit/framework_utilities/openai/client.py +83 -0
- unique_toolkit/framework_utilities/openai/message_builder.py +229 -0
- unique_toolkit/framework_utilities/utils.py +23 -0
- unique_toolkit/language_model/__init__.py +3 -0
- unique_toolkit/language_model/builder.py +27 -11
- unique_toolkit/language_model/default_language_model.py +3 -0
- unique_toolkit/language_model/functions.py +327 -43
- unique_toolkit/language_model/infos.py +992 -50
- unique_toolkit/language_model/reference.py +242 -0
- unique_toolkit/language_model/schemas.py +475 -48
- unique_toolkit/language_model/service.py +228 -27
- unique_toolkit/protocols/support.py +145 -0
- unique_toolkit/services/__init__.py +7 -0
- unique_toolkit/services/chat_service.py +1630 -0
- unique_toolkit/services/knowledge_base.py +861 -0
- unique_toolkit/short_term_memory/service.py +178 -41
- unique_toolkit/smart_rules/__init__.py +0 -0
- unique_toolkit/smart_rules/compile.py +56 -0
- unique_toolkit/test_utilities/events.py +197 -0
- {unique_toolkit-0.7.7.dist-info → unique_toolkit-1.23.0.dist-info}/METADATA +606 -7
- unique_toolkit-1.23.0.dist-info/RECORD +182 -0
- unique_toolkit/evaluators/__init__.py +0 -1
- unique_toolkit/evaluators/config.py +0 -35
- unique_toolkit/evaluators/constants.py +0 -1
- unique_toolkit/evaluators/context_relevancy/constants.py +0 -32
- unique_toolkit/evaluators/context_relevancy/service.py +0 -53
- unique_toolkit/evaluators/context_relevancy/utils.py +0 -142
- unique_toolkit/evaluators/hallucination/constants.py +0 -41
- unique_toolkit-0.7.7.dist-info/RECORD +0 -64
- /unique_toolkit/{evaluators → agentic/evaluation}/exception.py +0 -0
- {unique_toolkit-0.7.7.dist-info → unique_toolkit-1.23.0.dist-info}/LICENSE +0 -0
- {unique_toolkit-0.7.7.dist-info → unique_toolkit-1.23.0.dist-info}/WHEEL +0 -0
unique_toolkit/language_model/schemas.py +475 -48

```diff
@@ -1,22 +1,46 @@
 import json
 import math
 from enum import StrEnum
-from typing import Any,
+from typing import Any, Literal, Self, TypeVar
 from uuid import uuid4
 
 from humps import camelize
+from openai.types.chat import (
+    ChatCompletionAssistantMessageParam,
+    ChatCompletionSystemMessageParam,
+    ChatCompletionToolMessageParam,
+    ChatCompletionUserMessageParam,
+)
+from openai.types.chat.chat_completion_message_function_tool_call_param import (
+    ChatCompletionMessageFunctionToolCallParam,
+    Function,
+)
+from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
+from openai.types.responses import (
+    EasyInputMessageParam,
+    FunctionToolParam,
+    ResponseCodeInterpreterToolCall,
+    ResponseFunctionToolCallParam,
+    ResponseOutputItem,
+    ResponseOutputMessage,
+)
+from openai.types.responses.response_input_param import FunctionCallOutput
+from openai.types.responses.response_output_text import AnnotationContainerFileCitation
+from openai.types.shared_params.function_definition import FunctionDefinition
 from pydantic import (
     BaseModel,
     ConfigDict,
     Field,
     PrivateAttr,
     RootModel,
+    field_serializer,
     field_validator,
     model_serializer,
     model_validator,
 )
-from typing_extensions import deprecated
+from typing_extensions import deprecated, overload
 
+from unique_toolkit.content.schemas import ContentReference
 from unique_toolkit.language_model.utils import format_message
 
 # set config to convert camelCase to snake_case
```
```diff
@@ -27,6 +51,8 @@ model_config = ConfigDict(
 )
 
 
+# Equivalent to
+# from openai.types.chat.chat_completion_role import ChatCompletionRole
 class LanguageModelMessageRole(StrEnum):
     USER = "user"
     SYSTEM = "system"
```
```diff
@@ -34,33 +60,155 @@ class LanguageModelMessageRole(StrEnum):
     TOOL = "tool"
 
 
+# This is tailored to the unique backend
+class LanguageModelStreamResponseMessage(BaseModel):
+    model_config = model_config
+
+    id: str
+    previous_message_id: (
+        str | None
+    )  # Stream response can return a null previous_message_id if an assisstant message is manually added
+    role: LanguageModelMessageRole
+    text: str
+    original_text: str | None = None
+    references: list[ContentReference] = []
+
+    # TODO make sdk return role in lowercase
+    # Currently needed as sdk returns role in uppercase
+    @field_validator("role", mode="before")
+    def set_role(cls, value: str):
+        return value.lower()
+
+
 class LanguageModelFunction(BaseModel):
     model_config = model_config
 
-    id: str
+    id: str = Field(default_factory=lambda: uuid4().hex)
     name: str
-    arguments:
+    arguments: dict[str, Any] | None = None
 
     @field_validator("arguments", mode="before")
-    def set_arguments(cls, value):
+    def set_arguments(cls, value: Any) -> Any:
         if isinstance(value, str):
             return json.loads(value)
         return value
 
     @field_validator("id", mode="before")
-    def randomize_id(cls, value):
-
+    def randomize_id(cls, value: Any) -> Any:
+        if value is None or value == "":
+            return uuid4().hex
+        return value
 
     @model_serializer()
     def serialize_model(self):
         seralization = {}
-        if self.id:
-            seralization["id"] = self.id
         seralization["name"] = self.name
         if self.arguments:
             seralization["arguments"] = json.dumps(self.arguments)
         return seralization
 
+    def __eq__(self, other: object) -> bool:
+        """Compare two tool calls based on name and arguments."""
+        if not isinstance(other, LanguageModelFunction):
+            return False
+
+        if self.id != other.id:
+            return False
+
+        if self.name != other.name:
+            return False
+
+        if self.arguments != other.arguments:
+            return False
+
+        return True
+
+    @overload
+    def to_openai_param(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionMessageFunctionToolCallParam: ...
+
+    @overload
+    def to_openai_param(
+        self, mode: Literal["responses"]
+    ) -> ResponseFunctionToolCallParam: ...
+
+    def to_openai_param(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> ChatCompletionMessageFunctionToolCallParam | ResponseFunctionToolCallParam:
+        arguments = ""
+        if isinstance(self.arguments, dict):
+            arguments = json.dumps(self.arguments)
+        elif isinstance(self.arguments, str):
+            arguments = self.arguments
+
+        if mode == "completions":
+            return ChatCompletionMessageFunctionToolCallParam(
+                type="function",
+                id=self.id or "unknown_id",
+                function=Function(name=self.name, arguments=arguments),
+            )
+        elif mode == "responses":
+            if self.id is None:
+                raise ValueError("Missing tool call id")
+
+            return ResponseFunctionToolCallParam(
+                type="function_call",
+                call_id=self.id,
+                name=self.name,
+                arguments=arguments,
+            )
+
+
+class LanguageModelStreamResponse(BaseModel):
+    model_config = model_config
+
+    message: LanguageModelStreamResponseMessage
+    tool_calls: list[LanguageModelFunction] | None = None
+
+    def is_empty(self) -> bool:
+        """
+        Check if the stream response is empty.
+        An empty stream response has no text and no tool calls.
+        """
+        return not self.message.original_text and not self.tool_calls
+
+    def to_openai_param(self) -> ChatCompletionAssistantMessageParam:
+        return ChatCompletionAssistantMessageParam(
+            role="assistant",
+            audio=None,
+            content=self.message.text,
+            function_call=None,
+            refusal=None,
+            tool_calls=[t.to_openai_param() for t in self.tool_calls or []],
+        )
+
+
+OutputItemType = TypeVar("OutputItemType", bound=ResponseOutputItem)
+
+
+class ResponsesLanguageModelStreamResponse(LanguageModelStreamResponse):
+    output: list[ResponseOutputItem]
+
+    def filter_output(self, type: type[OutputItemType]) -> list[OutputItemType]:
+        return [item for item in self.output if isinstance(item, type)]
+
+    @property
+    def code_interpreter_calls(self) -> list[ResponseCodeInterpreterToolCall]:
+        return self.filter_output(ResponseCodeInterpreterToolCall)
+
+    @property
+    def container_files(self) -> list[AnnotationContainerFileCitation]:
+        container_files = []
+        messages = self.filter_output(ResponseOutputMessage)
+        for message in messages:
+            for content in message.content:
+                if content.type == "output_text":
+                    for annotation in content.annotations:
+                        if annotation.type == "container_file_citation":
+                            container_files.append(annotation)
+        return container_files
+
 
 class LanguageModelFunctionCall(BaseModel):
     model_config = model_config
```
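The `to_openai_param` overloads above bridge the toolkit's own tool-call model and the typed params of OpenAI's Chat Completions and Responses APIs. A minimal sketch of the two modes, assuming the import path from the file listing (the concrete values are illustrative):

```python
# Sketch: converting a toolkit tool call to both OpenAI param shapes.
from unique_toolkit.language_model.schemas import LanguageModelFunction

# arguments may be a dict; an id is generated when none is supplied
call = LanguageModelFunction(name="get_weather", arguments={"city": "Zurich"})

# Chat Completions shape: nested under "function", arguments re-serialized to JSON
completions_param = call.to_openai_param()
# -> {"type": "function", "id": ..., "function": {"name": ..., "arguments": ...}}

# Responses shape: flat, keyed by call_id instead of id
responses_param = call.to_openai_param(mode="responses")
# -> {"type": "function_call", "call_id": ..., "name": ..., "arguments": ...}
```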
```diff
@@ -69,10 +217,12 @@ class LanguageModelFunctionCall(BaseModel):
     type: str | None = None
     function: LanguageModelFunction
 
+    # TODO: Circular reference of types
+    @deprecated("Use LanguageModelAssistantMessage.from_functions instead.")
     @staticmethod
     def create_assistant_message_from_tool_calls(
         tool_calls: list[LanguageModelFunction],
-    ):
+    ) -> "LanguageModelAssistantMessage":
         assistant_message = LanguageModelAssistantMessage(
             content="",
             tool_calls=[
```
```diff
@@ -93,8 +243,7 @@ class LanguageModelMessage(BaseModel):
     content: str | list[dict] | None = None
 
     def __str__(self):
-
-        message = ""
+        message = ""
         if isinstance(self.content, str):
             message = self.content
         elif isinstance(self.content, list):
```
```diff
@@ -110,6 +259,30 @@ class LanguageModelSystemMessage(LanguageModelMessage):
     def set_role(cls, value):
         return LanguageModelMessageRole.SYSTEM
 
+    @overload
+    def to_openai(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionSystemMessageParam: ...
+
+    @overload
+    def to_openai(self, mode: Literal["responses"]) -> EasyInputMessageParam: ...
+
+    def to_openai(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> ChatCompletionSystemMessageParam | EasyInputMessageParam:
+        content = self.content or ""
+        if not isinstance(content, str):
+            raise ValueError("Content must be a string")
+
+        if mode == "completions":
+            return ChatCompletionSystemMessageParam(role="system", content=content)
+        elif mode == "responses":
+            return EasyInputMessageParam(role="user", content=content)
+
+
+# Equivalent to
+# from openai.types.chat.chat_completion_user_message_param import ChatCompletionUserMessageParam
+
 
 class LanguageModelUserMessage(LanguageModelMessage):
     role: LanguageModelMessageRole = LanguageModelMessageRole.USER
```
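One detail worth flagging in the hunk above: in `responses` mode the system prompt is emitted with `role="user"`, not `role="system"`. A quick sketch, with an illustrative prompt:

```python
# Sketch: system message conversion in the two modes.
from unique_toolkit.language_model.schemas import LanguageModelSystemMessage

sys_msg = LanguageModelSystemMessage(content="Answer in French.")

sys_msg.to_openai()                  # {"role": "system", "content": "Answer in French."}
sys_msg.to_openai(mode="responses")  # {"role": "user", "content": "Answer in French."}
```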
```diff
@@ -118,7 +291,29 @@ class LanguageModelUserMessage(LanguageModelMessage):
     def set_role(cls, value):
         return LanguageModelMessageRole.USER
 
+    @overload
+    def to_openai(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionUserMessageParam: ...
+
+    @overload
+    def to_openai(self, mode: Literal["responses"]) -> EasyInputMessageParam: ...
+
+    def to_openai(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> ChatCompletionUserMessageParam | EasyInputMessageParam:
+        content = self.content or ""
+        if not isinstance(content, str):
+            raise ValueError("Content must be a string")
+
+        if mode == "completions":
+            return ChatCompletionUserMessageParam(role="user", content=content)
+        elif mode == "responses":
+            return EasyInputMessageParam(role="user", content=content)
+
 
+# Equivalent to
+# from openai.types.chat.chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam
 class LanguageModelAssistantMessage(LanguageModelMessage):
     role: LanguageModelMessageRole = LanguageModelMessageRole.ASSISTANT
     parsed: dict | None = None
```
```diff
@@ -129,6 +324,91 @@ class LanguageModelAssistantMessage(LanguageModelMessage):
     def set_role(cls, value):
         return LanguageModelMessageRole.ASSISTANT
 
+    @classmethod
+    def from_functions(
+        cls,
+        tool_calls: list[LanguageModelFunction],
+    ):
+        return cls(
+            content="",
+            tool_calls=[
+                LanguageModelFunctionCall(
+                    id=tool_call.id,
+                    type="function",
+                    function=tool_call,
+                )
+                for tool_call in tool_calls
+            ],
+        )
+
+    @classmethod
+    def from_stream_response(cls, response: LanguageModelStreamResponse):
+        tool_calls = [
+            LanguageModelFunctionCall(
+                id=f.id,
+                type="function",
+                function=f,
+            )
+            for f in response.tool_calls or []
+        ]
+
+        tool_calls = tool_calls if len(tool_calls) > 0 else None
+
+        return cls(
+            content=response.message.text,
+            parsed=None,
+            refusal=None,
+            tool_calls=tool_calls,
+        )
+
+    @overload
+    def to_openai(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionAssistantMessageParam: ...
+
+    @overload
+    def to_openai(
+        self, mode: Literal["responses"]
+    ) -> list[EasyInputMessageParam | ResponseFunctionToolCallParam]: ...
+
+    def to_openai(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> (
+        ChatCompletionAssistantMessageParam
+        | list[EasyInputMessageParam | ResponseFunctionToolCallParam]
+    ):
+        content = self.content or ""
+        if not isinstance(content, str):
+            raise ValueError("Content must be a string")
+
+        if mode == "completions":
+            return ChatCompletionAssistantMessageParam(
+                role="assistant",
+                content=content,
+                tool_calls=[
+                    t.function.to_openai_param() for t in self.tool_calls or []
+                ],
+            )
+        elif mode == "responses":
+            """
+            Responses API does not support assistant messages with tool calls
+            """
+            res = []
+            if content != "":
+                res.append(EasyInputMessageParam(role="assistant", content=content))
+            if self.tool_calls:
+                res.extend(
+                    [
+                        t.function.to_openai_param(mode="responses")
+                        for t in self.tool_calls
+                    ]
+                )
+            return res
+
+
+# Equivalent to
+# from openai.types.chat.chat_completion_tool_message_param import ChatCompletionToolMessageParam
+
 
 class LanguageModelToolMessage(LanguageModelMessage):
     role: LanguageModelMessageRole = LanguageModelMessageRole.TOOL
```
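Note the asymmetry for assistant turns: `completions` mode yields a single message param carrying `tool_calls`, while `responses` mode expands the turn into a list of input items, one `function_call` item per tool call, since the Responses API models tool calls as separate items. A hedged sketch (values illustrative):

```python
# Sketch: one assistant turn, two OpenAI input shapes.
from unique_toolkit.language_model.schemas import (
    LanguageModelAssistantMessage,
    LanguageModelFunction,
)

msg = LanguageModelAssistantMessage.from_functions(
    tool_calls=[LanguageModelFunction(name="search", arguments={"q": "fx rates"})]
)

# Completions: a single assistant message param with a tool_calls list.
as_completions = msg.to_openai()

# Responses: a list of items; content is "" here, so only the
# function_call items are emitted.
as_responses = msg.to_openai(mode="responses")
```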
```diff
@@ -146,15 +426,84 @@ class LanguageModelToolMessage(LanguageModelMessage):
     def set_role(cls, value):
         return LanguageModelMessageRole.TOOL
 
+    @overload
+    def to_openai(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionToolMessageParam: ...
+
+    @overload
+    def to_openai(self, mode: Literal["responses"]) -> FunctionCallOutput: ...
+
+    def to_openai(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> ChatCompletionToolMessageParam | FunctionCallOutput:
+        content = self.content or ""
+        if not isinstance(content, str):
+            raise ValueError("Content must be a string")
+
+        if mode == "completions":
+            return ChatCompletionToolMessageParam(
+                role="tool",
+                content=content,
+                tool_call_id=self.tool_call_id,
+            )
+        elif mode == "responses":
+            return FunctionCallOutput(
+                call_id=self.tool_call_id,
+                output=content,
+                type="function_call_output",
+            )
+
+
+# Equivalent implementation for list of
+# from openai.types.chat.chat_completion_tool_message_param import ChatCompletionToolMessageParam
+# with the addition of the builder
+
+LanguageModelMessageOptions = (
+    LanguageModelMessage
+    | LanguageModelToolMessage
+    | LanguageModelAssistantMessage
+    | LanguageModelSystemMessage
+    | LanguageModelUserMessage
+)
+
 
 class LanguageModelMessages(RootModel):
-    root: list[
-
-
-
-
-
-
+    root: list[LanguageModelMessageOptions]
+
+    @classmethod
+    def load_messages_to_root(cls, data: list[dict] | dict) -> Self:
+        """Convert list of dictionaries to appropriate message objects based on role."""
+        # Handle case where data is already wrapped in root
+        if isinstance(data, dict) and "root" in data:
+            messages_list = data["root"]
+        elif isinstance(data, list):
+            messages_list = data
+        else:
+            raise ValueError("Invalid data type")
+
+        # Convert the messages list
+        converted_messages = []
+        for item in messages_list:
+            if isinstance(item, dict):
+                role = item.get("role", "").lower()
+
+                # Map dictionary to appropriate message class based on role
+                if role == "system":
+                    converted_messages.append(LanguageModelSystemMessage(**item))
+                elif role == "user":
+                    converted_messages.append(LanguageModelUserMessage(**item))
+                elif role == "assistant":
+                    converted_messages.append(LanguageModelAssistantMessage(**item))
+                elif role == "tool":
+                    converted_messages.append(LanguageModelToolMessage(**item))
+                else:
+                    # Fallback to base LanguageModelMessage
+                    converted_messages.append(LanguageModelMessage(**item))
+            else:
+                # If it's already a message object, keep it as is
+                converted_messages.append(item)
+        return cls(root=converted_messages)
 
     def __str__(self):
         return "\n\n".join([str(message) for message in self.root])
```
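`load_messages_to_root` dispatches on the `role` key, so plain dicts (for example a deserialized chat history) are revived as the correct message subclass, with unknown roles falling back to the base class. A small sketch:

```python
# Sketch: reviving typed messages from plain dicts by role.
from unique_toolkit.language_model.schemas import (
    LanguageModelMessages,
    LanguageModelSystemMessage,
    LanguageModelUserMessage,
)

messages = LanguageModelMessages.load_messages_to_root(
    [
        {"role": "system", "content": "You are terse."},
        {"role": "user", "content": "Ping?"},
    ]
)

assert isinstance(messages.root[0], LanguageModelSystemMessage)
assert isinstance(messages.root[1], LanguageModelUserMessage)
```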
```diff
@@ -174,6 +523,11 @@ class LanguageModelMessages(RootModel):
         return builder
 
 
+# This seems similar to
+# from openai.types.completion_choice import CompletionChoice
+# but is missing multiple attributes and uses message instead of text
+
+
 class LanguageModelCompletionChoice(BaseModel):
     model_config = model_config
 
```
```diff
@@ -182,38 +536,26 @@ class LanguageModelCompletionChoice(BaseModel):
     finish_reason: str
 
 
+# This seems similar to
+# from openai.types.completion import Completion
+# but is missing multiple attributes
 class LanguageModelResponse(BaseModel):
     model_config = model_config
 
     choices: list[LanguageModelCompletionChoice]
 
+    @classmethod
+    def from_stream_response(cls, response: LanguageModelStreamResponse):
+        choice = LanguageModelCompletionChoice(
+            index=0,
+            message=LanguageModelAssistantMessage.from_stream_response(response),
+            finish_reason="",
+        )
 
-class LanguageModelStreamResponseMessage(BaseModel):
-    model_config = model_config
-
-    id: str
-    previous_message_id: (
-        str | None
-    )  # Stream response can return a null previous_message_id if an assisstant message is manually added
-    role: LanguageModelMessageRole
-    text: str
-    original_text: str | None = None
-    references: list[dict[str, list | dict | str | int | float | bool]] = []  # type: ignore
-
-    # TODO make sdk return role in lowercase
-    # Currently needed as sdk returns role in uppercase
-    @field_validator("role", mode="before")
-    def set_role(cls, value: str):
-        return value.lower()
-
-
-class LanguageModelStreamResponse(BaseModel):
-    model_config = model_config
-
-    message: LanguageModelStreamResponseMessage
-    tool_calls: Optional[list[LanguageModelFunction]] = None
+        return cls(choices=[choice])
 
 
+# This is tailored for unique and only used in language model info
 class LanguageModelTokenLimits(BaseModel):
     token_limit_input: int
     token_limit_output: int
```
```diff
@@ -255,29 +597,46 @@ class LanguageModelTokenLimits(BaseModel):
 
             data["token_limit_input"] = math.floor(fraction_input * token_limit)
             data["token_limit_output"] = math.floor(
-                (1 - fraction_input) * token_limit
+                (1 - fraction_input) * token_limit,
             )
             data["_fraction_adaptpable"] = True
             return data
 
         raise ValueError(
-            'Either "token_limit_input" and "token_limit_output" must be provided together, or "token_limit" must be provided.'
+            'Either "token_limit_input" and "token_limit_output" must be provided together, or "token_limit" must be provided.',
         )
 
 
+# This is more restrictive than what openai allows
+
+
+@deprecated(
+    "Deprecated as `LanguageModelTool` is deprecated in favor of `LanguageModelToolDescription`",
+)
 class LanguageModelToolParameterProperty(BaseModel):
     type: str
     description: str
-    enum:
-    items:
+    enum: list[Any] | None = None
+    items: Self | None = None
 
 
+# Looks most like
+# from openai.types.shared.function_parameters import FunctionParameters
+@deprecated(
+    "Deprecated as `LanguageModelTool` is deprecated in favor of `LanguageModelToolDescription`",
+)
 class LanguageModelToolParameters(BaseModel):
     type: str = "object"
     properties: dict[str, LanguageModelToolParameterProperty]
     required: list[str]
 
 
+# Looks most like
+# from openai.types.shared_params.function_definition import FunctionDefinition
+# but returns parameter is not known
+@deprecated(
+    "Deprecated as `LanguageModelTool` use `LanguageModelToolDescription` instead",
+)
 class LanguageModelTool(BaseModel):
     name: str = Field(
         ...,
```
```diff
@@ -286,8 +645,76 @@ class LanguageModelTool(BaseModel):
     )
     description: str
     parameters: (
-        LanguageModelToolParameters | dict
+        LanguageModelToolParameters | dict[str, Any]
     )  # dict represents json schema dumped from pydantic
     returns: LanguageModelToolParameterProperty | LanguageModelToolParameters | None = (
         None
     )
+
+
+class LanguageModelToolDescription(BaseModel):
+    name: str = Field(
+        ...,
+        pattern=r"^[a-zA-Z1-9_-]+$",
+        description="Name must adhere to the pattern ^[a-zA-Z1-9_-]+$",
+    )
+    description: str = Field(
+        ...,
+        description="Description of what the tool is doing the tool",
+    )
+    parameters: type[BaseModel] | dict[str, Any] = Field(
+        ...,
+        description="Pydantic model for the tool parameters",
+        union_mode="left_to_right",
+    )
+
+    # TODO: This should be default `True` but if this is the case the parameter_model needs to include additional properties
+    strict: bool = Field(
+        default=False,
+        description="Setting strict to true will ensure function calls reliably adhere to the function schema, instead of being best effort. If set to True the `parameter_model` set `model_config = {'extra':'forbid'}` must be set for on all BaseModels.",
+    )
+
+    @field_serializer("parameters")
+    def serialize_parameters(
+        self, parameters: type[BaseModel] | dict[str, Any]
+    ) -> dict[str, Any]:
+        return _parameters_as_json_schema(parameters)
+
+    @overload
+    def to_openai(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionToolParam: ...
+
+    @overload
+    def to_openai(self, mode: Literal["responses"]) -> FunctionToolParam: ...
+
+    def to_openai(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> ChatCompletionToolParam | FunctionToolParam:
+        if mode == "completions":
+            return ChatCompletionToolParam(
+                function=FunctionDefinition(
+                    name=self.name,
+                    description=self.description,
+                    parameters=_parameters_as_json_schema(self.parameters),
+                    strict=self.strict,
+                ),
+                type="function",
+            )
+        elif mode == "responses":
+            return FunctionToolParam(
+                type="function",
+                name=self.name,
+                parameters=_parameters_as_json_schema(self.parameters),
+                strict=self.strict,
+                description=self.description,
+            )
+
+
+def _parameters_as_json_schema(
+    parameters: type[BaseModel] | dict[str, Any],
+) -> dict[str, Any]:
+    if isinstance(parameters, dict):
+        return parameters
+
+    return parameters.model_json_schema()
```