unique_toolkit 0.7.7__py3-none-any.whl → 1.23.0__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Potentially problematic release.
This version of unique_toolkit might be problematic.
- unique_toolkit/__init__.py +28 -1
- unique_toolkit/_common/api_calling/human_verification_manager.py +343 -0
- unique_toolkit/_common/base_model_type_attribute.py +303 -0
- unique_toolkit/_common/chunk_relevancy_sorter/config.py +49 -0
- unique_toolkit/_common/chunk_relevancy_sorter/exception.py +5 -0
- unique_toolkit/_common/chunk_relevancy_sorter/schemas.py +46 -0
- unique_toolkit/_common/chunk_relevancy_sorter/service.py +374 -0
- unique_toolkit/_common/chunk_relevancy_sorter/tests/test_service.py +275 -0
- unique_toolkit/_common/default_language_model.py +12 -0
- unique_toolkit/_common/docx_generator/__init__.py +7 -0
- unique_toolkit/_common/docx_generator/config.py +12 -0
- unique_toolkit/_common/docx_generator/schemas.py +80 -0
- unique_toolkit/_common/docx_generator/service.py +252 -0
- unique_toolkit/_common/docx_generator/template/Doc Template.docx +0 -0
- unique_toolkit/_common/endpoint_builder.py +305 -0
- unique_toolkit/_common/endpoint_requestor.py +430 -0
- unique_toolkit/_common/exception.py +24 -0
- unique_toolkit/_common/feature_flags/schema.py +9 -0
- unique_toolkit/_common/pydantic/rjsf_tags.py +936 -0
- unique_toolkit/_common/pydantic_helpers.py +154 -0
- unique_toolkit/_common/referencing.py +53 -0
- unique_toolkit/_common/string_utilities.py +140 -0
- unique_toolkit/_common/tests/test_referencing.py +521 -0
- unique_toolkit/_common/tests/test_string_utilities.py +506 -0
- unique_toolkit/_common/token/image_token_counting.py +67 -0
- unique_toolkit/_common/token/token_counting.py +204 -0
- unique_toolkit/_common/utils/__init__.py +1 -0
- unique_toolkit/_common/utils/files.py +43 -0
- unique_toolkit/_common/utils/structured_output/__init__.py +1 -0
- unique_toolkit/_common/utils/structured_output/schema.py +5 -0
- unique_toolkit/_common/utils/write_configuration.py +51 -0
- unique_toolkit/_common/validators.py +101 -4
- unique_toolkit/agentic/__init__.py +1 -0
- unique_toolkit/agentic/debug_info_manager/debug_info_manager.py +28 -0
- unique_toolkit/agentic/debug_info_manager/test/test_debug_info_manager.py +278 -0
- unique_toolkit/agentic/evaluation/config.py +36 -0
- unique_toolkit/{evaluators → agentic/evaluation}/context_relevancy/prompts.py +25 -0
- unique_toolkit/agentic/evaluation/context_relevancy/schema.py +80 -0
- unique_toolkit/agentic/evaluation/context_relevancy/service.py +273 -0
- unique_toolkit/agentic/evaluation/evaluation_manager.py +218 -0
- unique_toolkit/agentic/evaluation/hallucination/constants.py +61 -0
- unique_toolkit/agentic/evaluation/hallucination/hallucination_evaluation.py +111 -0
- unique_toolkit/{evaluators → agentic/evaluation}/hallucination/prompts.py +1 -1
- unique_toolkit/{evaluators → agentic/evaluation}/hallucination/service.py +16 -15
- unique_toolkit/{evaluators → agentic/evaluation}/hallucination/utils.py +30 -20
- unique_toolkit/{evaluators → agentic/evaluation}/output_parser.py +20 -2
- unique_toolkit/{evaluators → agentic/evaluation}/schemas.py +27 -7
- unique_toolkit/agentic/evaluation/tests/test_context_relevancy_service.py +253 -0
- unique_toolkit/agentic/evaluation/tests/test_output_parser.py +87 -0
- unique_toolkit/agentic/history_manager/history_construction_with_contents.py +297 -0
- unique_toolkit/agentic/history_manager/history_manager.py +242 -0
- unique_toolkit/agentic/history_manager/loop_token_reducer.py +484 -0
- unique_toolkit/agentic/history_manager/utils.py +96 -0
- unique_toolkit/agentic/postprocessor/postprocessor_manager.py +212 -0
- unique_toolkit/agentic/reference_manager/reference_manager.py +103 -0
- unique_toolkit/agentic/responses_api/__init__.py +19 -0
- unique_toolkit/agentic/responses_api/postprocessors/code_display.py +63 -0
- unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +145 -0
- unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
- unique_toolkit/agentic/short_term_memory_manager/persistent_short_term_memory_manager.py +141 -0
- unique_toolkit/agentic/thinking_manager/thinking_manager.py +103 -0
- unique_toolkit/agentic/tools/__init__.py +1 -0
- unique_toolkit/agentic/tools/a2a/__init__.py +36 -0
- unique_toolkit/agentic/tools/a2a/config.py +17 -0
- unique_toolkit/agentic/tools/a2a/evaluation/__init__.py +15 -0
- unique_toolkit/agentic/tools/a2a/evaluation/_utils.py +66 -0
- unique_toolkit/agentic/tools/a2a/evaluation/config.py +55 -0
- unique_toolkit/agentic/tools/a2a/evaluation/evaluator.py +260 -0
- unique_toolkit/agentic/tools/a2a/evaluation/summarization_user_message.j2 +9 -0
- unique_toolkit/agentic/tools/a2a/manager.py +55 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/__init__.py +21 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/_display_utils.py +185 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/_ref_utils.py +73 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/config.py +45 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/display.py +180 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/references.py +101 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display_utils.py +1335 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/test/test_ref_utils.py +603 -0
- unique_toolkit/agentic/tools/a2a/prompts.py +46 -0
- unique_toolkit/agentic/tools/a2a/response_watcher/__init__.py +6 -0
- unique_toolkit/agentic/tools/a2a/response_watcher/service.py +91 -0
- unique_toolkit/agentic/tools/a2a/tool/__init__.py +4 -0
- unique_toolkit/agentic/tools/a2a/tool/_memory.py +26 -0
- unique_toolkit/agentic/tools/a2a/tool/_schema.py +9 -0
- unique_toolkit/agentic/tools/a2a/tool/config.py +73 -0
- unique_toolkit/agentic/tools/a2a/tool/service.py +306 -0
- unique_toolkit/agentic/tools/agent_chunks_hanlder.py +65 -0
- unique_toolkit/agentic/tools/config.py +167 -0
- unique_toolkit/agentic/tools/factory.py +44 -0
- unique_toolkit/agentic/tools/mcp/__init__.py +4 -0
- unique_toolkit/agentic/tools/mcp/manager.py +71 -0
- unique_toolkit/agentic/tools/mcp/models.py +28 -0
- unique_toolkit/agentic/tools/mcp/tool_wrapper.py +234 -0
- unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
- unique_toolkit/agentic/tools/openai_builtin/base.py +30 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +57 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +230 -0
- unique_toolkit/agentic/tools/openai_builtin/manager.py +62 -0
- unique_toolkit/agentic/tools/schemas.py +141 -0
- unique_toolkit/agentic/tools/test/test_mcp_manager.py +536 -0
- unique_toolkit/agentic/tools/test/test_tool_progress_reporter.py +445 -0
- unique_toolkit/agentic/tools/tool.py +183 -0
- unique_toolkit/agentic/tools/tool_manager.py +523 -0
- unique_toolkit/agentic/tools/tool_progress_reporter.py +285 -0
- unique_toolkit/agentic/tools/utils/__init__.py +19 -0
- unique_toolkit/agentic/tools/utils/execution/__init__.py +1 -0
- unique_toolkit/agentic/tools/utils/execution/execution.py +286 -0
- unique_toolkit/agentic/tools/utils/source_handling/__init__.py +0 -0
- unique_toolkit/agentic/tools/utils/source_handling/schema.py +21 -0
- unique_toolkit/agentic/tools/utils/source_handling/source_formatting.py +207 -0
- unique_toolkit/agentic/tools/utils/source_handling/tests/test_source_formatting.py +216 -0
- unique_toolkit/app/__init__.py +6 -0
- unique_toolkit/app/dev_util.py +180 -0
- unique_toolkit/app/init_sdk.py +32 -1
- unique_toolkit/app/schemas.py +198 -31
- unique_toolkit/app/unique_settings.py +367 -0
- unique_toolkit/chat/__init__.py +8 -1
- unique_toolkit/chat/deprecated/service.py +232 -0
- unique_toolkit/chat/functions.py +642 -77
- unique_toolkit/chat/rendering.py +34 -0
- unique_toolkit/chat/responses_api.py +461 -0
- unique_toolkit/chat/schemas.py +133 -2
- unique_toolkit/chat/service.py +115 -767
- unique_toolkit/content/functions.py +153 -4
- unique_toolkit/content/schemas.py +122 -15
- unique_toolkit/content/service.py +278 -44
- unique_toolkit/content/smart_rules.py +301 -0
- unique_toolkit/content/utils.py +8 -3
- unique_toolkit/embedding/service.py +102 -11
- unique_toolkit/framework_utilities/__init__.py +1 -0
- unique_toolkit/framework_utilities/langchain/client.py +71 -0
- unique_toolkit/framework_utilities/langchain/history.py +19 -0
- unique_toolkit/framework_utilities/openai/__init__.py +6 -0
- unique_toolkit/framework_utilities/openai/client.py +83 -0
- unique_toolkit/framework_utilities/openai/message_builder.py +229 -0
- unique_toolkit/framework_utilities/utils.py +23 -0
- unique_toolkit/language_model/__init__.py +3 -0
- unique_toolkit/language_model/builder.py +27 -11
- unique_toolkit/language_model/default_language_model.py +3 -0
- unique_toolkit/language_model/functions.py +327 -43
- unique_toolkit/language_model/infos.py +992 -50
- unique_toolkit/language_model/reference.py +242 -0
- unique_toolkit/language_model/schemas.py +475 -48
- unique_toolkit/language_model/service.py +228 -27
- unique_toolkit/protocols/support.py +145 -0
- unique_toolkit/services/__init__.py +7 -0
- unique_toolkit/services/chat_service.py +1630 -0
- unique_toolkit/services/knowledge_base.py +861 -0
- unique_toolkit/short_term_memory/service.py +178 -41
- unique_toolkit/smart_rules/__init__.py +0 -0
- unique_toolkit/smart_rules/compile.py +56 -0
- unique_toolkit/test_utilities/events.py +197 -0
- {unique_toolkit-0.7.7.dist-info → unique_toolkit-1.23.0.dist-info}/METADATA +606 -7
- unique_toolkit-1.23.0.dist-info/RECORD +182 -0
- unique_toolkit/evaluators/__init__.py +0 -1
- unique_toolkit/evaluators/config.py +0 -35
- unique_toolkit/evaluators/constants.py +0 -1
- unique_toolkit/evaluators/context_relevancy/constants.py +0 -32
- unique_toolkit/evaluators/context_relevancy/service.py +0 -53
- unique_toolkit/evaluators/context_relevancy/utils.py +0 -142
- unique_toolkit/evaluators/hallucination/constants.py +0 -41
- unique_toolkit-0.7.7.dist-info/RECORD +0 -64
- /unique_toolkit/{evaluators → agentic/evaluation}/exception.py +0 -0
- {unique_toolkit-0.7.7.dist-info → unique_toolkit-1.23.0.dist-info}/LICENSE +0 -0
- {unique_toolkit-0.7.7.dist-info → unique_toolkit-1.23.0.dist-info}/WHEEL +0 -0
unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py (new file)
@@ -0,0 +1,230 @@
+import logging
+from typing import override
+
+from openai import AsyncOpenAI, BaseModel, NotFoundError
+from openai.types.responses.tool_param import CodeInterpreter
+
+from unique_toolkit import ContentService, ShortTermMemoryService
+from unique_toolkit.agentic.short_term_memory_manager.persistent_short_term_memory_manager import (
+    PersistentShortMemoryManager,
+)
+from unique_toolkit.agentic.tools.openai_builtin.base import (
+    OpenAIBuiltInTool,
+    OpenAIBuiltInToolName,
+)
+from unique_toolkit.agentic.tools.openai_builtin.code_interpreter.config import (
+    OpenAICodeInterpreterConfig,
+)
+from unique_toolkit.agentic.tools.schemas import ToolPrompts
+from unique_toolkit.content.schemas import (
+    Content,
+)
+
+logger = logging.getLogger(__name__)
+
+
+_SHORT_TERM_MEMORY_NAME = "container_code_execution"
+
+
+class CodeExecutionShortTermMemorySchema(BaseModel):
+    container_id: str | None = None
+    file_ids: dict[str, str] = {}  # Mapping of unique file id to openai file id
+
+
+CodeExecutionMemoryManager = PersistentShortMemoryManager[
+    CodeExecutionShortTermMemorySchema
+]
+
+
+def _get_container_code_execution_short_term_memory_manager(
+    company_id: str, user_id: str, chat_id: str
+) -> CodeExecutionMemoryManager:
+    short_term_memory_service = ShortTermMemoryService(
+        company_id=company_id,
+        user_id=user_id,
+        chat_id=chat_id,
+        message_id=None,
+    )
+    short_term_memory_manager = PersistentShortMemoryManager(
+        short_term_memory_service=short_term_memory_service,
+        short_term_memory_schema=CodeExecutionShortTermMemorySchema,
+        short_term_memory_name=_SHORT_TERM_MEMORY_NAME,
+    )
+    return short_term_memory_manager
+
+
+async def _create_container_if_not_exists(
+    client: AsyncOpenAI,
+    chat_id: str,
+    user_id: str,
+    company_id: str,
+    expires_after_minutes: int,
+    memory: CodeExecutionShortTermMemorySchema | None = None,
+) -> CodeExecutionShortTermMemorySchema:
+    if memory is not None:
+        logger.info("Container found in short term memory")
+    else:
+        logger.info("No Container in short term memory, creating a new container")
+        memory = CodeExecutionShortTermMemorySchema()
+
+    container_id = memory.container_id
+
+    if container_id is not None:
+        try:
+            container = await client.containers.retrieve(container_id)
+            if container.status not in ["active", "running"]:
+                logger.info(
+                    "Container has status `%s`, recreating a new one", container.status
+                )
+                container_id = None
+        except NotFoundError:
+            container_id = None
+
+    if container_id is None:
+        memory = CodeExecutionShortTermMemorySchema()
+
+        container = await client.containers.create(
+            name=f"code_execution_{company_id}_{user_id}_{chat_id}",
+            expires_after={
+                "anchor": "last_active_at",
+                "minutes": expires_after_minutes,
+            },
+        )
+
+        memory.container_id = container.id
+
+    return memory
+
+
+async def _upload_files_to_container(
+    client: AsyncOpenAI,
+    uploaded_files: list[Content],
+    memory: CodeExecutionShortTermMemorySchema,
+    content_service: ContentService,
+    chat_id: str,
+) -> CodeExecutionShortTermMemorySchema:
+    container_id = memory.container_id
+
+    assert container_id is not None
+
+    memory = memory.model_copy(deep=True)
+
+    for file in uploaded_files:
+        upload = True
+        if file.id in memory.file_ids:
+            try:
+                _ = await client.containers.files.retrieve(
+                    container_id=container_id, file_id=memory.file_ids[file.id]
+                )
+                logger.info("File with id %s already uploaded to container", file.id)
+                upload = False
+            except NotFoundError:
+                upload = True
+
+        if upload:
+            logger.info(
+                "Uploding file %s to container %s", file.id, memory.container_id
+            )
+            file_content = content_service.download_content_to_bytes(
+                content_id=file.id, chat_id=chat_id
+            )  # TODO: Use async version when available
+
+            openai_file = await client.containers.files.create(
+                container_id=container_id,
+                file=(file.key, file_content),
+            )
+            memory.file_ids[file.id] = openai_file.id
+
+    return memory
+
+
+class OpenAICodeInterpreterTool(OpenAIBuiltInTool[CodeInterpreter]):
+    DISPLAY_NAME = "Code Interpreter"
+
+    def __init__(
+        self,
+        config: OpenAICodeInterpreterConfig,
+        container_id: str | None,
+    ):
+        self._config = config
+
+        if not config.use_auto_container and container_id is None:
+            raise ValueError("`container_id` required when not using `auto` containers")
+
+        self._container_id = container_id
+
+    @property
+    @override
+    def name(self) -> OpenAIBuiltInToolName:
+        return OpenAIBuiltInToolName.CODE_INTERPRETER
+
+    @override
+    def tool_description(self) -> CodeInterpreter:
+        if self._config.use_auto_container:
+            return {"container": {"type": "auto"}, "type": "code_interpreter"}
+
+        return {
+            "container": self._container_id,  # type: ignore
+            "type": "code_interpreter",
+        }
+
+    @classmethod
+    async def build_tool(
+        cls,
+        config: OpenAICodeInterpreterConfig,
+        uploaded_files: list[Content],
+        client: AsyncOpenAI,
+        content_service: ContentService,
+        company_id: str,
+        user_id: str,
+        chat_id: str,
+    ) -> "OpenAICodeInterpreterTool":
+        if config.use_auto_container:
+            logger.info("Using `auto` container setting")
+            return cls(config=config, container_id=None)
+
+        memory_manager = _get_container_code_execution_short_term_memory_manager(
+            company_id=company_id,
+            user_id=user_id,
+            chat_id=chat_id,
+        )
+
+        memory = await memory_manager.load_async()
+
+        memory = await _create_container_if_not_exists(
+            client=client,
+            memory=memory,
+            chat_id=chat_id,
+            user_id=user_id,
+            company_id=company_id,
+            expires_after_minutes=config.expires_after_minutes,
+        )
+
+        memory = await _upload_files_to_container(
+            client=client,
+            uploaded_files=uploaded_files,
+            content_service=content_service,
+            chat_id=chat_id,
+            memory=memory,
+        )
+
+        await memory_manager.save_async(memory)
+
+        assert memory.container_id is not None
+
+        return OpenAICodeInterpreterTool(
+            config=config, container_id=memory.container_id
+        )
+
+    @override
+    def get_tool_prompts(self) -> ToolPrompts:
+        return ToolPrompts(
+            name="the python tool",  # https://platform.openai.com/docs/guides/tools-code-interpreter
+            display_name=self.DISPLAY_NAME,
+            tool_description=self._config.tool_description,
+            tool_system_prompt=self._config.tool_description_for_system_prompt,
+            tool_format_information_for_system_prompt=self._config.tool_format_information_for_system_prompt,
+            tool_user_prompt=self._config.tool_description_for_user_prompt,
+            tool_format_information_for_user_prompt=self._config.tool_format_information_for_user_prompt,
+            input_model={},
+        )
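The hunk above persists the code-interpreter container id and the uploaded file ids in chat-scoped short-term memory, so repeated calls in the same chat reuse an existing container instead of creating a new one. Below is a minimal, hypothetical sketch of how a caller might build the tool. `build_code_interpreter` is an illustrative helper, and the bare `OpenAICodeInterpreterConfig()` call assumes the config's fields have usable defaults, which this diff does not show.

from openai import AsyncOpenAI

from unique_toolkit.agentic.tools.openai_builtin.code_interpreter import (
    OpenAICodeInterpreterConfig,
    OpenAICodeInterpreterTool,
)
from unique_toolkit.content.service import ContentService


async def build_code_interpreter(
    client: AsyncOpenAI,
    content_service: ContentService,
    company_id: str,
    user_id: str,
    chat_id: str,
) -> OpenAICodeInterpreterTool:
    # Illustrative only: assumes the config's fields all have defaults.
    config = OpenAICodeInterpreterConfig()
    # If config.use_auto_container is True, build_tool returns an "auto"
    # container tool; otherwise it reuses the container id stored in
    # short-term memory and recreates the container once it has expired.
    tool = await OpenAICodeInterpreterTool.build_tool(
        config=config,
        uploaded_files=[],  # no chat uploads in this sketch
        client=client,
        content_service=content_service,
        company_id=company_id,
        user_id=user_id,
        chat_id=chat_id,
    )
    return tool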
unique_toolkit/agentic/tools/openai_builtin/manager.py (new file)
@@ -0,0 +1,62 @@
+from openai import AsyncOpenAI
+
+from unique_toolkit.agentic.tools.config import ToolBuildConfig
+from unique_toolkit.agentic.tools.openai_builtin.base import (
+    OpenAIBuiltInTool,
+    OpenAIBuiltInToolName,
+)
+from unique_toolkit.agentic.tools.openai_builtin.code_interpreter import (
+    OpenAICodeInterpreterConfig,
+    OpenAICodeInterpreterTool,
+)
+from unique_toolkit.content.schemas import Content
+from unique_toolkit.content.service import ContentService
+
+
+class OpenAIBuiltInToolManager:
+    def __init__(
+        self,
+        uploaded_files: list[Content],
+        content_service: ContentService,
+        user_id: str,
+        company_id: str,
+        chat_id: str,
+        client: AsyncOpenAI,
+    ):
+        self._uploaded_files = uploaded_files
+        self._content_service = content_service
+        self._user_id = user_id
+        self._company_id = company_id
+        self._client = client
+        self._chat_id = chat_id
+
+    async def _build_tool(self, tool_config: ToolBuildConfig) -> OpenAIBuiltInTool:
+        if tool_config.name == OpenAIBuiltInToolName.CODE_INTERPRETER:
+            assert isinstance(tool_config.configuration, OpenAICodeInterpreterConfig)
+            tool = await OpenAICodeInterpreterTool.build_tool(
+                config=tool_config.configuration,
+                uploaded_files=self._uploaded_files,
+                user_id=self._user_id,
+                company_id=self._company_id,
+                chat_id=self._chat_id,
+                content_service=self._content_service,
+                client=self._client,
+            )
+            return tool
+        else:
+            raise ValueError(f"Unknown built-in tool name: {tool_config.name}")
+
+    async def get_all_openai_builtin_tools(
+        self, tool_configs: list[ToolBuildConfig]
+    ) -> tuple[list[ToolBuildConfig], list[OpenAIBuiltInTool]]:
+        openai_builtin_tools = []
+        filtered_tool_configs = []
+
+        for tool_config in tool_configs:
+            if tool_config.name not in OpenAIBuiltInToolName:
+                filtered_tool_configs.append(tool_config)
+                continue
+
+            openai_builtin_tools.append(await self._build_tool(tool_config))
+
+        return filtered_tool_configs, openai_builtin_tools
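OpenAIBuiltInToolManager, added above, splits the OpenAI built-in tool configs (currently only the code interpreter) out of a mixed config list and builds them, returning the untouched configs alongside the built tools. A small, hypothetical usage sketch follows; `split_and_build` and the variable names are illustrative, and the surrounding assistant code is assumed to supply the configs, client, and services.

from openai import AsyncOpenAI

from unique_toolkit.agentic.tools.config import ToolBuildConfig
from unique_toolkit.agentic.tools.openai_builtin.manager import OpenAIBuiltInToolManager
from unique_toolkit.content.schemas import Content
from unique_toolkit.content.service import ContentService


async def split_and_build(
    tool_configs: list[ToolBuildConfig],
    uploaded_files: list[Content],
    content_service: ContentService,
    client: AsyncOpenAI,
    user_id: str,
    company_id: str,
    chat_id: str,
):
    manager = OpenAIBuiltInToolManager(
        uploaded_files=uploaded_files,
        content_service=content_service,
        user_id=user_id,
        company_id=company_id,
        chat_id=chat_id,
        client=client,
    )
    # Configs whose name is not an OpenAI built-in are returned unchanged, so
    # the regular tool manager can keep handling them.
    remaining_configs, builtin_tools = await manager.get_all_openai_builtin_tools(
        tool_configs
    )
    # Each built tool exposes tool_description(), the Responses API tool
    # parameter shown in the code-interpreter hunk above.
    tool_params = [tool.tool_description() for tool in builtin_tools]
    return remaining_configs, tool_params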
unique_toolkit/agentic/tools/schemas.py (new file)
@@ -0,0 +1,141 @@
+import base64
+import gzip
+import re
+from typing import Any, Optional
+
+from pydantic import BaseModel, ConfigDict, Field, field_serializer, field_validator
+
+from unique_toolkit._common.pydantic_helpers import get_configuration_dict
+from unique_toolkit.agentic.tools.utils.source_handling.schema import SourceFormatConfig
+from unique_toolkit.content.schemas import ContentChunk
+
+
+# TODO: this needs to be more general as the tools can potentially return anything maybe make a base class and then derive per "type" of tool
+class ToolCallResponse(BaseModel):
+    id: str
+    name: str
+    content: str = ""
+    debug_info: Optional[dict] = None  # TODO: Make the default {}
+    content_chunks: Optional[list[ContentChunk]] = None  # TODO: Make the default []
+    reasoning_result: Optional[dict] = None  # TODO: Make the default {}
+    error_message: str = ""
+
+    @property
+    def successful(self) -> bool:
+        return self.error_message == ""
+
+
+class BaseToolConfig(BaseModel):
+    model_config = get_configuration_dict()
+    # TODO: add a check for the parameters to all be consistent within the tool config
+    pass
+
+
+class Source(BaseModel):
+    """Represents the sources in the tool call response that the llm will see
+
+    Args:
+        source_number: The number of the source
+        content: The content of the source
+    """
+
+    model_config = ConfigDict(
+        validate_by_alias=True, serialize_by_alias=True, validate_by_name=True
+    )
+
+    source_number: int | None = Field(
+        default=None,
+        serialization_alias="[source_number] - Used for citations!",
+        validation_alias="[source_number] - Used for citations!",
+    )
+    content: str = Field(
+        serialization_alias="[content] - Content of source",
+        validation_alias="[content] - Content of source",
+    )
+    order: int = Field(
+        serialization_alias="[order] - Index in the document!",
+        validation_alias="[order] - Index in the document!",
+    )
+    chunk_id: str | None = Field(
+        default=None,
+        serialization_alias="[chunk_id] - IGNORE",
+        validation_alias="[chunk_id] - IGNORE",
+    )
+    id: str = Field(
+        serialization_alias="[id] - IGNORE",
+        validation_alias="[id] - IGNORE",
+    )
+    key: str | None = Field(
+        default=None,
+        serialization_alias="[key] - IGNORE",
+        validation_alias="[key] - IGNORE",
+    )
+    metadata: dict[str, str] | str | None = Field(
+        default=None,
+        serialization_alias="[metadata] - Formatted metadata",
+        validation_alias="[metadata] - Formatted metadata",
+    )
+    url: str | None = Field(
+        default=None,
+        serialization_alias="[url] - IGNORE",
+        validation_alias="[url] - IGNORE",
+    )
+
+    @field_validator("metadata", mode="before")
+    def _metadata_str_to_dict(
+        cls, v: str | dict[str, str] | None
+    ) -> dict[str, str] | None:
+        """
+        Accept • dict → keep as-is
+               • str → parse tag-string back to dict
+        """
+        if v is None or isinstance(v, dict):
+            return v
+
+        # v is the rendered string. Build a dict by matching the
+        # patterns defined in SourceFormatConfig.sections.
+        cfg = SourceFormatConfig()  # or inject your app-wide config
+        out: dict[str, str] = {}
+        for key, tmpl in cfg.sections.items():
+            pattern = cfg.template_to_pattern(tmpl)
+            m = re.search(pattern, v, flags=re.S)
+            if m:
+                out[key] = m.group(1).strip()
+
+        return out if out else v  # type: ignore
+
+    # Compression + Base64 for url to hide it from the LLM
+    @field_serializer("url")
+    def serialize_url(self, value: str | None) -> str | None:
+        if value is None:
+            return None
+        # Compress then base64 encode
+        compressed = gzip.compress(value.encode())
+        return base64.b64encode(compressed).decode()
+
+    @field_validator("url", mode="before")
+    @classmethod
+    def validate_url(cls, value: Any) -> str | None:
+        if value is None or isinstance(value, str) and not value:
+            return None
+        if isinstance(value, str):
+            try:
+                # Try to decode base64 then decompress
+                decoded_bytes = base64.b64decode(value.encode())
+                decompressed = gzip.decompress(decoded_bytes).decode()
+                return decompressed
+            except Exception:
+                # If decoding/decompression fails, assume it's plain text
+                return value
+        return str(value)
+
+
+class ToolPrompts(BaseModel):
+    name: str
+    display_name: str
+    tool_system_prompt: str
+    tool_format_information_for_system_prompt: str
+    tool_user_prompt: str
+    tool_format_information_for_user_prompt: str
+    tool_description: str
+    input_model: dict[str, Any]
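The Source model above exposes chunk fields to the LLM under annotated aliases and hides the url by gzip-compressing and base64-encoding it on serialization, reversing the transformation on validation. The following is a short, hypothetical round-trip sketch using standard Pydantic v2 calls; the field values and URL are illustrative only.

from unique_toolkit.agentic.tools.schemas import Source

source = Source(
    source_number=1,
    content="Quarterly revenue grew 12%.",
    order=0,
    id="cont_123",
    url="https://example.com/report.pdf",
)

# Dump with the aliases the LLM sees, e.g. "[source_number] - Used for citations!".
payload = source.model_dump(by_alias=True)
assert "[content] - Content of source" in payload
# The url is gzip-compressed and base64-encoded, so it no longer matches the plain value.
assert payload["[url] - IGNORE"] != source.url

# Validating the dumped payload decodes the url back to plain text.
restored = Source.model_validate(payload)
assert restored.url == "https://example.com/report.pdf"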