unique_toolkit 1.15.0__py3-none-any.whl → 1.16.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26) hide show
  1. unique_toolkit/agentic/debug_info_manager/debug_info_manager.py +16 -6
  2. unique_toolkit/agentic/debug_info_manager/test/test_debug_info_manager.py +278 -0
  3. unique_toolkit/agentic/postprocessor/postprocessor_manager.py +50 -11
  4. unique_toolkit/agentic/responses_api/__init__.py +19 -0
  5. unique_toolkit/agentic/responses_api/postprocessors/code_display.py +63 -0
  6. unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +145 -0
  7. unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
  8. unique_toolkit/agentic/tools/factory.py +4 -0
  9. unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
  10. unique_toolkit/agentic/tools/openai_builtin/base.py +30 -0
  11. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
  12. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +57 -0
  13. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +230 -0
  14. unique_toolkit/agentic/tools/openai_builtin/manager.py +62 -0
  15. unique_toolkit/agentic/tools/tool_manager.py +290 -125
  16. unique_toolkit/chat/functions.py +15 -6
  17. unique_toolkit/chat/responses_api.py +461 -0
  18. unique_toolkit/language_model/functions.py +25 -9
  19. unique_toolkit/language_model/schemas.py +222 -27
  20. unique_toolkit/protocols/support.py +91 -9
  21. unique_toolkit/services/__init__.py +7 -0
  22. unique_toolkit/services/chat_service.py +139 -7
  23. {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.1.dist-info}/METADATA +8 -1
  24. {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.1.dist-info}/RECORD +26 -13
  25. {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.1.dist-info}/LICENSE +0 -0
  26. {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.1.dist-info}/WHEEL +0 -0
@@ -0,0 +1,11 @@
1
+ from unique_toolkit.agentic.tools.openai_builtin.code_interpreter import (
2
+ OpenAICodeInterpreterConfig,
3
+ OpenAICodeInterpreterTool,
4
+ )
5
+ from unique_toolkit.agentic.tools.openai_builtin.manager import OpenAIBuiltInToolManager
6
+
7
# Public API of the `openai_builtin` package.
__all__ = [
    "OpenAIBuiltInToolManager",
    "OpenAICodeInterpreterTool",
    "OpenAICodeInterpreterConfig",
]
@@ -0,0 +1,30 @@
1
+ from abc import ABC, abstractmethod
2
+ from enum import StrEnum
3
+ from typing import Generic, TypeVar
4
+
5
+ from openai.types.responses.tool_param import CodeInterpreter
6
+
7
+ from unique_toolkit.agentic.tools.schemas import ToolPrompts
8
+
9
+
10
+ class OpenAIBuiltInToolName(StrEnum):
11
+ CODE_INTERPRETER = "code_interpreter"
12
+
13
+
14
# Union of OpenAI built-in tool-param types handled by the toolkit.
BuiltInToolType = CodeInterpreter  # Add other tool types when needed
# Type parameter for OpenAIBuiltInTool implementations, bound to the union above.
ToolType = TypeVar("ToolType", bound=BuiltInToolType)
16
+
17
+
18
class OpenAIBuiltInTool(ABC, Generic[ToolType]):
    """Interface for OpenAI server-side ("built-in") tools.

    ``ToolType`` is the concrete OpenAI tool-param type the implementation
    produces (currently only ``CodeInterpreter``).
    """

    @property
    @abstractmethod
    def name(self) -> OpenAIBuiltInToolName:
        """Canonical name of this built-in tool."""
        raise NotImplementedError()

    @abstractmethod
    def tool_description(self) -> ToolType:
        """Return the tool-param payload to pass to the OpenAI API.

        Annotated with the class's type parameter ``ToolType`` (previously the
        ``BuiltInToolType`` union) so a subclass of
        ``OpenAIBuiltInTool[CodeInterpreter]`` gets a precise return type.
        """
        raise NotImplementedError()

    @abstractmethod
    def get_tool_prompts(self) -> ToolPrompts:
        """Return the prompt fragments that describe this tool to the model."""
        raise NotImplementedError()
@@ -0,0 +1,8 @@
1
+ from unique_toolkit.agentic.tools.openai_builtin.code_interpreter.config import (
2
+ OpenAICodeInterpreterConfig,
3
+ )
4
+ from unique_toolkit.agentic.tools.openai_builtin.code_interpreter.service import (
5
+ OpenAICodeInterpreterTool,
6
+ )
7
+
8
# Public API of the `code_interpreter` package.
__all__ = ["OpenAICodeInterpreterConfig", "OpenAICodeInterpreterTool"]
@@ -0,0 +1,57 @@
1
+ from pydantic import Field
2
+
3
+ from unique_toolkit.agentic.tools.factory import ToolFactory
4
+ from unique_toolkit.agentic.tools.openai_builtin.base import (
5
+ OpenAIBuiltInToolName,
6
+ )
7
+ from unique_toolkit.agentic.tools.schemas import BaseToolConfig
8
+
9
# Short tool description used in the tool definition shown to the model.
DEFAULT_TOOL_DESCRIPTION = "Use this tool to run python code, e.g to generate plots, process excel files, perform calculations, etc."

# Detailed usage instructions injected into the system prompt. The
# `sandbox:/mnt/data/...` markdown links are presumably rewritten into chat
# attachments/images by downstream postprocessors — verify against callers.
# NOTE(review): "`/mnt/data/<filename>" is missing its closing backtick and
# "a part from" should read "apart from"; the prompt text is left byte-identical here.
DEFAULT_TOOL_DESCRIPTION_FOR_SYSTEM_PROMPT = """
Use this tool to run python code, e.g to generate plots, process excel files, perform calculations, etc.
Instructions:
- All files uploaded to the chat are available in the code interpreter under the path `/mnt/data/<filename>
- All files generated through code should be saved in the `/mnt/data` folder

Instructions for displaying images and files in the chat:
Once files are generated in the `/mnt/data` folder you MUST reference them in the chat using markdown syntax in order to display them in the chat.

- If you want to display an image, use the following syntax: `![Image Name](sandbox:/mnt/data/<filename>)`
- Images will be converted and shown in the chat.
- Do NOT display an extra download link for images a part from the markdown above.
- Not using markdown syntax will FAIL to show images to the user.
- YOU MUST use the syntax above to display images, otherwise the image will not be displayed in the chat.
- For displaying a link to a file, use the following syntax: `[filename](sandbox:/mnt/data/<filename>)`
- Files are converted to references the user can click on to download the file

You MUST always use this syntax, otherwise the files will not be displayed in the chat.
""".strip()

# Optional prompt fragments; empty by default.
DEFAULT_TOOL_FORMAT_INFORMATION_FOR_SYSTEM_PROMPT = ""

DEFAULT_TOOL_FORMAT_INFORMATION_FOR_USER_PROMPT = ""

DEFAULT_TOOL_DESCRIPTION_FOR_USER_PROMPT = ""
36
+
37
+
38
class OpenAICodeInterpreterConfig(BaseToolConfig):
    """Configuration for the OpenAI code-interpreter built-in tool."""

    # Whether files uploaded in the chat are synced into the container.
    upload_files_in_chat: bool = Field(default=True)

    # Prompt texts; defaults are the module-level constants above.
    tool_description: str = DEFAULT_TOOL_DESCRIPTION
    tool_description_for_system_prompt: str = DEFAULT_TOOL_DESCRIPTION_FOR_SYSTEM_PROMPT
    tool_format_information_for_system_prompt: str = (
        DEFAULT_TOOL_FORMAT_INFORMATION_FOR_SYSTEM_PROMPT
    )
    tool_description_for_user_prompt: str = DEFAULT_TOOL_DESCRIPTION_FOR_USER_PROMPT
    tool_format_information_for_user_prompt: str = (
        DEFAULT_TOOL_FORMAT_INFORMATION_FOR_USER_PROMPT
    )

    # Container expiry in minutes, anchored to last activity (see service module).
    expires_after_minutes: int = 20
    # If True, let OpenAI manage the container ("auto" mode) instead of a
    # pre-provisioned per-chat container.
    use_auto_container: bool = False
53
+
54
+
55
# Make this config discoverable by tool name through the tool factory.
ToolFactory.register_tool_config(
    OpenAIBuiltInToolName.CODE_INTERPRETER, OpenAICodeInterpreterConfig
)
@@ -0,0 +1,230 @@
1
+ import logging
2
+ from typing import override
3
+
4
+ from openai import AsyncOpenAI, BaseModel, NotFoundError
5
+ from openai.types.responses.tool_param import CodeInterpreter
6
+
7
+ from unique_toolkit import ContentService, ShortTermMemoryService
8
+ from unique_toolkit.agentic.short_term_memory_manager.persistent_short_term_memory_manager import (
9
+ PersistentShortMemoryManager,
10
+ )
11
+ from unique_toolkit.agentic.tools.openai_builtin.base import (
12
+ OpenAIBuiltInTool,
13
+ OpenAIBuiltInToolName,
14
+ )
15
+ from unique_toolkit.agentic.tools.openai_builtin.code_interpreter.config import (
16
+ OpenAICodeInterpreterConfig,
17
+ )
18
+ from unique_toolkit.agentic.tools.schemas import ToolPrompts
19
+ from unique_toolkit.content.schemas import (
20
+ Content,
21
+ )
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
# Key under which the container state is stored in short-term memory.
_SHORT_TERM_MEMORY_NAME = "container_code_execution"
27
+
28
+
29
class CodeExecutionShortTermMemorySchema(BaseModel):
    """Per-chat persisted state of the code-execution container.

    NOTE(review): ``BaseModel`` is imported from ``openai`` (a re-export of
    pydantic's); pydantic copies field defaults per instance, so the mutable
    ``{}`` default is safe here.
    """

    # Id of the OpenAI container, or None when not yet created.
    container_id: str | None = None
    file_ids: dict[str, str] = {}  # Mapping of unique file id to openai file id
32
+
33
+
34
# Persistent short-term-memory manager specialized to the container schema.
CodeExecutionMemoryManager = PersistentShortMemoryManager[
    CodeExecutionShortTermMemorySchema
]
37
+
38
+
39
def _get_container_code_execution_short_term_memory_manager(
    company_id: str, user_id: str, chat_id: str
) -> CodeExecutionMemoryManager:
    """Build the persistent short-term-memory manager holding the
    code-execution container state for the given chat."""
    service = ShortTermMemoryService(
        company_id=company_id,
        user_id=user_id,
        chat_id=chat_id,
        message_id=None,  # chat-scoped state, not tied to a single message
    )
    return PersistentShortMemoryManager(
        short_term_memory_service=service,
        short_term_memory_schema=CodeExecutionShortTermMemorySchema,
        short_term_memory_name=_SHORT_TERM_MEMORY_NAME,
    )
54
+
55
+
56
async def _create_container_if_not_exists(
    client: AsyncOpenAI,
    chat_id: str,
    user_id: str,
    company_id: str,
    expires_after_minutes: int,
    memory: CodeExecutionShortTermMemorySchema | None = None,
) -> CodeExecutionShortTermMemorySchema:
    """Return memory holding a usable container id.

    Reuses the remembered container when it still exists and is active;
    otherwise creates a fresh one and resets the memory (including the
    cached file-id mapping).
    """
    if memory is not None:
        logger.info("Container found in short term memory")
    else:
        logger.info("No Container in short term memory, creating a new container")
        memory = CodeExecutionShortTermMemorySchema()

    reusable_id = memory.container_id

    if reusable_id is not None:
        try:
            existing = await client.containers.retrieve(reusable_id)
        except NotFoundError:
            reusable_id = None
        else:
            if existing.status not in ["active", "running"]:
                logger.info(
                    "Container has status `%s`, recreating a new one", existing.status
                )
                reusable_id = None

    if reusable_id is None:
        # A missing/stale container invalidates the cached file ids as well.
        memory = CodeExecutionShortTermMemorySchema()

        created = await client.containers.create(
            name=f"code_execution_{company_id}_{user_id}_{chat_id}",
            expires_after={
                "anchor": "last_active_at",
                "minutes": expires_after_minutes,
            },
        )

        memory.container_id = created.id

    return memory
97
+
98
+
99
async def _upload_files_to_container(
    client: AsyncOpenAI,
    uploaded_files: list[Content],
    memory: CodeExecutionShortTermMemorySchema,
    content_service: ContentService,
    chat_id: str,
) -> CodeExecutionShortTermMemorySchema:
    """Ensure every chat upload is present in the OpenAI container.

    Files already recorded in ``memory.file_ids`` are verified to still exist
    in the container (EAFP); anything missing is (re-)uploaded. Returns an
    updated deep copy of ``memory``; the input object is not mutated.
    """
    container_id = memory.container_id

    assert container_id is not None

    memory = memory.model_copy(deep=True)

    for file in uploaded_files:
        upload = True
        if file.id in memory.file_ids:
            try:
                _ = await client.containers.files.retrieve(
                    container_id=container_id, file_id=memory.file_ids[file.id]
                )
                logger.info("File with id %s already uploaded to container", file.id)
                upload = False
            except NotFoundError:
                # Container no longer has the file (e.g. it expired and was
                # recreated) -> upload again.
                upload = True

        if upload:
            # Fixed log-message typo ("Uploding" -> "Uploading").
            logger.info(
                "Uploading file %s to container %s", file.id, memory.container_id
            )
            file_content = content_service.download_content_to_bytes(
                content_id=file.id, chat_id=chat_id
            )  # TODO: Use async version when available

            openai_file = await client.containers.files.create(
                container_id=container_id,
                file=(file.key, file_content),
            )
            memory.file_ids[file.id] = openai_file.id

    return memory
139
+
140
+
141
class OpenAICodeInterpreterTool(OpenAIBuiltInTool[CodeInterpreter]):
    """OpenAI code-interpreter built-in tool bound to a container.

    Runs either against a pre-provisioned per-chat container (default) or
    with OpenAI's ``auto`` container mode when configured.
    """

    DISPLAY_NAME = "Code Interpreter"

    def __init__(
        self,
        config: OpenAICodeInterpreterConfig,
        container_id: str | None,
    ):
        self._config = config

        if not config.use_auto_container and container_id is None:
            raise ValueError("`container_id` required when not using `auto` containers")

        self._container_id = container_id

    @property
    @override
    def name(self) -> OpenAIBuiltInToolName:
        return OpenAIBuiltInToolName.CODE_INTERPRETER

    @override
    def tool_description(self) -> CodeInterpreter:
        # With `auto`, OpenAI manages the container lifecycle itself.
        if self._config.use_auto_container:
            return {"container": {"type": "auto"}, "type": "code_interpreter"}

        return {
            "container": self._container_id,  # type: ignore
            "type": "code_interpreter",
        }

    @classmethod
    async def build_tool(
        cls,
        config: OpenAICodeInterpreterConfig,
        uploaded_files: list[Content],
        client: AsyncOpenAI,
        content_service: ContentService,
        company_id: str,
        user_id: str,
        chat_id: str,
    ) -> "OpenAICodeInterpreterTool":
        """Create the tool, provisioning (or reusing) a container and syncing
        the chat's uploaded files into it.

        Container and file-id state is persisted in chat-scoped short-term
        memory so subsequent turns reuse the same container when possible.
        """
        if config.use_auto_container:
            logger.info("Using `auto` container setting")
            return cls(config=config, container_id=None)

        memory_manager = _get_container_code_execution_short_term_memory_manager(
            company_id=company_id,
            user_id=user_id,
            chat_id=chat_id,
        )

        memory = await memory_manager.load_async()

        memory = await _create_container_if_not_exists(
            client=client,
            memory=memory,
            chat_id=chat_id,
            user_id=user_id,
            company_id=company_id,
            expires_after_minutes=config.expires_after_minutes,
        )

        memory = await _upload_files_to_container(
            client=client,
            uploaded_files=uploaded_files,
            content_service=content_service,
            chat_id=chat_id,
            memory=memory,
        )

        await memory_manager.save_async(memory)

        assert memory.container_id is not None

        # Use `cls` (not the class name) so subclasses construct instances of
        # themselves — consistent with the `auto` branch above.
        return cls(config=config, container_id=memory.container_id)

    @override
    def get_tool_prompts(self) -> ToolPrompts:
        return ToolPrompts(
            name="the python tool",  # https://platform.openai.com/docs/guides/tools-code-interpreter
            display_name=self.DISPLAY_NAME,
            tool_description=self._config.tool_description,
            tool_system_prompt=self._config.tool_description_for_system_prompt,
            tool_format_information_for_system_prompt=self._config.tool_format_information_for_system_prompt,
            tool_user_prompt=self._config.tool_description_for_user_prompt,
            tool_format_information_for_user_prompt=self._config.tool_format_information_for_user_prompt,
            input_model={},
        )
@@ -0,0 +1,62 @@
1
+ from openai import AsyncOpenAI
2
+
3
+ from unique_toolkit.agentic.tools.config import ToolBuildConfig
4
+ from unique_toolkit.agentic.tools.openai_builtin.base import (
5
+ OpenAIBuiltInTool,
6
+ OpenAIBuiltInToolName,
7
+ )
8
+ from unique_toolkit.agentic.tools.openai_builtin.code_interpreter import (
9
+ OpenAICodeInterpreterConfig,
10
+ OpenAICodeInterpreterTool,
11
+ )
12
+ from unique_toolkit.content.schemas import Content
13
+ from unique_toolkit.content.service import ContentService
14
+
15
+
16
class OpenAIBuiltInToolManager:
    """Builds OpenAI built-in tools and separates their configs from the
    regular (function-calling) tool configs."""

    def __init__(
        self,
        uploaded_files: list[Content],
        content_service: ContentService,
        user_id: str,
        company_id: str,
        chat_id: str,
        client: AsyncOpenAI,
    ):
        self._uploaded_files = uploaded_files
        self._content_service = content_service
        self._user_id = user_id
        self._company_id = company_id
        self._client = client
        self._chat_id = chat_id

    async def _build_tool(self, tool_config: ToolBuildConfig) -> OpenAIBuiltInTool:
        # Guard clause: only the code interpreter is supported today.
        if tool_config.name != OpenAIBuiltInToolName.CODE_INTERPRETER:
            raise ValueError(f"Unknown built-in tool name: {tool_config.name}")

        assert isinstance(tool_config.configuration, OpenAICodeInterpreterConfig)
        return await OpenAICodeInterpreterTool.build_tool(
            config=tool_config.configuration,
            uploaded_files=self._uploaded_files,
            user_id=self._user_id,
            company_id=self._company_id,
            chat_id=self._chat_id,
            content_service=self._content_service,
            client=self._client,
        )

    async def get_all_openai_builtin_tools(
        self, tool_configs: list[ToolBuildConfig]
    ) -> tuple[list[ToolBuildConfig], list[OpenAIBuiltInTool]]:
        """Partition `tool_configs`: return (non-built-in configs, built tools),
        each preserving the input order."""
        remaining: list[ToolBuildConfig] = []
        built: list[OpenAIBuiltInTool] = []

        for cfg in tool_configs:
            if cfg.name in OpenAIBuiltInToolName:
                built.append(await self._build_tool(cfg))
            else:
                remaining.append(cfg)

        return remaining, built