unique_toolkit-1.15.0-py3-none-any.whl → unique_toolkit-1.16.0-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- unique_toolkit/agentic/postprocessor/postprocessor_manager.py +50 -11
- unique_toolkit/agentic/responses_api/__init__.py +19 -0
- unique_toolkit/agentic/responses_api/postprocessors/code_display.py +63 -0
- unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +145 -0
- unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
- unique_toolkit/agentic/tools/factory.py +4 -0
- unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
- unique_toolkit/agentic/tools/openai_builtin/base.py +30 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +57 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +230 -0
- unique_toolkit/agentic/tools/openai_builtin/manager.py +62 -0
- unique_toolkit/agentic/tools/tool_manager.py +257 -127
- unique_toolkit/chat/functions.py +15 -6
- unique_toolkit/chat/responses_api.py +461 -0
- unique_toolkit/language_model/functions.py +25 -9
- unique_toolkit/language_model/schemas.py +222 -27
- unique_toolkit/protocols/support.py +91 -9
- unique_toolkit/services/__init__.py +7 -0
- unique_toolkit/services/chat_service.py +139 -7
- {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.0.dist-info}/METADATA +5 -1
- {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.0.dist-info}/RECORD +24 -12
- {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.0.dist-info}/LICENSE +0 -0
- {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.0.dist-info}/WHEEL +0 -0
--- a/unique_toolkit/agentic/postprocessor/postprocessor_manager.py
+++ b/unique_toolkit/agentic/postprocessor/postprocessor_manager.py
@@ -6,6 +6,7 @@ from unique_toolkit.agentic.tools.utils.execution.execution import SafeTaskExecu
 from unique_toolkit.chat.service import ChatService
 from unique_toolkit.language_model.schemas import (
     LanguageModelStreamResponse,
+    ResponsesLanguageModelStreamResponse,
 )
 
 
@@ -26,7 +27,30 @@ class Postprocessor(ABC):
             "Subclasses must implement this method to apply post-processing to the response."
         )
 
-    async def remove_from_text(self, text) -> str:
+    async def remove_from_text(self, text: str) -> str:
+        raise NotImplementedError(
+            "Subclasses must implement this method to remove post-processing from the message."
+        )
+
+
+class ResponsesApiPostprocessor(ABC):
+    def __init__(self, name: str):
+        self.name = name
+
+    def get_name(self) -> str:
+        return self.name
+
+    async def run(self, loop_response: ResponsesLanguageModelStreamResponse) -> None:
+        raise NotImplementedError("Subclasses must implement this method.")
+
+    def apply_postprocessing_to_response(
+        self, loop_response: ResponsesLanguageModelStreamResponse
+    ) -> bool:
+        raise NotImplementedError(
+            "Subclasses must implement this method to apply post-processing to the response."
+        )
+
+    async def remove_from_text(self, text: str) -> str:
         raise NotImplementedError(
             "Subclasses must implement this method to remove post-processing from the message."
         )
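The new `ResponsesApiPostprocessor` ABC mirrors `Postprocessor` but is typed against the Responses API stream response. For orientation, a minimal sketch of a conforming subclass, assuming only the interface above plus the `loop_response.message.text` field used by the postprocessors later in this diff; the class and footer string are invented for illustration:

```python
from unique_toolkit.agentic.postprocessor.postprocessor_manager import (
    ResponsesApiPostprocessor,
)
from unique_toolkit.language_model.schemas import ResponsesLanguageModelStreamResponse

_FOOTER = "\n\n_generated with the Responses API_"


class AppendFooterPostprocessor(ResponsesApiPostprocessor):
    """Hypothetical postprocessor that appends a footer to each response."""

    def __init__(self) -> None:
        super().__init__(self.__class__.__name__)

    async def run(self, loop_response: ResponsesLanguageModelStreamResponse) -> None:
        return None  # no async work (e.g. file fetching) needed for this sketch

    def apply_postprocessing_to_response(
        self, loop_response: ResponsesLanguageModelStreamResponse
    ) -> bool:
        loop_response.message.text += _FOOTER
        return True  # True tells the manager the message was modified

    async def remove_from_text(self, text: str) -> str:
        # Strip the footer again before the text re-enters the history.
        return text.removesuffix(_FOOTER)
```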
@@ -59,12 +83,16 @@ class PostprocessorManager:
     ):
         self._logger = logger
         self._chat_service = chat_service
-        self._postprocessors: list[Postprocessor] = []
+        self._postprocessors: list[Postprocessor | ResponsesApiPostprocessor] = []
 
-    def add_postprocessor(
+    def add_postprocessor(
+        self, postprocessor: Postprocessor | ResponsesApiPostprocessor
+    ):
         self._postprocessors.append(postprocessor)
 
-    def get_postprocessors(
+    def get_postprocessors(
+        self, name: str
+    ) -> list[Postprocessor | ResponsesApiPostprocessor]:
         return self._postprocessors
 
     async def run_postprocessors(
@@ -75,25 +103,36 @@ class PostprocessorManager:
             logger=self._logger,
         )
 
+        if isinstance(loop_response, ResponsesLanguageModelStreamResponse):
+            postprocessors = self._postprocessors
+        else:
+            postprocessors = [
+                postprocessor
+                for postprocessor in self._postprocessors
+                if isinstance(postprocessor, Postprocessor)
+            ]
+
         tasks = [
             task_executor.execute_async(
                 self.execute_postprocessors,
                 loop_response=loop_response,
                 postprocessor_instance=postprocessor,
             )
-            for postprocessor in
+            for postprocessor in postprocessors
         ]
         postprocessor_results = await asyncio.gather(*tasks)
 
-        for
+        for postprocessor, result in zip(postprocessors, postprocessor_results):
             if not result.success:
                 self._logger.warning(
-
+                    "Postprocessor %s failed to run.",
+                    postprocessor.get_name(),
+                    exc_info=result.exception,
                 )
 
         modification_results = [
-            postprocessor.apply_postprocessing_to_response(loop_response)
-            for postprocessor in
+            postprocessor.apply_postprocessing_to_response(loop_response)  # type: ignore
+            for postprocessor in postprocessors
         ]
 
         has_been_modified = any(modification_results)
@@ -108,9 +147,9 @@ class PostprocessorManager:
     async def execute_postprocessors(
         self,
         loop_response: LanguageModelStreamResponse,
-        postprocessor_instance: Postprocessor,
+        postprocessor_instance: Postprocessor | ResponsesApiPostprocessor,
     ) -> None:
-        await postprocessor_instance.run(loop_response)
+        await postprocessor_instance.run(loop_response)  # type: ignore
 
     async def remove_from_text(
         self,
--- /dev/null
+++ b/unique_toolkit/agentic/responses_api/__init__.py
@@ -0,0 +1,19 @@
+from unique_toolkit.agentic.responses_api.postprocessors.code_display import (
+    ShowExecutedCodePostprocessor,
+    ShowExecutedCodePostprocessorConfig,
+)
+from unique_toolkit.agentic.responses_api.postprocessors.generated_files import (
+    DisplayCodeInterpreterFilesPostProcessor,
+    DisplayCodeInterpreterFilesPostProcessorConfig,
+)
+from unique_toolkit.agentic.responses_api.stream_handler import (
+    ResponsesStreamingHandler,
+)
+
+__all__ = [
+    "ShowExecutedCodePostprocessor",
+    "ShowExecutedCodePostprocessorConfig",
+    "DisplayCodeInterpreterFilesPostProcessorConfig",
+    "DisplayCodeInterpreterFilesPostProcessor",
+    "ResponsesStreamingHandler",
+]
--- /dev/null
+++ b/unique_toolkit/agentic/responses_api/postprocessors/code_display.py
@@ -0,0 +1,63 @@
+import logging
+import re
+from typing import override
+
+from pydantic import BaseModel, Field
+
+from unique_toolkit.agentic.postprocessor.postprocessor_manager import (
+    ResponsesApiPostprocessor,
+)
+from unique_toolkit.agentic.tools.config import get_configuration_dict
+from unique_toolkit.language_model.schemas import ResponsesLanguageModelStreamResponse
+
+_TEMPLATE = """
+<details><summary>Code Interpreter Call</summary>
+
+```python
+{code}
+```
+
+</details>
+</br>
+
+""".lstrip()
+
+logger = logging.getLogger(__name__)
+
+
+class ShowExecutedCodePostprocessorConfig(BaseModel):
+    model_config = get_configuration_dict()
+    remove_from_history: bool = Field(
+        default=False,
+        description="If set, the code interpreter call will be removed from the history on subsequent calls to the assistant.",
+    )
+
+
+class ShowExecutedCodePostprocessor(ResponsesApiPostprocessor):
+    def __init__(self, config: ShowExecutedCodePostprocessorConfig):
+        super().__init__(self.__class__.__name__)
+        self._config = config
+
+    @override
+    async def run(self, loop_response: ResponsesLanguageModelStreamResponse) -> None:
+        return None
+
+    @override
+    def apply_postprocessing_to_response(
+        self, loop_response: ResponsesLanguageModelStreamResponse
+    ) -> bool:
+        prepended_text = ""
+        for output in loop_response.code_interpreter_calls:
+            prepended_text += _TEMPLATE.format(code=output.code)
+
+        loop_response.message.text = prepended_text + loop_response.message.text
+
+        return prepended_text != ""
+
+    @override
+    async def remove_from_text(self, text) -> str:
+        if not self._config.remove_from_history:
+            return text
+        # Remove code interpreter blocks using regex
+        pattern = r"<details><summary>Code Interpreter Call</summary>.*?</details>"
+        return re.sub(pattern, "", text, flags=re.DOTALL)
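The history cleanup above is a plain regex removal; a self-contained check of the same pattern (the sample text is invented) shows how `re.DOTALL` and the non-greedy `.*?` confine the match to the injected block:

```python
import re

# Same pattern as ShowExecutedCodePostprocessor.remove_from_text.
pattern = r"<details><summary>Code Interpreter Call</summary>.*?</details>"

text = (
    "<details><summary>Code Interpreter Call</summary>\n"
    "print(1 + 1)\n"
    "</details>\n"
    "The answer is 2."
)

# DOTALL lets ".*?" cross newlines; the non-greedy match stops at the
# first closing </details>, so only the injected block is removed.
print(re.sub(pattern, "", text, flags=re.DOTALL).strip())
# -> The answer is 2.
```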
--- /dev/null
+++ b/unique_toolkit/agentic/responses_api/postprocessors/generated_files.py
@@ -0,0 +1,145 @@
+import logging
+import re
+from mimetypes import guess_type
+from typing import override
+
+from openai import AsyncOpenAI
+from pydantic import BaseModel
+from unique_sdk import Content
+
+from unique_toolkit.agentic.postprocessor.postprocessor_manager import (
+    ResponsesApiPostprocessor,
+)
+from unique_toolkit.agentic.tools.config import get_configuration_dict
+from unique_toolkit.content.schemas import ContentReference
+from unique_toolkit.content.service import ContentService
+from unique_toolkit.language_model.schemas import ResponsesLanguageModelStreamResponse
+
+logger = logging.getLogger(__name__)
+
+
+class DisplayCodeInterpreterFilesPostProcessorConfig(BaseModel):
+    model_config = get_configuration_dict()
+    upload_scope_id: str
+
+
+class DisplayCodeInterpreterFilesPostProcessor(
+    ResponsesApiPostprocessor,
+):
+    def __init__(
+        self,
+        client: AsyncOpenAI,
+        content_service: ContentService,
+        config: DisplayCodeInterpreterFilesPostProcessorConfig,
+    ) -> None:
+        super().__init__(self.__class__.__name__)
+        self._content_service = content_service
+        self._config = config
+        self._client = client
+        self._content_map = {}
+
+    @override
+    async def run(self, loop_response: ResponsesLanguageModelStreamResponse) -> None:
+        logger.info("Fetching and adding code interpreter files to the response")
+
+        container_files = loop_response.container_files
+        logger.info("Found %s container files", len(container_files))
+
+        self._content_map = {}
+        for container_file in container_files:
+            logger.info("Fetching file content for %s", container_file.filename)
+            file_content = await self._client.containers.files.content.retrieve(
+                container_id=container_file.container_id, file_id=container_file.file_id
+            )
+
+            logger.info(
+                "Uploading file content for %s to knowledge base",
+                container_file.filename,
+            )
+            content = self._content_service.upload_content_from_bytes(
+                content=file_content.content,
+                content_name=container_file.filename,
+                skip_ingestion=True,
+                mime_type=guess_type(container_file.filename)[0] or "text/plain",
+                scope_id=self._config.upload_scope_id,
+            )
+            self._content_map[container_file.filename] = content
+
+    @override
+    def apply_postprocessing_to_response(
+        self, loop_response: ResponsesLanguageModelStreamResponse
+    ) -> bool:
+        ref_number = _get_next_ref_number(loop_response.message.references)
+        changed = False
+        # images
+        for filename, content in self._content_map.items():
+            # Images
+            loop_response.message.text, replaced = _replace_container_image_citation(
+                text=loop_response.message.text, filename=filename, content=content
+            )
+            changed |= replaced
+
+            # Files
+            loop_response.message.text, replaced = _replace_container_file_citation(
+                text=loop_response.message.text,
+                filename=filename,
+                ref_number=ref_number,
+            )
+            changed |= replaced
+            if replaced:
+                loop_response.message.references.append(
+                    ContentReference(
+                        sequence_number=ref_number,
+                        source_id=content.id,
+                        source="node-ingestion-chunks",
+                        url=f"unique://content/{content.id}",
+                        name=filename,
+                    )
+                )
+                ref_number += 1
+        return changed
+
+    @override
+    async def remove_from_text(self, text) -> str:
+        return text
+
+
+def _get_next_ref_number(references: list[ContentReference]) -> int:
+    max_ref_number = 0
+    for ref in references:
+        max_ref_number = max(max_ref_number, ref.sequence_number)
+    return max_ref_number + 1
+
+
+def _replace_container_image_citation(
+    text: str, filename: str, content: Content
+) -> tuple[str, bool]:
+    image_markdown = rf"!\[.*?\]\(sandbox:/mnt/data/{re.escape(filename)}\)"
+
+    if not re.search(image_markdown, text):
+        logger.info("No image markdown found for %s", filename)
+        return text, False
+
+    logger.info("Displaying image %s", filename)
+    return re.sub(
+        image_markdown,
+        f"![{filename}](unique://content/{content.id})",
+        text,
+    ), True
+
+
+def _replace_container_file_citation(
+    text: str, filename: str, ref_number: int
+) -> tuple[str, bool]:
+    file_markdown = rf"\[.*?\]\(sandbox:/mnt/data/{re.escape(filename)}\)"
+
+    if not re.search(file_markdown, text):
+        logger.info("No file markdown found for %s", filename)
+        return text, False
+
+    logger.info("Displaying file %s", filename)
+    return re.sub(
+        file_markdown,
+        f"<sup>{ref_number}</sup>",
+        text,
+    ), True
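The sandbox-citation rewriting is easiest to see in isolation. A small check of the file pattern (the message text is invented), mirroring `_replace_container_file_citation`:

```python
import re

filename = "report.csv"
# Same shape as the pattern built in _replace_container_file_citation.
file_markdown = rf"\[.*?\]\(sandbox:/mnt/data/{re.escape(filename)}\)"

text = "Download: [report.csv](sandbox:/mnt/data/report.csv)"

# The sandbox link becomes a superscript reference marker; the matching
# ContentReference entry is appended separately by the postprocessor.
print(re.sub(file_markdown, "<sup>1</sup>", text))
# -> Download: <sup>1</sup>
```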
--- /dev/null
+++ b/unique_toolkit/agentic/responses_api/stream_handler.py
@@ -0,0 +1,15 @@
+from unique_toolkit.protocols.support import ResponsesSupportCompleteWithReferences
+from unique_toolkit.services.chat_service import ChatService
+
+
+class ResponsesStreamingHandler(ResponsesSupportCompleteWithReferences):
+    def __init__(self, chat_service: ChatService):
+        self._chat_service = chat_service
+
+    def complete_with_references(self, *args, **kwargs):
+        return self._chat_service.complete_responses_with_references(*args, **kwargs)
+
+    async def complete_with_references_async(self, *args, **kwargs):
+        return await self._chat_service.complete_responses_with_references_async(
+            *args, **kwargs
+        )
--- a/unique_toolkit/agentic/tools/factory.py
+++ b/unique_toolkit/agentic/tools/factory.py
@@ -11,6 +11,10 @@ class ToolFactory:
     tool_map: dict[str, type[Tool]] = {}
     tool_config_map: dict[str, Callable] = {}
 
+    @classmethod
+    def register_tool_config(cls, tool_name: str, tool_config: type[BaseToolConfig]):
+        cls.tool_config_map[tool_name] = tool_config
+
     @classmethod
     def register_tool(
         cls,
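The new `register_tool_config` hook lets a config class be registered without a full `Tool` subclass; `config.py` below uses it for the code interpreter. A usage sketch, where `MyToolConfig` and the `"my_tool"` name are invented for illustration:

```python
from pydantic import Field

from unique_toolkit.agentic.tools.factory import ToolFactory
from unique_toolkit.agentic.tools.schemas import BaseToolConfig


class MyToolConfig(BaseToolConfig):
    """Hypothetical tool configuration."""

    max_results: int = Field(default=10)


# Same call shape as the registration at the bottom of config.py below.
ToolFactory.register_tool_config("my_tool", MyToolConfig)
```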
--- /dev/null
+++ b/unique_toolkit/agentic/tools/openai_builtin/__init__.py
@@ -0,0 +1,11 @@
+from unique_toolkit.agentic.tools.openai_builtin.code_interpreter import (
+    OpenAICodeInterpreterConfig,
+    OpenAICodeInterpreterTool,
+)
+from unique_toolkit.agentic.tools.openai_builtin.manager import OpenAIBuiltInToolManager
+
+__all__ = [
+    "OpenAIBuiltInToolManager",
+    "OpenAICodeInterpreterTool",
+    "OpenAICodeInterpreterConfig",
+]
--- /dev/null
+++ b/unique_toolkit/agentic/tools/openai_builtin/base.py
@@ -0,0 +1,30 @@
+from abc import ABC, abstractmethod
+from enum import StrEnum
+from typing import Generic, TypeVar
+
+from openai.types.responses.tool_param import CodeInterpreter
+
+from unique_toolkit.agentic.tools.schemas import ToolPrompts
+
+
+class OpenAIBuiltInToolName(StrEnum):
+    CODE_INTERPRETER = "code_interpreter"
+
+
+BuiltInToolType = CodeInterpreter  # Add other tool types when needed
+ToolType = TypeVar("ToolType", bound=BuiltInToolType)
+
+
+class OpenAIBuiltInTool(ABC, Generic[ToolType]):
+    @property
+    @abstractmethod
+    def name(self) -> OpenAIBuiltInToolName:
+        raise NotImplementedError()
+
+    @abstractmethod
+    def tool_description(self) -> BuiltInToolType:
+        raise NotImplementedError()
+
+    @abstractmethod
+    def get_tool_prompts(self) -> ToolPrompts:
+        raise NotImplementedError()
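Assuming `CodeInterpreter` here is the TypedDict from `openai.types.responses.tool_param` (so a plain dict literal satisfies it), a hypothetical subclass could look like the sketch below; the `get_tool_prompts` body is left elided because the `ToolPrompts` constructor is not shown in this diff:

```python
from openai.types.responses.tool_param import CodeInterpreter

from unique_toolkit.agentic.tools.openai_builtin.base import (
    OpenAIBuiltInTool,
    OpenAIBuiltInToolName,
)
from unique_toolkit.agentic.tools.schemas import ToolPrompts


class AutoContainerCodeInterpreter(OpenAIBuiltInTool[CodeInterpreter]):
    """Hypothetical built-in tool wrapper using an auto-created container."""

    @property
    def name(self) -> OpenAIBuiltInToolName:
        return OpenAIBuiltInToolName.CODE_INTERPRETER

    def tool_description(self) -> CodeInterpreter:
        # "auto" asks the Responses API to provision a container per run.
        return {"type": "code_interpreter", "container": {"type": "auto"}}

    def get_tool_prompts(self) -> ToolPrompts:
        ...  # construction of ToolPrompts is not shown in this diff
```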
--- /dev/null
+++ b/unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py
@@ -0,0 +1,8 @@
+from unique_toolkit.agentic.tools.openai_builtin.code_interpreter.config import (
+    OpenAICodeInterpreterConfig,
+)
+from unique_toolkit.agentic.tools.openai_builtin.code_interpreter.service import (
+    OpenAICodeInterpreterTool,
+)
+
+__all__ = ["OpenAICodeInterpreterConfig", "OpenAICodeInterpreterTool"]
--- /dev/null
+++ b/unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py
@@ -0,0 +1,57 @@
+from pydantic import Field
+
+from unique_toolkit.agentic.tools.factory import ToolFactory
+from unique_toolkit.agentic.tools.openai_builtin.base import (
+    OpenAIBuiltInToolName,
+)
+from unique_toolkit.agentic.tools.schemas import BaseToolConfig
+
+DEFAULT_TOOL_DESCRIPTION = "Use this tool to run python code, e.g to generate plots, process excel files, perform calculations, etc."
+
+DEFAULT_TOOL_DESCRIPTION_FOR_SYSTEM_PROMPT = """
+Use this tool to run python code, e.g to generate plots, process excel files, perform calculations, etc.
+Instructions:
+- All files uploaded to the chat are available in the code interpreter under the path `/mnt/data/<filename>
+- All files generated through code should be saved in the `/mnt/data` folder
+
+Instructions for displaying images and files in the chat:
+Once files are generated in the `/mnt/data` folder you MUST reference them in the chat using markdown syntax in order to display them in the chat.
+
+- If you want to display an image, use the following syntax: `![filename](sandbox:/mnt/data/<filename>)`
+- Images will be converted and shown in the chat.
+- Do NOT display an extra download link for images a part from the markdown above.
+- Not using markdown syntax will FAIL to show images to the user.
+- YOU MUST use the syntax above to display images, otherwise the image will not be displayed in the chat.
+- For displaying a link to a file, use the following syntax: `[filename](sandbox:/mnt/data/<filename>)`
+- Files are converted to references the user can click on to download the file
+
+You MUST always use this syntax, otherwise the files will not be displayed in the chat.
+""".strip()
+
+DEFAULT_TOOL_FORMAT_INFORMATION_FOR_SYSTEM_PROMPT = ""
+
+DEFAULT_TOOL_FORMAT_INFORMATION_FOR_USER_PROMPT = ""
+
+DEFAULT_TOOL_DESCRIPTION_FOR_USER_PROMPT = ""
+
+
+class OpenAICodeInterpreterConfig(BaseToolConfig):
+    upload_files_in_chat: bool = Field(default=True)
+
+    tool_description: str = DEFAULT_TOOL_DESCRIPTION
+    tool_description_for_system_prompt: str = DEFAULT_TOOL_DESCRIPTION_FOR_SYSTEM_PROMPT
+    tool_format_information_for_system_prompt: str = (
+        DEFAULT_TOOL_FORMAT_INFORMATION_FOR_SYSTEM_PROMPT
+    )
+    tool_description_for_user_prompt: str = DEFAULT_TOOL_DESCRIPTION_FOR_USER_PROMPT
+    tool_format_information_for_user_prompt: str = (
+        DEFAULT_TOOL_FORMAT_INFORMATION_FOR_USER_PROMPT
+    )
+
+    expires_after_minutes: int = 20
+    use_auto_container: bool = False
+
+
+ToolFactory.register_tool_config(
+    OpenAIBuiltInToolName.CODE_INTERPRETER, OpenAICodeInterpreterConfig
+)
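Since `OpenAICodeInterpreterConfig` declares only defaulted fields on top of `BaseToolConfig`, overriding values is plain Pydantic, assuming `BaseToolConfig` itself has no required fields (its definition is not shown in this diff); the values below are arbitrary:

```python
from unique_toolkit.agentic.tools.openai_builtin import OpenAICodeInterpreterConfig

config = OpenAICodeInterpreterConfig(
    upload_files_in_chat=False,  # do not mirror chat uploads into the container
    expires_after_minutes=5,     # shorter-lived container than the default 20
)
print(config.use_auto_container)  # -> False (default)
```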