unique_toolkit 1.14.11__py3-none-any.whl → 1.16.0__py3-none-any.whl
This diff shows the contents of publicly released versions of the package as they appear in their public registries; it is provided for informational purposes only.
- unique_toolkit/_common/api_calling/human_verification_manager.py +95 -15
- unique_toolkit/agentic/postprocessor/postprocessor_manager.py +50 -11
- unique_toolkit/agentic/responses_api/__init__.py +19 -0
- unique_toolkit/agentic/responses_api/postprocessors/code_display.py +63 -0
- unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +145 -0
- unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
- unique_toolkit/agentic/tools/factory.py +4 -0
- unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
- unique_toolkit/agentic/tools/openai_builtin/base.py +30 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +57 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +230 -0
- unique_toolkit/agentic/tools/openai_builtin/manager.py +62 -0
- unique_toolkit/agentic/tools/tool_manager.py +257 -127
- unique_toolkit/chat/functions.py +15 -6
- unique_toolkit/chat/responses_api.py +461 -0
- unique_toolkit/language_model/functions.py +25 -9
- unique_toolkit/language_model/schemas.py +222 -27
- unique_toolkit/protocols/support.py +91 -9
- unique_toolkit/services/__init__.py +7 -0
- unique_toolkit/services/chat_service.py +139 -7
- {unique_toolkit-1.14.11.dist-info → unique_toolkit-1.16.0.dist-info}/METADATA +8 -1
- {unique_toolkit-1.14.11.dist-info → unique_toolkit-1.16.0.dist-info}/RECORD +25 -13
- {unique_toolkit-1.14.11.dist-info → unique_toolkit-1.16.0.dist-info}/LICENSE +0 -0
- {unique_toolkit-1.14.11.dist-info → unique_toolkit-1.16.0.dist-info}/WHEEL +0 -0
unique_toolkit/_common/api_calling/human_verification_manager.py

```diff
@@ -19,7 +19,10 @@ from unique_toolkit._common.endpoint_requestor import (
     RequestorType,
     build_requestor,
 )
-from unique_toolkit._common.pydantic_helpers import
+from unique_toolkit._common.pydantic_helpers import (
+    create_complement_model,
+    create_union_model,
+)
 from unique_toolkit._common.string_utilities import (
     dict_to_markdown_table,
     extract_dicts_from_string,
@@ -78,24 +81,67 @@ class HumanVerificationManagerForApiCalling(
             ]
         ],
         requestor_type: RequestorType = RequestorType.REQUESTS,
+        environment_payload_params: BaseModel | None = None,
+        modifiable_payload_params_model: type[BaseModel] | None = None,
         **kwargs: dict[str, Any],
     ):
+        """
+        Manages human verification for api calling.
+
+        Args:
+            logger: The logger to use for logging.
+            operation: The operation to use for the api calling.
+            requestor_type: The requestor type to use for the api calling.
+            environment_payload_params: The environment payload params to use for the api calling.
+                If None, the modifiable params model will be the operation payload model.
+                This can be useful for parameters in the payload that should not be modified by the user.
+            modifiable_payload_params_model: The modifiable payload params model to use for the api calling.
+                If None, a complement model will be created using the operation payload model
+                and the environment payload params.
+                If provided, it will be used instead of the complement model.
+                This is necessary if the modifiable params model is required
+                to use custom validators or serializers.
+            **kwargs: Additional keyword arguments to pass to the requestor.
+        """
         self._logger = logger
         self._operation = operation
-
+        self._environment_payload_params = environment_payload_params
         # Create internal models for this manager instance
 
-
+        if self._environment_payload_params is None:
+            self._modifiable_payload_params_model = self._operation.payload_model()
+        else:
+            if modifiable_payload_params_model is None:
+                self._modifiable_payload_params_model = create_complement_model(
+                    model_type_a=self._operation.payload_model(),
+                    model_type_b=type(self._environment_payload_params),
+                )
+            else:
+                # This is necessary if the modifiable params model is required
+                # to use custom validators or serializers.
+                self._modifiable_payload_params_model = modifiable_payload_params_model
+
+        if self._environment_payload_params is not None:
+            combined_keys = set(
+                self._modifiable_payload_params_model.model_fields.keys()
+            ) | set(type(self._environment_payload_params).model_fields.keys())
+            payload_keys = set(self._operation.payload_model().model_fields.keys())
+            if not payload_keys.issubset(combined_keys):
+                raise ValueError(
+                    "The modifiable params model + the environment parameters do not have all the keys of the operation payload model."
+                )
+
+        class VerificationModel(BaseModel):
             confirmation: HumanConfirmation
-
+            modifiable_params: self._modifiable_payload_params_model  # type: ignore
 
-        self.
+        self._verification_model = VerificationModel
 
+        self._requestor_type = requestor_type
         self._combined_params_model = create_union_model(
             model_type_a=self._operation.path_params_model(),
             model_type_b=self._operation.payload_model(),
         )
-        self._requestor_type = requestor_type
         self._requestor = build_requestor(
             requestor_type=requestor_type,
             operation_type=operation,
@@ -104,7 +150,10 @@ class HumanVerificationManagerForApiCalling(
         )
 
     def detect_api_calls_from_user_message(
-        self,
+        self,
+        *,
+        last_assistant_message: ChatMessage,
+        user_message: str,
     ) -> PayloadType | None:
         user_message_dicts = extract_dicts_from_string(user_message)
         if len(user_message_dicts) == 0:
@@ -114,13 +163,20 @@ class HumanVerificationManagerForApiCalling(
         for user_message_dict in user_message_dicts:
             try:
                 # Convert dict to payload model first, then create payload
-
+                verfication_data = self._verification_model.model_validate(
                     user_message_dict, by_alias=True, by_name=True
                 )
                 if self._verify_human_verification(
-
+                    verfication_data.confirmation, last_assistant_message
                 ):
-
+                    payload_dict = verfication_data.modifiable_params.model_dump()
+                    if self._environment_payload_params is not None:
+                        payload_dict.update(
+                            self._environment_payload_params.model_dump()
+                        )
+
+                    return self._operation.payload_model().model_validate(payload_dict)
+
             except Exception as e:
                 self._logger.error(f"Error detecting api calls from user message: {e}")
 
@@ -141,11 +197,27 @@ class HumanVerificationManagerForApiCalling(
         return confirmation.payload_hash in last_assistant_message.content
 
     def _create_next_user_message(self, payload: PayloadType) -> str:
-
-
+        # Extract only the modifiable fields from the payload
+        payload_dict = payload.model_dump()
+        if self._environment_payload_params is not None:
+            # Remove environment params from payload to avoid validation errors
+            environment_fields = set(
+                type(self._environment_payload_params).model_fields.keys()
+            )
+            modifiable_dict = {
+                k: v for k, v in payload_dict.items() if k not in environment_fields
+            }
+        else:
+            modifiable_dict = payload_dict
+
+        modifiable_params = self._modifiable_payload_params_model.model_validate(
+            modifiable_dict
+        )
+        api_call = self._verification_model(
+            modifiable_params=modifiable_params,
             confirmation=HumanConfirmation(
                 payload_hash=hashlib.sha256(
-
+                    modifiable_params.model_dump_json().encode()
                 ).hexdigest(),
                 time_stamp=datetime.now(),
             ),
@@ -168,6 +240,14 @@ class HumanVerificationManagerForApiCalling(
         path_params: PathParamsType,
         payload: PayloadType,
     ) -> ResponseType:
+        """
+        Call the api with the given path params, payload and secured payload params.
+
+        The `secured payload params` are params that are enforced by the application.
+        It should generally be not possible for the user to adapt those but here we
+        ensure that the application has the last word.
+
+        """
         params = path_params.model_dump()
         params.update(payload.model_dump())
 
@@ -217,8 +297,8 @@ if __name__ == "__main__":
 
     payload = GetUserRequestBody(include_profile=True)
 
-    api_call = human_verification_manager.
-
+    api_call = human_verification_manager._verification_model(
+        modifiable_params=payload,
         confirmation=HumanConfirmation(
             payload_hash=hashlib.sha256(payload.model_dump_json().encode()).hexdigest(),
             time_stamp=datetime.now(),
```
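The notable change here is the split of the operation payload into application-enforced `environment_payload_params` and a user-editable complement. A minimal sketch of that behavior with plain Pydantic (the two models and the complement helper below are illustrative; the toolkit's own `create_complement_model` implementation is not shown in this diff):

```python
from pydantic import BaseModel, create_model


class TransferPayload(BaseModel):  # hypothetical full operation payload
    amount: float
    currency: str
    tenant_id: str


class EnvironmentParams(BaseModel):  # hypothetical application-enforced part
    tenant_id: str


def complement_model(a: type[BaseModel], b: type[BaseModel]) -> type[BaseModel]:
    """Assumed behavior: keep the fields of `a` that are not declared on `b`."""
    fields = {
        name: (field.annotation, field)
        for name, field in a.model_fields.items()
        if name not in b.model_fields
    }
    return create_model("ModifiableParams", **fields)


ModifiableParams = complement_model(TransferPayload, EnvironmentParams)
print(sorted(ModifiableParams.model_fields))  # ['amount', 'currency']

# Mirrors the new __init__ check: modifiable + environment must cover the payload.
combined = set(ModifiableParams.model_fields) | set(EnvironmentParams.model_fields)
assert set(TransferPayload.model_fields).issubset(combined)
```

This is also why `detect_api_calls_from_user_message` merges the environment params back into the payload dict before validating against the full payload model: the user only ever round-trips the modifiable fields.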
unique_toolkit/agentic/postprocessor/postprocessor_manager.py

```diff
@@ -6,6 +6,7 @@ from unique_toolkit.agentic.tools.utils.execution.execution import SafeTaskExecu
 from unique_toolkit.chat.service import ChatService
 from unique_toolkit.language_model.schemas import (
     LanguageModelStreamResponse,
+    ResponsesLanguageModelStreamResponse,
 )
 
 
@@ -26,7 +27,30 @@ class Postprocessor(ABC):
             "Subclasses must implement this method to apply post-processing to the response."
         )
 
-    async def remove_from_text(self, text) -> str:
+    async def remove_from_text(self, text: str) -> str:
+        raise NotImplementedError(
+            "Subclasses must implement this method to remove post-processing from the message."
+        )
+
+
+class ResponsesApiPostprocessor(ABC):
+    def __init__(self, name: str):
+        self.name = name
+
+    def get_name(self) -> str:
+        return self.name
+
+    async def run(self, loop_response: ResponsesLanguageModelStreamResponse) -> None:
+        raise NotImplementedError("Subclasses must implement this method.")
+
+    def apply_postprocessing_to_response(
+        self, loop_response: ResponsesLanguageModelStreamResponse
+    ) -> bool:
+        raise NotImplementedError(
+            "Subclasses must implement this method to apply post-processing to the response."
+        )
+
+    async def remove_from_text(self, text: str) -> str:
         raise NotImplementedError(
             "Subclasses must implement this method to remove post-processing from the message."
         )
@@ -59,12 +83,16 @@ class PostprocessorManager:
     ):
         self._logger = logger
         self._chat_service = chat_service
-        self._postprocessors: list[Postprocessor] = []
+        self._postprocessors: list[Postprocessor | ResponsesApiPostprocessor] = []
 
-    def add_postprocessor(
+    def add_postprocessor(
+        self, postprocessor: Postprocessor | ResponsesApiPostprocessor
+    ):
         self._postprocessors.append(postprocessor)
 
-    def get_postprocessors(
+    def get_postprocessors(
+        self, name: str
+    ) -> list[Postprocessor | ResponsesApiPostprocessor]:
         return self._postprocessors
 
     async def run_postprocessors(
@@ -75,25 +103,36 @@ class PostprocessorManager:
             logger=self._logger,
         )
 
+        if isinstance(loop_response, ResponsesLanguageModelStreamResponse):
+            postprocessors = self._postprocessors
+        else:
+            postprocessors = [
+                postprocessor
+                for postprocessor in self._postprocessors
+                if isinstance(postprocessor, Postprocessor)
+            ]
+
         tasks = [
             task_executor.execute_async(
                 self.execute_postprocessors,
                 loop_response=loop_response,
                 postprocessor_instance=postprocessor,
             )
-            for postprocessor in
+            for postprocessor in postprocessors
         ]
         postprocessor_results = await asyncio.gather(*tasks)
 
-        for
+        for postprocessor, result in zip(postprocessors, postprocessor_results):
             if not result.success:
                 self._logger.warning(
-
+                    "Postprocessor %s failed to run.",
+                    postprocessor.get_name(),
+                    exc_info=result.exception,
                 )
 
         modification_results = [
-            postprocessor.apply_postprocessing_to_response(loop_response)
-            for postprocessor in
+            postprocessor.apply_postprocessing_to_response(loop_response)  # type: ignore
+            for postprocessor in postprocessors
         ]
 
         has_been_modified = any(modification_results)
@@ -108,9 +147,9 @@ class PostprocessorManager:
     async def execute_postprocessors(
         self,
         loop_response: LanguageModelStreamResponse,
-        postprocessor_instance: Postprocessor,
+        postprocessor_instance: Postprocessor | ResponsesApiPostprocessor,
     ) -> None:
-        await postprocessor_instance.run(loop_response)
+        await postprocessor_instance.run(loop_response)  # type: ignore
 
     async def remove_from_text(
         self,
```
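Anything that should run in the new Responses flow subclasses `ResponsesApiPostprocessor` and implements the same three hooks; for plain stream responses the manager now filters Responses-only postprocessors out. A minimal sketch (the footer postprocessor below is illustrative, not part of the toolkit):

```python
from typing import override

from unique_toolkit.agentic.postprocessor.postprocessor_manager import (
    ResponsesApiPostprocessor,
)
from unique_toolkit.language_model.schemas import ResponsesLanguageModelStreamResponse


class AppendFooterPostprocessor(ResponsesApiPostprocessor):
    """Appends a fixed footer to the streamed message (illustrative)."""

    def __init__(self, footer: str = "\n\n---\nProduced with code interpreter."):
        super().__init__(self.__class__.__name__)
        self._footer = footer

    @override
    async def run(self, loop_response: ResponsesLanguageModelStreamResponse) -> None:
        return None  # no async side effects needed for this example

    @override
    def apply_postprocessing_to_response(
        self, loop_response: ResponsesLanguageModelStreamResponse
    ) -> bool:
        loop_response.message.text += self._footer
        return True  # tells the manager the message was modified

    @override
    async def remove_from_text(self, text: str) -> str:
        # Undo the modification before the text re-enters the history.
        return text.replace(self._footer, "")
```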
unique_toolkit/agentic/responses_api/__init__.py

```diff
@@ -0,0 +1,19 @@
+from unique_toolkit.agentic.responses_api.postprocessors.code_display import (
+    ShowExecutedCodePostprocessor,
+    ShowExecutedCodePostprocessorConfig,
+)
+from unique_toolkit.agentic.responses_api.postprocessors.generated_files import (
+    DisplayCodeInterpreterFilesPostProcessor,
+    DisplayCodeInterpreterFilesPostProcessorConfig,
+)
+from unique_toolkit.agentic.responses_api.stream_handler import (
+    ResponsesStreamingHandler,
+)
+
+__all__ = [
+    "ShowExecutedCodePostprocessor",
+    "ShowExecutedCodePostprocessorConfig",
+    "DisplayCodeInterpreterFilesPostProcessorConfig",
+    "DisplayCodeInterpreterFilesPostProcessor",
+    "ResponsesStreamingHandler",
+]
```
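The new package `__init__` makes the Responses API helpers importable from one place, e.g.:

```python
from unique_toolkit.agentic.responses_api import (
    DisplayCodeInterpreterFilesPostProcessor,
    ResponsesStreamingHandler,
    ShowExecutedCodePostprocessor,
    ShowExecutedCodePostprocessorConfig,
)
```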
unique_toolkit/agentic/responses_api/postprocessors/code_display.py

```diff
@@ -0,0 +1,63 @@
+import logging
+import re
+from typing import override
+
+from pydantic import BaseModel, Field
+
+from unique_toolkit.agentic.postprocessor.postprocessor_manager import (
+    ResponsesApiPostprocessor,
+)
+from unique_toolkit.agentic.tools.config import get_configuration_dict
+from unique_toolkit.language_model.schemas import ResponsesLanguageModelStreamResponse
+
+_TEMPLATE = """
+<details><summary>Code Interpreter Call</summary>
+
+```python
+{code}
+```
+
+</details>
+</br>
+
+""".lstrip()
+
+logger = logging.getLogger(__name__)
+
+
+class ShowExecutedCodePostprocessorConfig(BaseModel):
+    model_config = get_configuration_dict()
+    remove_from_history: bool = Field(
+        default=False,
+        description="If set, the code interpreter call will be removed from the history on subsequent calls to the assistant.",
+    )
+
+
+class ShowExecutedCodePostprocessor(ResponsesApiPostprocessor):
+    def __init__(self, config: ShowExecutedCodePostprocessorConfig):
+        super().__init__(self.__class__.__name__)
+        self._config = config
+
+    @override
+    async def run(self, loop_response: ResponsesLanguageModelStreamResponse) -> None:
+        return None
+
+    @override
+    def apply_postprocessing_to_response(
+        self, loop_response: ResponsesLanguageModelStreamResponse
+    ) -> bool:
+        prepended_text = ""
+        for output in loop_response.code_interpreter_calls:
+            prepended_text += _TEMPLATE.format(code=output.code)
+
+        loop_response.message.text = prepended_text + loop_response.message.text
+
+        return prepended_text != ""
+
+    @override
+    async def remove_from_text(self, text) -> str:
+        if not self._config.remove_from_history:
+            return text
+        # Remove code interpreter blocks using regex
+        pattern = r"<details><summary>Code Interpreter Call</summary>.*?</details>"
+        return re.sub(pattern, "", text, flags=re.DOTALL)
```
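The postprocessor prepends one collapsible `<details>` block per code-interpreter call; with `remove_from_history` set, those blocks are stripped again by a non-greedy DOTALL regex before the text re-enters the history. A standalone check of that strip pattern (message body simplified; note the template's trailing `</br>` sits outside the match and survives):

```python
import re

# Simplified stand-in for a message produced by ShowExecutedCodePostprocessor.
message = (
    "<details><summary>Code Interpreter Call</summary>\n\n"
    "print(1 + 1)\n\n"
    "</details>\n</br>\n\n"
    "The result is 2."
)

pattern = r"<details><summary>Code Interpreter Call</summary>.*?</details>"
print(re.sub(pattern, "", message, flags=re.DOTALL))
# -> "\n</br>\n\nThe result is 2."
```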
unique_toolkit/agentic/responses_api/postprocessors/generated_files.py

```diff
@@ -0,0 +1,145 @@
+import logging
+import re
+from mimetypes import guess_type
+from typing import override
+
+from openai import AsyncOpenAI
+from pydantic import BaseModel
+from unique_sdk import Content
+
+from unique_toolkit.agentic.postprocessor.postprocessor_manager import (
+    ResponsesApiPostprocessor,
+)
+from unique_toolkit.agentic.tools.config import get_configuration_dict
+from unique_toolkit.content.schemas import ContentReference
+from unique_toolkit.content.service import ContentService
+from unique_toolkit.language_model.schemas import ResponsesLanguageModelStreamResponse
+
+logger = logging.getLogger(__name__)
+
+
+class DisplayCodeInterpreterFilesPostProcessorConfig(BaseModel):
+    model_config = get_configuration_dict()
+    upload_scope_id: str
+
+
+class DisplayCodeInterpreterFilesPostProcessor(
+    ResponsesApiPostprocessor,
+):
+    def __init__(
+        self,
+        client: AsyncOpenAI,
+        content_service: ContentService,
+        config: DisplayCodeInterpreterFilesPostProcessorConfig,
+    ) -> None:
+        super().__init__(self.__class__.__name__)
+        self._content_service = content_service
+        self._config = config
+        self._client = client
+        self._content_map = {}
+
+    @override
+    async def run(self, loop_response: ResponsesLanguageModelStreamResponse) -> None:
+        logger.info("Fetching and adding code interpreter files to the response")
+
+        container_files = loop_response.container_files
+        logger.info("Found %s container files", len(container_files))
+
+        self._content_map = {}
+        for container_file in container_files:
+            logger.info("Fetching file content for %s", container_file.filename)
+            file_content = await self._client.containers.files.content.retrieve(
+                container_id=container_file.container_id, file_id=container_file.file_id
+            )
+
+            logger.info(
+                "Uploading file content for %s to knowledge base",
+                container_file.filename,
+            )
+            content = self._content_service.upload_content_from_bytes(
+                content=file_content.content,
+                content_name=container_file.filename,
+                skip_ingestion=True,
+                mime_type=guess_type(container_file.filename)[0] or "text/plain",
+                scope_id=self._config.upload_scope_id,
+            )
+            self._content_map[container_file.filename] = content
+
+    @override
+    def apply_postprocessing_to_response(
+        self, loop_response: ResponsesLanguageModelStreamResponse
+    ) -> bool:
+        ref_number = _get_next_ref_number(loop_response.message.references)
+        changed = False
+        # images
+        for filename, content in self._content_map.items():
+            # Images
+            loop_response.message.text, replaced = _replace_container_image_citation(
+                text=loop_response.message.text, filename=filename, content=content
+            )
+            changed |= replaced
+
+            # Files
+            loop_response.message.text, replaced = _replace_container_file_citation(
+                text=loop_response.message.text,
+                filename=filename,
+                ref_number=ref_number,
+            )
+            changed |= replaced
+            if replaced:
+                loop_response.message.references.append(
+                    ContentReference(
+                        sequence_number=ref_number,
+                        source_id=content.id,
+                        source="node-ingestion-chunks",
+                        url=f"unique://content/{content.id}",
+                        name=filename,
+                    )
+                )
+                ref_number += 1
+        return changed
+
+    @override
+    async def remove_from_text(self, text) -> str:
+        return text
+
+
+def _get_next_ref_number(references: list[ContentReference]) -> int:
+    max_ref_number = 0
+    for ref in references:
+        max_ref_number = max(max_ref_number, ref.sequence_number)
+    return max_ref_number + 1
+
+
+def _replace_container_image_citation(
+    text: str, filename: str, content: Content
+) -> tuple[str, bool]:
+    image_markdown = rf"!\[.*?\]\(sandbox:/mnt/data/{re.escape(filename)}\)"
+
+    if not re.search(image_markdown, text):
+        logger.info("No image markdown found for %s", filename)
+        return text, False
+
+    logger.info("Displaying image %s", filename)
+    return re.sub(
+        image_markdown,
+        f"",
+        text,
+    ), True
+
+
+def _replace_container_file_citation(
+    text: str, filename: str, ref_number: int
+) -> tuple[str, bool]:
+    file_markdown = rf"\[.*?\]\(sandbox:/mnt/data/{re.escape(filename)}\)"
+
+    if not re.search(file_markdown, text):
+        logger.info("No file markdown found for %s", filename)
+        return text, False
+
+    logger.info("Displaying file %s", filename)
+    return re.sub(
+        file_markdown,
+        f"<sup>{ref_number}</sup>",
+        text,
+    ), True
```
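Generated files come back from the model as `sandbox:/mnt/data/...` markdown links; after uploading each container file to the knowledge base, the helpers above rewrite image links to hosted content and turn file links into superscript markers backed by a `ContentReference`. A standalone check of the file-link rewrite (filename and reference number are illustrative):

```python
import re

filename = "report.xlsx"
text = "Your summary is ready: [report](sandbox:/mnt/data/report.xlsx)"

# Same pattern as _replace_container_file_citation above.
file_markdown = rf"\[.*?\]\(sandbox:/mnt/data/{re.escape(filename)}\)"
ref_number = 1

print(re.sub(file_markdown, f"<sup>{ref_number}</sup>", text))
# -> Your summary is ready: <sup>1</sup>
```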
unique_toolkit/agentic/responses_api/stream_handler.py

```diff
@@ -0,0 +1,15 @@
+from unique_toolkit.protocols.support import ResponsesSupportCompleteWithReferences
+from unique_toolkit.services.chat_service import ChatService
+
+
+class ResponsesStreamingHandler(ResponsesSupportCompleteWithReferences):
+    def __init__(self, chat_service: ChatService):
+        self._chat_service = chat_service
+
+    def complete_with_references(self, *args, **kwargs):
+        return self._chat_service.complete_responses_with_references(*args, **kwargs)
+
+    async def complete_with_references_async(self, *args, **kwargs):
+        return await self._chat_service.complete_responses_with_references_async(
+            *args, **kwargs
+        )
```
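The handler is a thin adapter so a `ChatService` can be handed to anything that expects the `ResponsesSupportCompleteWithReferences` protocol. A usage sketch (construction of the service itself is elided):

```python
from unique_toolkit.agentic.responses_api import ResponsesStreamingHandler
from unique_toolkit.services.chat_service import ChatService


def build_handler(chat_service: ChatService) -> ResponsesStreamingHandler:
    # Forwards complete_with_references(_async) to the new
    # complete_responses_with_references(_async) methods on ChatService.
    return ResponsesStreamingHandler(chat_service)
```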
unique_toolkit/agentic/tools/factory.py

```diff
@@ -11,6 +11,10 @@ class ToolFactory:
     tool_map: dict[str, type[Tool]] = {}
     tool_config_map: dict[str, Callable] = {}
 
+    @classmethod
+    def register_tool_config(cls, tool_name: str, tool_config: type[BaseToolConfig]):
+        cls.tool_config_map[tool_name] = tool_config
+
     @classmethod
     def register_tool(
         cls,
```
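`register_tool_config` allows registering a config class without a full `Tool` subclass; the new code-interpreter config further down uses exactly this at import time. A hypothetical registration (`MyToolConfig` and `"my_tool"` are illustrative):

```python
from unique_toolkit.agentic.tools.factory import ToolFactory
from unique_toolkit.agentic.tools.schemas import BaseToolConfig


class MyToolConfig(BaseToolConfig):  # illustrative config
    max_rows: int = 100


ToolFactory.register_tool_config("my_tool", MyToolConfig)
assert ToolFactory.tool_config_map["my_tool"] is MyToolConfig
```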
unique_toolkit/agentic/tools/openai_builtin/__init__.py

```diff
@@ -0,0 +1,11 @@
+from unique_toolkit.agentic.tools.openai_builtin.code_interpreter import (
+    OpenAICodeInterpreterConfig,
+    OpenAICodeInterpreterTool,
+)
+from unique_toolkit.agentic.tools.openai_builtin.manager import OpenAIBuiltInToolManager
+
+__all__ = [
+    "OpenAIBuiltInToolManager",
+    "OpenAICodeInterpreterTool",
+    "OpenAICodeInterpreterConfig",
+]
```
unique_toolkit/agentic/tools/openai_builtin/base.py

```diff
@@ -0,0 +1,30 @@
+from abc import ABC, abstractmethod
+from enum import StrEnum
+from typing import Generic, TypeVar
+
+from openai.types.responses.tool_param import CodeInterpreter
+
+from unique_toolkit.agentic.tools.schemas import ToolPrompts
+
+
+class OpenAIBuiltInToolName(StrEnum):
+    CODE_INTERPRETER = "code_interpreter"
+
+
+BuiltInToolType = CodeInterpreter  # Add other tool types when needed
+ToolType = TypeVar("ToolType", bound=BuiltInToolType)
+
+
+class OpenAIBuiltInTool(ABC, Generic[ToolType]):
+    @property
+    @abstractmethod
+    def name(self) -> OpenAIBuiltInToolName:
+        raise NotImplementedError()
+
+    @abstractmethod
+    def tool_description(self) -> BuiltInToolType:
+        raise NotImplementedError()
+
+    @abstractmethod
+    def get_tool_prompts(self) -> ToolPrompts:
+        raise NotImplementedError()
```
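Concrete built-ins implement the three abstract members; the real implementation lives in `code_interpreter/service.py` (+230 lines, not shown in this excerpt). A sketch, assuming `CodeInterpreter` is the TypedDict from `openai.types.responses.tool_param`:

```python
from openai.types.responses.tool_param import CodeInterpreter

from unique_toolkit.agentic.tools.openai_builtin.base import (
    OpenAIBuiltInTool,
    OpenAIBuiltInToolName,
)
from unique_toolkit.agentic.tools.schemas import ToolPrompts


class CodeInterpreterSketch(OpenAIBuiltInTool[CodeInterpreter]):
    @property
    def name(self) -> OpenAIBuiltInToolName:
        return OpenAIBuiltInToolName.CODE_INTERPRETER

    def tool_description(self) -> CodeInterpreter:
        # "auto" asks the Responses API to create a container for the run.
        return CodeInterpreter(type="code_interpreter", container={"type": "auto"})

    def get_tool_prompts(self) -> ToolPrompts:
        raise NotImplementedError  # prompt assembly is in OpenAICodeInterpreterTool
```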
unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py

```diff
@@ -0,0 +1,8 @@
+from unique_toolkit.agentic.tools.openai_builtin.code_interpreter.config import (
+    OpenAICodeInterpreterConfig,
+)
+from unique_toolkit.agentic.tools.openai_builtin.code_interpreter.service import (
+    OpenAICodeInterpreterTool,
+)
+
+__all__ = ["OpenAICodeInterpreterConfig", "OpenAICodeInterpreterTool"]
```
unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py

```diff
@@ -0,0 +1,57 @@
+from pydantic import Field
+
+from unique_toolkit.agentic.tools.factory import ToolFactory
+from unique_toolkit.agentic.tools.openai_builtin.base import (
+    OpenAIBuiltInToolName,
+)
+from unique_toolkit.agentic.tools.schemas import BaseToolConfig
+
+DEFAULT_TOOL_DESCRIPTION = "Use this tool to run python code, e.g to generate plots, process excel files, perform calculations, etc."
+
+DEFAULT_TOOL_DESCRIPTION_FOR_SYSTEM_PROMPT = """
+Use this tool to run python code, e.g to generate plots, process excel files, perform calculations, etc.
+Instructions:
+- All files uploaded to the chat are available in the code interpreter under the path `/mnt/data/<filename>
+- All files generated through code should be saved in the `/mnt/data` folder
+
+Instructions for displaying images and files in the chat:
+Once files are generated in the `/mnt/data` folder you MUST reference them in the chat using markdown syntax in order to display them in the chat.
+
+- If you want to display an image, use the following syntax: ``
+- Images will be converted and shown in the chat.
+- Do NOT display an extra download link for images a part from the markdown above.
+- Not using markdown syntax will FAIL to show images to the user.
+- YOU MUST use the syntax above to display images, otherwise the image will not be displayed in the chat.
+- For displaying a link to a file, use the following syntax: `[filename](sandbox:/mnt/data/<filename>)`
+- Files are converted to references the user can click on to download the file
+
+You MUST always use this syntax, otherwise the files will not be displayed in the chat.
+""".strip()
+
+DEFAULT_TOOL_FORMAT_INFORMATION_FOR_SYSTEM_PROMPT = ""
+
+DEFAULT_TOOL_FORMAT_INFORMATION_FOR_USER_PROMPT = ""
+
+DEFAULT_TOOL_DESCRIPTION_FOR_USER_PROMPT = ""
+
+
+class OpenAICodeInterpreterConfig(BaseToolConfig):
+    upload_files_in_chat: bool = Field(default=True)
+
+    tool_description: str = DEFAULT_TOOL_DESCRIPTION
+    tool_description_for_system_prompt: str = DEFAULT_TOOL_DESCRIPTION_FOR_SYSTEM_PROMPT
+    tool_format_information_for_system_prompt: str = (
+        DEFAULT_TOOL_FORMAT_INFORMATION_FOR_SYSTEM_PROMPT
+    )
+    tool_description_for_user_prompt: str = DEFAULT_TOOL_DESCRIPTION_FOR_USER_PROMPT
+    tool_format_information_for_user_prompt: str = (
+        DEFAULT_TOOL_FORMAT_INFORMATION_FOR_USER_PROMPT
+    )
+
+    expires_after_minutes: int = 20
+    use_auto_container: bool = False
+
+
+ToolFactory.register_tool_config(
+    OpenAIBuiltInToolName.CODE_INTERPRETER, OpenAICodeInterpreterConfig
+)
```