unique_toolkit 1.15.0__py3-none-any.whl → 1.16.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24)
  1. unique_toolkit/agentic/postprocessor/postprocessor_manager.py +50 -11
  2. unique_toolkit/agentic/responses_api/__init__.py +19 -0
  3. unique_toolkit/agentic/responses_api/postprocessors/code_display.py +63 -0
  4. unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +145 -0
  5. unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
  6. unique_toolkit/agentic/tools/factory.py +4 -0
  7. unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
  8. unique_toolkit/agentic/tools/openai_builtin/base.py +30 -0
  9. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
  10. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +57 -0
  11. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +230 -0
  12. unique_toolkit/agentic/tools/openai_builtin/manager.py +62 -0
  13. unique_toolkit/agentic/tools/tool_manager.py +257 -127
  14. unique_toolkit/chat/functions.py +15 -6
  15. unique_toolkit/chat/responses_api.py +461 -0
  16. unique_toolkit/language_model/functions.py +25 -9
  17. unique_toolkit/language_model/schemas.py +222 -27
  18. unique_toolkit/protocols/support.py +91 -9
  19. unique_toolkit/services/__init__.py +7 -0
  20. unique_toolkit/services/chat_service.py +139 -7
  21. {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.0.dist-info}/METADATA +5 -1
  22. {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.0.dist-info}/RECORD +24 -12
  23. {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.0.dist-info}/LICENSE +0 -0
  24. {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.0.dist-info}/WHEEL +0 -0
@@ -1,7 +1,12 @@
 import asyncio
+from abc import ABC, abstractmethod
 from logging import Logger, getLogger
-from typing import Any
+from typing import override
 
+from openai.types.chat import (
+    ChatCompletionNamedToolChoiceParam,
+)
+from openai.types.responses import ToolParam, response_create_params
 from pydantic import BaseModel, Field
 
 from unique_toolkit.agentic.evaluation.schemas import EvaluationMetricName
@@ -9,6 +14,10 @@ from unique_toolkit.agentic.tools.a2a import A2AManager, SubAgentTool
 from unique_toolkit.agentic.tools.config import ToolBuildConfig
 from unique_toolkit.agentic.tools.factory import ToolFactory
 from unique_toolkit.agentic.tools.mcp.manager import MCPManager
+from unique_toolkit.agentic.tools.openai_builtin.base import (
+    OpenAIBuiltInTool,
+)
+from unique_toolkit.agentic.tools.openai_builtin.manager import OpenAIBuiltInToolManager
 from unique_toolkit.agentic.tools.schemas import ToolCallResponse, ToolPrompts
 from unique_toolkit.agentic.tools.tool import Tool
 from unique_toolkit.agentic.tools.tool_progress_reporter import ToolProgressReporter
@@ -19,7 +28,6 @@ from unique_toolkit.agentic.tools.utils.execution.execution import (
 from unique_toolkit.app.schemas import ChatEvent
 from unique_toolkit.language_model.schemas import (
     LanguageModelFunction,
-    LanguageModelTool,
     LanguageModelToolDescription,
 )
 
@@ -44,135 +52,15 @@ class ToolManagerConfig(BaseModel):
     )
 
 
-class ToolManager:
-    """
-    Manages the tools available to the agent and executes tool calls.
-
-    This class is responsible for:
-    - Initializing tools based on the provided configuration and runtime events.
-    - Filtering tools based on availability, exclusivity, and user-defined constraints.
-    - Managing the lifecycle of tools, including retrieval, execution, and logging.
-    - Executing tool calls in parallel when possible to optimize performance.
-    - Enforcing limits on the number of tool calls and handling duplicate requests.
-
-    Key Features:
-    - Dynamic Tool Initialization: Tools are dynamically selected and initialized
-      based on runtime events and user preferences.
-    - Parallel Execution: Supports asynchronous execution of tools for efficiency.
-    - Error Handling: Provides detailed error messages and logs for failed tool calls.
-    - Scalability: Designed to handle a large number of tools and tool calls efficiently.
-
-    Only the ToolManager is allowed to interact with the tools directly.
-    """
-
-    def __init__(
-        self,
-        logger: Logger,
-        config: ToolManagerConfig,
-        event: ChatEvent,
-        tool_progress_reporter: ToolProgressReporter,
-        mcp_manager: MCPManager,
-        a2a_manager: A2AManager,
-    ):
-        self._logger = logger
+class BaseToolManager(ABC):
+    def __init__(self, config: ToolManagerConfig):
         self._config = config
-        self._tool_progress_reporter = tool_progress_reporter
-        self._tools = []
-        self._tool_choices = event.payload.tool_choices
-        self._disabled_tools = event.payload.disabled_tools
         # this needs to be a set of strings to avoid duplicates
         self._tool_evaluation_check_list: set[EvaluationMetricName] = set()
-        self._mcp_manager = mcp_manager
-        self._a2a_manager = a2a_manager
-        self._init__tools(event)
-
-    def _init__tools(self, event: ChatEvent) -> None:
-        tool_choices = self._tool_choices
-        tool_configs = self._config.tools
-        self._logger.info("Initializing tool definitions...")
-        self._logger.info(f"Tool choices: {tool_choices}")
-        self._logger.info(f"Tool configs: {tool_configs}")
-
-        tool_configs, sub_agents = self._a2a_manager.get_all_sub_agents(
-            tool_configs, event
-        )
-
-        # Build internal tools from configurations
-        internal_tools = [
-            ToolFactory.build_tool_with_settings(
-                t.name,
-                t,
-                t.configuration,
-                event,
-                tool_progress_reporter=self._tool_progress_reporter,
-            )
-            for t in tool_configs
-        ]
-
-        # Get MCP tools (these are already properly instantiated)
-        mcp_tools = self._mcp_manager.get_all_mcp_tools()
-        # Combine both types of tools
-        self.available_tools = internal_tools + mcp_tools + sub_agents
-        self._sub_agents = sub_agents
-
-        for t in self.available_tools:
-            if not t.is_enabled():
-                continue
-            if t.name in self._disabled_tools:
-                continue
-            # if tool choices are given, only include those tools
-            if len(self._tool_choices) > 0 and t.name not in self._tool_choices:
-                continue
-            # is the tool exclusive and has been choosen by the user?
-            if t.is_exclusive() and len(tool_choices) > 0 and t.name in tool_choices:
-                self._tools = [t]  # override all other tools
-                break
-            # if the tool is exclusive but no tool choices are given, skip it
-            if t.is_exclusive():
-                continue
-
-            self._tools.append(t)
-
-    @property
-    def sub_agents(self) -> list[SubAgentTool]:
-        return self._sub_agents
-
-    def get_evaluation_check_list(self) -> list[EvaluationMetricName]:
-        return list(self._tool_evaluation_check_list)
-
-    def log_loaded_tools(self):
-        self._logger.info(f"Loaded tools: {[tool.name for tool in self._tools]}")
-
-    def get_tools(self) -> list[Tool]:
-        return self._tools  # type: ignore
 
+    @abstractmethod
     def get_tool_by_name(self, name: str) -> Tool | None:
-        for tool in self._tools:
-            if tool.name == name:
-                return tool
-        return None
-
-    def get_forced_tools(self) -> list[dict[str, Any]]:
-        return [
-            self._convert_to_forced_tool(t.name)
-            for t in self._tools
-            if t.name in self._tool_choices
-        ]
-
-    def add_forced_tool(self, name):
-        tool = self.get_tool_by_name(name)
-        if not tool:
-            raise ValueError(f"Tool {name} not found")
-        self._tools.append(tool)
-        self._tool_choices.append(tool.name)
-
-    def get_tool_definitions(
-        self,
-    ) -> list[LanguageModelTool | LanguageModelToolDescription]:
-        return [tool.tool_description() for tool in self._tools]
-
-    def get_tool_prompts(self) -> list[ToolPrompts]:
-        return [tool.get_tool_prompts() for tool in self._tools]
+        raise NotImplementedError()
 
     def does_a_tool_take_control(self, tool_calls: list[LanguageModelFunction]) -> bool:
         for tool_call in tool_calls:
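The hunk above replaces the monolithic ToolManager with an abstract BaseToolManager that owns the config and the evaluation check-list and leaves get_tool_by_name abstract. Below is a minimal sketch of the resulting pattern; the subclass name and its in-memory tool list are illustrative only and not part of the package, and typing.override requires Python 3.12 or newer (typing_extensions.override on older interpreters).

from typing import override

from unique_toolkit.agentic.tools.tool import Tool
from unique_toolkit.agentic.tools.tool_manager import BaseToolManager, ToolManagerConfig


class InMemoryToolManager(BaseToolManager):  # hypothetical subclass, for illustration only
    def __init__(self, config: ToolManagerConfig, tools: list[Tool]):
        super().__init__(config)  # stores the config and the evaluation check-list set
        self._tools = tools

    @override
    def get_tool_by_name(self, name: str) -> Tool | None:
        # Concrete lookup replacing the abstract NotImplementedError stub.
        return next((t for t in self._tools if t.name == name), None)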
@@ -301,8 +189,250 @@ class ToolManager:
         )
         return unique_tool_calls
 
-    def _convert_to_forced_tool(self, tool_name: str) -> dict[str, Any]:
+    def get_evaluation_check_list(self) -> list[EvaluationMetricName]:
+        return list(self._tool_evaluation_check_list)
+
+
+class ToolManager(BaseToolManager):
+    """
+    Manages the tools available to the agent and executes tool calls.
+
+    This class is responsible for:
+    - Initializing tools based on the provided configuration and runtime events.
+    - Filtering tools based on availability, exclusivity, and user-defined constraints.
+    - Managing the lifecycle of tools, including retrieval, execution, and logging.
+    - Executing tool calls in parallel when possible to optimize performance.
+    - Enforcing limits on the number of tool calls and handling duplicate requests.
+
+    Key Features:
+    - Dynamic Tool Initialization: Tools are dynamically selected and initialized
+      based on runtime events and user preferences.
+    - Parallel Execution: Supports asynchronous execution of tools for efficiency.
+    - Error Handling: Provides detailed error messages and logs for failed tool calls.
+    - Scalability: Designed to handle a large number of tools and tool calls efficiently.
+
+    Only the ToolManager is allowed to interact with the tools directly.
+    """
+
+    def __init__(
+        self,
+        logger: Logger,
+        config: ToolManagerConfig,
+        event: ChatEvent,
+        tool_progress_reporter: ToolProgressReporter,
+        mcp_manager: MCPManager,
+        a2a_manager: A2AManager,
+    ):
+        super().__init__(config)
+        self._logger = logger
+        self._config = config
+        self._tool_progress_reporter = tool_progress_reporter
+        self._tools = []
+        self._tool_choices = event.payload.tool_choices
+        self._disabled_tools = event.payload.disabled_tools
+        # this needs to be a set of strings to avoid duplicates
+        self._tool_evaluation_check_list: set[EvaluationMetricName] = set()
+        self._mcp_manager = mcp_manager
+        self._a2a_manager = a2a_manager
+        self._init__tools(event)
+
+    def _init__tools(self, event: ChatEvent) -> None:
+        tool_choices = self._tool_choices
+        tool_configs = self._config.tools
+        self._logger.info("Initializing tool definitions...")
+        self._logger.info(f"Tool choices: {tool_choices}")
+
+        tool_configs, sub_agents = self._a2a_manager.get_all_sub_agents(
+            tool_configs, event
+        )
+
+        # Build internal tools from configurations
+        internal_tools = [
+            ToolFactory.build_tool_with_settings(
+                t.name,
+                t,
+                t.configuration,
+                event,
+                tool_progress_reporter=self._tool_progress_reporter,
+            )
+            for t in tool_configs
+        ]
+
+        # Get MCP tools (these are already properly instantiated)
+        mcp_tools = self._mcp_manager.get_all_mcp_tools()
+        # Combine both types of tools
+        self.available_tools = internal_tools + mcp_tools + sub_agents
+        self._sub_agents = sub_agents
+
+        for t in self.available_tools:
+            if not t.is_enabled():
+                continue
+            if t.name in self._disabled_tools:
+                continue
+            # if tool choices are given, only include those tools
+            if len(self._tool_choices) > 0 and t.name not in self._tool_choices:
+                continue
+            # is the tool exclusive and has been choosen by the user?
+            if t.is_exclusive() and len(tool_choices) > 0 and t.name in tool_choices:
+                self._tools = [t]  # override all other tools
+                break
+            # if the tool is exclusive but no tool choices are given, skip it
+            if t.is_exclusive():
+                continue
+
+            self._tools.append(t)
+
+    @property
+    def sub_agents(self) -> list[SubAgentTool]:
+        return self._sub_agents
+
+    def get_evaluation_check_list(self) -> list[EvaluationMetricName]:
+        return list(self._tool_evaluation_check_list)
+
+    def log_loaded_tools(self):
+        self._logger.info(f"Loaded tools: {[tool.name for tool in self._tools]}")
+
+    @override
+    def get_tool_by_name(self, name: str) -> Tool | None:
+        for tool in self._tools:
+            if tool.name == name:
+                return tool
+        return None
+
+    def get_tools(self) -> list[Tool]:
+        return self._tools  # type: ignore
+
+    def get_forced_tools(
+        self,
+    ) -> list[ChatCompletionNamedToolChoiceParam]:
+        return [
+            self._convert_to_forced_tool(t.name)
+            for t in self._tools
+            if t.name in self._tool_choices
+        ]
+
+    def get_tool_definitions(
+        self,
+    ) -> list[LanguageModelToolDescription]:
+        return [tool.tool_description() for tool in self._tools]
+
+    def get_tool_prompts(self) -> list[ToolPrompts]:
+        return [tool.get_tool_prompts() for tool in self._tools]
+
+    def add_forced_tool(self, name):
+        tool = self.get_tool_by_name(name)
+        if not tool:
+            raise ValueError(f"Tool {name} not found")
+
+        if tool.name not in self._tool_choices:
+            self._tool_choices.append(tool.name)
+
+    def _convert_to_forced_tool(
+        self, tool_name: str
+    ) -> ChatCompletionNamedToolChoiceParam:
         return {
             "type": "function",
             "function": {"name": tool_name},
         }
+
+    def tool_choices(self) -> list[str]:
+        return self._tool_choices.copy()
+
+
+class ResponsesApiToolManager(BaseToolManager):
+    def __init__(
+        self,
+        logger: Logger,
+        config: ToolManagerConfig,
+        tool_manager: ToolManager,
+        builtin_tools: list[OpenAIBuiltInTool],
+    ) -> None:
+        super().__init__(config)
+        self._logger = logger
+        self._config = config
+        self._tool_manager = tool_manager
+        self._builtin_tools = builtin_tools
+        self._tools = self._tool_manager.get_tools()
+
+    @classmethod
+    async def build_manager(
+        cls,
+        logger: Logger,
+        config: ToolManagerConfig,
+        event: ChatEvent,
+        tool_progress_reporter: ToolProgressReporter,
+        mcp_manager: MCPManager,
+        a2a_manager: A2AManager,
+        builtin_tool_manager: OpenAIBuiltInToolManager,
+    ) -> "ResponsesApiToolManager":
+        (
+            tool_configs,
+            builtin_tools,
+        ) = await builtin_tool_manager.get_all_openai_builtin_tools(config.tools)
+
+        completions_tool_manager_config = ToolManagerConfig(
+            tools=tool_configs, max_tool_calls=config.max_tool_calls
+        )
+        completions_tool_manager = ToolManager(
+            logger=logger,
+            config=completions_tool_manager_config,
+            event=event,
+            tool_progress_reporter=tool_progress_reporter,
+            mcp_manager=mcp_manager,
+            a2a_manager=a2a_manager,
+        )
+
+        return cls(
+            logger=logger,
+            config=config,
+            tool_manager=completions_tool_manager,
+            builtin_tools=builtin_tools,
+        )
+
+    @override
+    def get_tool_by_name(self, name: str) -> Tool | None:
+        return self._tool_manager.get_tool_by_name(name)
+
+    @property
+    def sub_agents(self) -> list[SubAgentTool]:
+        return self._tool_manager.sub_agents
+
+    def log_loaded_tools(self):
+        self._logger.info(
+            f"Loaded tools: {[tool.name for tool in self._tools + self._builtin_tools]}"
+        )
+
+    def get_tools(self) -> list[Tool]:
+        return self._tool_manager.get_tools()
+
+    def get_forced_tools(
+        self,
+    ) -> list[response_create_params.ToolChoice]:
+        """
+        Note that built-in tools cannot be forced at the moment
+        """
+        return [
+            {
+                "name": t.name,
+                "type": "function",
+            }
+            for t in self._tools
+            if t.name in self._tool_manager.tool_choices()
+        ]
+
+    def get_tool_definitions(
+        self,
+    ) -> list[LanguageModelToolDescription | ToolParam]:
+        if len(self._tool_manager.tool_choices()) > 0:
+            # We cannot send a builtin tool in this case (api error)
+            return [tool.tool_description() for tool in self._tools]
+        else:
+            return [
+                tool.tool_description() for tool in self._tools + self._builtin_tools
+            ]
+
+    def get_tool_prompts(self) -> list[ToolPrompts]:
+        return [tool.get_tool_prompts() for tool in self._tools + self._builtin_tools]
+
+    def add_forced_tool(self, name: str) -> None:
+        self._tool_manager.add_forced_tool(name)
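Taken together, these hunks reintroduce ToolManager as the Chat Completions implementation of BaseToolManager and add ResponsesApiToolManager, which wraps a completions ToolManager together with the OpenAI built-in tools split out by OpenAIBuiltInToolManager. A hedged usage sketch follows; the logger, config, event, reporter and manager instances are assumed to exist already and are only passed through, not constructed here.

from unique_toolkit.agentic.tools.tool_manager import (
    ResponsesApiToolManager,
    ToolManager,
)


async def build_both_managers(
    logger, config, event, reporter, mcp_manager, a2a_manager, builtin_manager
):
    # Chat Completions path: forced tools come back as
    # {"type": "function", "function": {"name": ...}} dicts.
    completions_manager = ToolManager(
        logger=logger,
        config=config,
        event=event,
        tool_progress_reporter=reporter,
        mcp_manager=mcp_manager,
        a2a_manager=a2a_manager,
    )
    forced_for_completions = completions_manager.get_forced_tools()

    # Responses API path: built-in tools (e.g. the code interpreter) are split
    # out by the OpenAIBuiltInToolManager and merged back into the tool
    # definitions; forced tools use the flat {"type": "function", "name": ...}
    # shape, and built-in tools cannot be forced.
    responses_manager = await ResponsesApiToolManager.build_manager(
        logger=logger,
        config=config,
        event=event,
        tool_progress_reporter=reporter,
        mcp_manager=mcp_manager,
        a2a_manager=a2a_manager,
        builtin_tool_manager=builtin_manager,
    )
    forced_for_responses = responses_manager.get_forced_tools()

    return forced_for_completions, forced_for_responses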
@@ -1,13 +1,17 @@
 import logging
 import re
-from typing import Any
+from typing import Any, Sequence
 
 import unique_sdk
+from openai.types.chat import ChatCompletionToolChoiceOptionParam
+from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
 from typing_extensions import deprecated
 from unique_sdk._list_object import ListObject
 
 from unique_toolkit._common import _time_utils
-from unique_toolkit.chat.constants import DEFAULT_MAX_MESSAGES
+from unique_toolkit.chat.constants import (
+    DEFAULT_MAX_MESSAGES,
+)
 from unique_toolkit.chat.schemas import (
     ChatMessage,
     ChatMessageAssessment,
@@ -30,10 +34,11 @@ from unique_toolkit.language_model.constants import (
     DEFAULT_COMPLETE_TIMEOUT,
 )
 from unique_toolkit.language_model.functions import (
-    ChatCompletionMessageParam,
     _prepare_all_completions_params_util,
 )
-from unique_toolkit.language_model.infos import LanguageModelName
+from unique_toolkit.language_model.infos import (
+    LanguageModelName,
+)
 from unique_toolkit.language_model.schemas import (
     LanguageModelMessages,
     LanguageModelStreamResponse,
@@ -761,8 +766,9 @@ def stream_complete_with_references(
     debug_info: dict | None = None,
     temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
     timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-    tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
+    tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
     start_text: str | None = None,
+    tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
     other_options: dict | None = None,
 ) -> LanguageModelStreamResponse:
     """Streams a completion synchronously.
@@ -795,6 +801,7 @@
             temperature=temperature,
             tools=tools,
             other_options=other_options,
+            tool_choice=tool_choice,
             content_chunks=content_chunks or [],
         )
     )
@@ -871,7 +878,8 @@ async def stream_complete_with_references_async(
     debug_info: dict | None = None,
     temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
     timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-    tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
+    tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
+    tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
     start_text: str | None = None,
     other_options: dict | None = None,
 ) -> LanguageModelStreamResponse:
@@ -889,6 +897,7 @@ async def stream_complete_with_references_async(
             model_name=model_name,
             temperature=temperature,
             tools=tools,
+            tool_choice=tool_choice,
             other_options=other_options,
             content_chunks=content_chunks or [],
         )