unique_toolkit 1.15.0__py3-none-any.whl → 1.16.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. unique_toolkit/agentic/debug_info_manager/debug_info_manager.py +16 -6
  2. unique_toolkit/agentic/debug_info_manager/test/test_debug_info_manager.py +278 -0
  3. unique_toolkit/agentic/postprocessor/postprocessor_manager.py +50 -11
  4. unique_toolkit/agentic/responses_api/__init__.py +19 -0
  5. unique_toolkit/agentic/responses_api/postprocessors/code_display.py +63 -0
  6. unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +145 -0
  7. unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
  8. unique_toolkit/agentic/tools/factory.py +4 -0
  9. unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
  10. unique_toolkit/agentic/tools/openai_builtin/base.py +30 -0
  11. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
  12. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +57 -0
  13. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +230 -0
  14. unique_toolkit/agentic/tools/openai_builtin/manager.py +62 -0
  15. unique_toolkit/agentic/tools/tool_manager.py +290 -125
  16. unique_toolkit/chat/functions.py +15 -6
  17. unique_toolkit/chat/responses_api.py +461 -0
  18. unique_toolkit/language_model/functions.py +25 -9
  19. unique_toolkit/language_model/schemas.py +222 -27
  20. unique_toolkit/protocols/support.py +91 -9
  21. unique_toolkit/services/__init__.py +7 -0
  22. unique_toolkit/services/chat_service.py +139 -7
  23. {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.1.dist-info}/METADATA +8 -1
  24. {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.1.dist-info}/RECORD +26 -13
  25. {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.1.dist-info}/LICENSE +0 -0
  26. {unique_toolkit-1.15.0.dist-info → unique_toolkit-1.16.1.dist-info}/WHEEL +0 -0
@@ -1,7 +1,12 @@
  import asyncio
+ from abc import ABC, abstractmethod
  from logging import Logger, getLogger
- from typing import Any
+ from typing import override

+ from openai.types.chat import (
+     ChatCompletionNamedToolChoiceParam,
+ )
+ from openai.types.responses import ToolParam, response_create_params
  from pydantic import BaseModel, Field

  from unique_toolkit.agentic.evaluation.schemas import EvaluationMetricName
@@ -9,6 +14,10 @@ from unique_toolkit.agentic.tools.a2a import A2AManager, SubAgentTool
  from unique_toolkit.agentic.tools.config import ToolBuildConfig
  from unique_toolkit.agentic.tools.factory import ToolFactory
  from unique_toolkit.agentic.tools.mcp.manager import MCPManager
+ from unique_toolkit.agentic.tools.openai_builtin.base import (
+     OpenAIBuiltInTool,
+ )
+ from unique_toolkit.agentic.tools.openai_builtin.manager import OpenAIBuiltInToolManager
  from unique_toolkit.agentic.tools.schemas import ToolCallResponse, ToolPrompts
  from unique_toolkit.agentic.tools.tool import Tool
  from unique_toolkit.agentic.tools.tool_progress_reporter import ToolProgressReporter
@@ -19,7 +28,6 @@ from unique_toolkit.agentic.tools.utils.execution.execution import (
  from unique_toolkit.app.schemas import ChatEvent
  from unique_toolkit.language_model.schemas import (
      LanguageModelFunction,
-     LanguageModelTool,
      LanguageModelToolDescription,
  )

@@ -44,135 +52,23 @@ class ToolManagerConfig(BaseModel):
      )


- class ToolManager:
-     """
-     Manages the tools available to the agent and executes tool calls.
-
-     This class is responsible for:
-     - Initializing tools based on the provided configuration and runtime events.
-     - Filtering tools based on availability, exclusivity, and user-defined constraints.
-     - Managing the lifecycle of tools, including retrieval, execution, and logging.
-     - Executing tool calls in parallel when possible to optimize performance.
-     - Enforcing limits on the number of tool calls and handling duplicate requests.
-
-     Key Features:
-     - Dynamic Tool Initialization: Tools are dynamically selected and initialized
-       based on runtime events and user preferences.
-     - Parallel Execution: Supports asynchronous execution of tools for efficiency.
-     - Error Handling: Provides detailed error messages and logs for failed tool calls.
-     - Scalability: Designed to handle a large number of tools and tool calls efficiently.
-
-     Only the ToolManager is allowed to interact with the tools directly.
-     """
-
-     def __init__(
-         self,
-         logger: Logger,
-         config: ToolManagerConfig,
-         event: ChatEvent,
-         tool_progress_reporter: ToolProgressReporter,
-         mcp_manager: MCPManager,
-         a2a_manager: A2AManager,
-     ):
-         self._logger = logger
+ class BaseToolManager(ABC):
+     def __init__(self, config: ToolManagerConfig):
          self._config = config
-         self._tool_progress_reporter = tool_progress_reporter
-         self._tools = []
-         self._tool_choices = event.payload.tool_choices
-         self._disabled_tools = event.payload.disabled_tools
          # this needs to be a set of strings to avoid duplicates
          self._tool_evaluation_check_list: set[EvaluationMetricName] = set()
-         self._mcp_manager = mcp_manager
-         self._a2a_manager = a2a_manager
-         self._init__tools(event)
-
-     def _init__tools(self, event: ChatEvent) -> None:
-         tool_choices = self._tool_choices
-         tool_configs = self._config.tools
-         self._logger.info("Initializing tool definitions...")
-         self._logger.info(f"Tool choices: {tool_choices}")
-         self._logger.info(f"Tool configs: {tool_configs}")
-
-         tool_configs, sub_agents = self._a2a_manager.get_all_sub_agents(
-             tool_configs, event
-         )
-
-         # Build internal tools from configurations
-         internal_tools = [
-             ToolFactory.build_tool_with_settings(
-                 t.name,
-                 t,
-                 t.configuration,
-                 event,
-                 tool_progress_reporter=self._tool_progress_reporter,
-             )
-             for t in tool_configs
-         ]
-
-         # Get MCP tools (these are already properly instantiated)
-         mcp_tools = self._mcp_manager.get_all_mcp_tools()
-         # Combine both types of tools
-         self.available_tools = internal_tools + mcp_tools + sub_agents
-         self._sub_agents = sub_agents
-
-         for t in self.available_tools:
-             if not t.is_enabled():
-                 continue
-             if t.name in self._disabled_tools:
-                 continue
-             # if tool choices are given, only include those tools
-             if len(self._tool_choices) > 0 and t.name not in self._tool_choices:
-                 continue
-             # is the tool exclusive and has been choosen by the user?
-             if t.is_exclusive() and len(tool_choices) > 0 and t.name in tool_choices:
-                 self._tools = [t] # override all other tools
-                 break
-             # if the tool is exclusive but no tool choices are given, skip it
-             if t.is_exclusive():
-                 continue
-
-             self._tools.append(t)
-
-     @property
-     def sub_agents(self) -> list[SubAgentTool]:
-         return self._sub_agents
-
-     def get_evaluation_check_list(self) -> list[EvaluationMetricName]:
-         return list(self._tool_evaluation_check_list)
-
-     def log_loaded_tools(self):
-         self._logger.info(f"Loaded tools: {[tool.name for tool in self._tools]}")
-
-     def get_tools(self) -> list[Tool]:
-         return self._tools # type: ignore

+     @abstractmethod
      def get_tool_by_name(self, name: str) -> Tool | None:
-         for tool in self._tools:
-             if tool.name == name:
-                 return tool
-         return None
-
-     def get_forced_tools(self) -> list[dict[str, Any]]:
-         return [
-             self._convert_to_forced_tool(t.name)
-             for t in self._tools
-             if t.name in self._tool_choices
-         ]
-
-     def add_forced_tool(self, name):
-         tool = self.get_tool_by_name(name)
-         if not tool:
-             raise ValueError(f"Tool {name} not found")
-         self._tools.append(tool)
-         self._tool_choices.append(tool.name)
+         raise NotImplementedError()

-     def get_tool_definitions(
-         self,
-     ) -> list[LanguageModelTool | LanguageModelToolDescription]:
-         return [tool.tool_description() for tool in self._tools]
+     @abstractmethod
+     def get_tool_choices(self) -> list[str]:
+         raise NotImplementedError()

-     def get_tool_prompts(self) -> list[ToolPrompts]:
-         return [tool.get_tool_prompts() for tool in self._tools]
+     @abstractmethod
+     def get_exclusive_tools(self) -> list[str]:
+         raise NotImplementedError()

      def does_a_tool_take_control(self, tool_calls: list[LanguageModelFunction]) -> bool:
          for tool_call in tool_calls:
@@ -233,6 +129,14 @@ class ToolManager:
              unpacked_tool_call_result = self._create_tool_call_response(
                  result, tool_calls[i]
              )
+             if unpacked_tool_call_result.debug_info is None:
+                 unpacked_tool_call_result.debug_info = {}
+             unpacked_tool_call_result.debug_info["is_exclusive"] = (
+                 tool_calls[i].name in self.get_exclusive_tools()
+             )
+             unpacked_tool_call_result.debug_info["is_forced"] = (
+                 tool_calls[i].name in self.get_tool_choices()
+             )
              tool_call_results_unpacked.append(unpacked_tool_call_result)

          return tool_call_results_unpacked
@@ -301,8 +205,269 @@
          )
          return unique_tool_calls

-     def _convert_to_forced_tool(self, tool_name: str) -> dict[str, Any]:
+     def get_evaluation_check_list(self) -> list[EvaluationMetricName]:
+         return list(self._tool_evaluation_check_list)
+
+
+ class ToolManager(BaseToolManager):
+     """
+     Manages the tools available to the agent and executes tool calls.
+
+     This class is responsible for:
+     - Initializing tools based on the provided configuration and runtime events.
+     - Filtering tools based on availability, exclusivity, and user-defined constraints.
+     - Managing the lifecycle of tools, including retrieval, execution, and logging.
+     - Executing tool calls in parallel when possible to optimize performance.
+     - Enforcing limits on the number of tool calls and handling duplicate requests.
+
+     Key Features:
+     - Dynamic Tool Initialization: Tools are dynamically selected and initialized
+       based on runtime events and user preferences.
+     - Parallel Execution: Supports asynchronous execution of tools for efficiency.
+     - Error Handling: Provides detailed error messages and logs for failed tool calls.
+     - Scalability: Designed to handle a large number of tools and tool calls efficiently.
+
+     Only the ToolManager is allowed to interact with the tools directly.
+     """
+
+     def __init__(
+         self,
+         logger: Logger,
+         config: ToolManagerConfig,
+         event: ChatEvent,
+         tool_progress_reporter: ToolProgressReporter,
+         mcp_manager: MCPManager,
+         a2a_manager: A2AManager,
+     ):
+         super().__init__(config)
+         self._logger = logger
+         self._config = config
+         self._tool_progress_reporter = tool_progress_reporter
+         self._tools = []
+         self._tool_choices = event.payload.tool_choices
+         self._disabled_tools = event.payload.disabled_tools
+         self._exclusive_tools = [
+             tool.name for tool in self._config.tools if tool.is_exclusive
+         ]
+         # this needs to be a set of strings to avoid duplicates
+         self._tool_evaluation_check_list: set[EvaluationMetricName] = set()
+         self._mcp_manager = mcp_manager
+         self._a2a_manager = a2a_manager
+         self._init__tools(event)
+
+     def _init__tools(self, event: ChatEvent) -> None:
+         tool_choices = self._tool_choices
+         tool_configs = self._config.tools
+         self._logger.info("Initializing tool definitions...")
+         self._logger.info(f"Tool choices: {tool_choices}")
+
+         tool_configs, sub_agents = self._a2a_manager.get_all_sub_agents(
+             tool_configs, event
+         )
+
+         # Build internal tools from configurations
+         internal_tools = [
+             ToolFactory.build_tool_with_settings(
+                 t.name,
+                 t,
+                 t.configuration,
+                 event,
+                 tool_progress_reporter=self._tool_progress_reporter,
+             )
+             for t in tool_configs
+         ]
+
+         # Get MCP tools (these are already properly instantiated)
+         mcp_tools = self._mcp_manager.get_all_mcp_tools()
+         # Combine both types of tools
+         self.available_tools = internal_tools + mcp_tools + sub_agents
+         self._sub_agents = sub_agents
+
+         for t in self.available_tools:
+             if not t.is_enabled():
+                 continue
+             if t.name in self._disabled_tools:
+                 continue
+             # if tool choices are given, only include those tools
+             if len(self._tool_choices) > 0 and t.name not in self._tool_choices:
+                 continue
+             # is the tool exclusive and has been choosen by the user?
+             if t.is_exclusive() and len(tool_choices) > 0 and t.name in tool_choices:
+                 self._tools = [t] # override all other tools
+                 break
+             # if the tool is exclusive but no tool choices are given, skip it
+             if t.is_exclusive():
+                 continue
+
+             self._tools.append(t)
+
+     @property
+     def sub_agents(self) -> list[SubAgentTool]:
+         return self._sub_agents
+
+     def get_evaluation_check_list(self) -> list[EvaluationMetricName]:
+         return list(self._tool_evaluation_check_list)
+
+     def log_loaded_tools(self):
+         self._logger.info(f"Loaded tools: {[tool.name for tool in self._tools]}")
+
+     @override
+     def get_tool_by_name(self, name: str) -> Tool | None:
+         for tool in self._tools:
+             if tool.name == name:
+                 return tool
+         return None
+
+     @override
+     def get_tool_choices(self) -> list[str]:
+         return self._tool_choices
+
+     @override
+     def get_exclusive_tools(self) -> list[str]:
+         return self._exclusive_tools
+
+     def get_tools(self) -> list[Tool]:
+         return self._tools # type: ignore
+
+     def get_forced_tools(
+         self,
+     ) -> list[ChatCompletionNamedToolChoiceParam]:
+         return [
+             self._convert_to_forced_tool(t.name)
+             for t in self._tools
+             if t.name in self._tool_choices
+         ]
+
+     def get_tool_definitions(
+         self,
+     ) -> list[LanguageModelToolDescription]:
+         return [tool.tool_description() for tool in self._tools]
+
+     def get_tool_prompts(self) -> list[ToolPrompts]:
+         return [tool.get_tool_prompts() for tool in self._tools]
+
+     def add_forced_tool(self, name):
+         tool = self.get_tool_by_name(name)
+         if not tool:
+             raise ValueError(f"Tool {name} not found")
+
+         if tool.name not in self._tool_choices:
+             self._tool_choices.append(tool.name)
+
+     def _convert_to_forced_tool(
+         self, tool_name: str
+     ) -> ChatCompletionNamedToolChoiceParam:
          return {
              "type": "function",
              "function": {"name": tool_name},
          }
+
+     def tool_choices(self) -> list[str]:
+         return self._tool_choices.copy()
+
+
+ class ResponsesApiToolManager(BaseToolManager):
+     def __init__(
+         self,
+         logger: Logger,
+         config: ToolManagerConfig,
+         tool_manager: ToolManager,
+         builtin_tools: list[OpenAIBuiltInTool],
+     ) -> None:
+         super().__init__(config)
+         self._logger = logger
+         self._config = config
+         self._tool_manager = tool_manager
+         self._builtin_tools = builtin_tools
+         self._tools = self._tool_manager.get_tools()
+
+     @classmethod
+     async def build_manager(
+         cls,
+         logger: Logger,
+         config: ToolManagerConfig,
+         event: ChatEvent,
+         tool_progress_reporter: ToolProgressReporter,
+         mcp_manager: MCPManager,
+         a2a_manager: A2AManager,
+         builtin_tool_manager: OpenAIBuiltInToolManager,
+     ) -> "ResponsesApiToolManager":
+         (
+             tool_configs,
+             builtin_tools,
+         ) = await builtin_tool_manager.get_all_openai_builtin_tools(config.tools)
+
+         completions_tool_manager_config = ToolManagerConfig(
+             tools=tool_configs, max_tool_calls=config.max_tool_calls
+         )
+         completions_tool_manager = ToolManager(
+             logger=logger,
+             config=completions_tool_manager_config,
+             event=event,
+             tool_progress_reporter=tool_progress_reporter,
+             mcp_manager=mcp_manager,
+             a2a_manager=a2a_manager,
+         )
+
+         return cls(
+             logger=logger,
+             config=config,
+             tool_manager=completions_tool_manager,
+             builtin_tools=builtin_tools,
+         )
+
+     @override
+     def get_tool_by_name(self, name: str) -> Tool | None:
+         return self._tool_manager.get_tool_by_name(name)
+
+     @override
+     def get_tool_choices(self) -> list[str]:
+         return self._tool_manager._tool_choices
+
+     @override
+     def get_exclusive_tools(self) -> list[str]:
+         return self._tool_manager._exclusive_tools
+
+     @property
+     def sub_agents(self) -> list[SubAgentTool]:
+         return self._tool_manager.sub_agents
+
+     def log_loaded_tools(self):
+         self._logger.info(
+             f"Loaded tools: {[tool.name for tool in self._tools + self._builtin_tools]}"
+         )
+
+     def get_tools(self) -> list[Tool]:
+         return self._tool_manager.get_tools()
+
+     def get_forced_tools(
+         self,
+     ) -> list[response_create_params.ToolChoice]:
+         """
+         Note that built-in tools cannot be forced at the moment
+         """
+         return [
+             {
+                 "name": t.name,
+                 "type": "function",
+             }
+             for t in self._tools
+             if t.name in self._tool_manager.tool_choices()
+         ]
+
+     def get_tool_definitions(
+         self,
+     ) -> list[LanguageModelToolDescription | ToolParam]:
+         if len(self._tool_manager.tool_choices()) > 0:
+             # We cannot send a builtin tool in this case (api error)
+             return [tool.tool_description() for tool in self._tools]
+         else:
+             return [
+                 tool.tool_description() for tool in self._tools + self._builtin_tools
+             ]
+
+     def get_tool_prompts(self) -> list[ToolPrompts]:
+         return [tool.get_tool_prompts() for tool in self._tools + self._builtin_tools]
+
+     def add_forced_tool(self, name: str) -> None:
+         self._tool_manager.add_forced_tool(name)
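
The hunk above splits the former ToolManager into a BaseToolManager ABC with two concrete implementations: the existing Chat Completions ToolManager and a new ResponsesApiToolManager that additionally carries the OpenAI built-in tools added under agentic/tools/openai_builtin/. A minimal wiring sketch based only on the signatures shown above, assuming an async context and that chat_event, tool_configs, the progress reporter and the MCP/A2A/built-in managers already exist in the caller's setup (those variable names are illustrative, not part of the package):

    from logging import getLogger

    from unique_toolkit.agentic.tools.tool_manager import (
        ResponsesApiToolManager,
        ToolManagerConfig,
    )

    # Sketch only: build the Responses API manager on top of a regular
    # completions ToolManager plus the OpenAI built-in tools. All objects on
    # the right-hand side are assumed to come from the caller's setup.
    tool_manager = await ResponsesApiToolManager.build_manager(
        logger=getLogger(__name__),
        config=ToolManagerConfig(tools=tool_configs, max_tool_calls=5),
        event=chat_event,
        tool_progress_reporter=progress_reporter,
        mcp_manager=mcp_manager,
        a2a_manager=a2a_manager,
        builtin_tool_manager=builtin_tool_manager,
    )

    # Definitions mix LanguageModelToolDescription entries with built-in
    # ToolParam entries, unless a tool choice is forced (built-in tools
    # cannot be forced, so only plain function tools are returned then).
    definitions = tool_manager.get_tool_definitions()
    forced = tool_manager.get_forced_tools()
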
@@ -1,13 +1,17 @@
  import logging
  import re
- from typing import Any
+ from typing import Any, Sequence

  import unique_sdk
+ from openai.types.chat import ChatCompletionToolChoiceOptionParam
+ from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
  from typing_extensions import deprecated
  from unique_sdk._list_object import ListObject

  from unique_toolkit._common import _time_utils
- from unique_toolkit.chat.constants import DEFAULT_MAX_MESSAGES
+ from unique_toolkit.chat.constants import (
+     DEFAULT_MAX_MESSAGES,
+ )
  from unique_toolkit.chat.schemas import (
      ChatMessage,
      ChatMessageAssessment,
@@ -30,10 +34,11 @@ from unique_toolkit.language_model.constants import (
      DEFAULT_COMPLETE_TIMEOUT,
  )
  from unique_toolkit.language_model.functions import (
-     ChatCompletionMessageParam,
      _prepare_all_completions_params_util,
  )
- from unique_toolkit.language_model.infos import LanguageModelName
+ from unique_toolkit.language_model.infos import (
+     LanguageModelName,
+ )
  from unique_toolkit.language_model.schemas import (
      LanguageModelMessages,
      LanguageModelStreamResponse,
@@ -761,8 +766,9 @@ def stream_complete_with_references(
      debug_info: dict | None = None,
      temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
      timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-     tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
+     tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
      start_text: str | None = None,
+     tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
      other_options: dict | None = None,
  ) -> LanguageModelStreamResponse:
      """Streams a completion synchronously.
@@ -795,6 +801,7 @@
              temperature=temperature,
              tools=tools,
              other_options=other_options,
+             tool_choice=tool_choice,
              content_chunks=content_chunks or [],
          )
      )
@@ -871,7 +878,8 @@ async def stream_complete_with_references_async(
      debug_info: dict | None = None,
      temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
      timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-     tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
+     tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
+     tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
      start_text: str | None = None,
      other_options: dict | None = None,
  ) -> LanguageModelStreamResponse:
@@ -889,6 +897,7 @@
              model_name=model_name,
              temperature=temperature,
              tools=tools,
+             tool_choice=tool_choice,
              other_options=other_options,
              content_chunks=content_chunks or [],
          )
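
The last four hunks thread an optional tool_choice argument, typed as OpenAI's ChatCompletionToolChoiceOptionParam, through stream_complete_with_references and its async counterpart, and relax the tools parameter from list to Sequence. A hedged sketch of the values a caller could now pass; the tool name is illustrative, and the remaining arguments of the streaming helpers are unchanged from 1.15.0 and omitted here:

    from openai.types.chat import ChatCompletionToolChoiceOptionParam

    # Either one of the literal modes accepted by the OpenAI type ...
    auto_choice: ChatCompletionToolChoiceOptionParam = "auto"

    # ... or a named function tool, matching the shape that
    # ToolManager._convert_to_forced_tool builds in the diff above.
    forced_choice: ChatCompletionToolChoiceOptionParam = {
        "type": "function",
        "function": {"name": "my_search_tool"},  # illustrative tool name
    }

    # forced_choice would then be passed as tool_choice= to
    # stream_complete_with_references / stream_complete_with_references_async.
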