unique_toolkit 0.8.29__py3-none-any.whl → 0.8.31__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
@@ -48,6 +48,14 @@ class LanguageModelName(StrEnum):
     LITELLM_OPENAI_GPT_5_MINI = "litellm:openai-gpt-5-mini"
     LITELLM_OPENAI_GPT_5_NANO = "litellm:openai-gpt-5-nano"
     LITELLM_OPENAI_GPT_5_CHAT = "litellm:openai-gpt-5-chat"
+    LITELLM_OPENAI_O1 = "litellm:openai-o1"
+    LITELLM_OPENAI_O3 = "litellm:openai-o3"
+    LITELLM_OPENAI_O3_DEEP_RESEARCH = "litellm:openai-o3-deep-research"
+    LITELLM_OPENAI_O3_PRO = "litellm:openai-o3-pro"
+    LITELLM_OPENAI_O4_MINI = "litellm:openai-o4-mini"
+    LITELLM_OPENAI_O4_MINI_DEEP_RESEARCH = "litellm:openai-o4-mini-deep-research"
+    LITELLM_OPENAI_GPT_4_1_MINI = "litellm:openai-gpt-4-1-mini"
+    LITELLM_OPENAI_GPT_4_1_NANO = "litellm:openai-gpt-4-1-nano"
     LITELLM_DEEPSEEK_R1 = "litellm:deepseek-r1"
     LITELLM_DEEPSEEK_V3 = "litellm:deepseek-v3-1"
     LITELLM_QWEN_3 = "litellm:qwen-3-235B-A22B"
@@ -83,6 +91,14 @@ def get_encoder_name(model_name: LanguageModelName) -> EncoderName:
             | LMN.LITELLM_OPENAI_GPT_5_MINI
             | LMN.LITELLM_OPENAI_GPT_5_NANO
             | LMN.LITELLM_OPENAI_GPT_5_CHAT
+            | LMN.LITELLM_OPENAI_O1
+            | LMN.LITELLM_OPENAI_O3
+            | LMN.LITELLM_OPENAI_O3_DEEP_RESEARCH
+            | LMN.LITELLM_OPENAI_O4_MINI
+            | LMN.LITELLM_OPENAI_O4_MINI_DEEP_RESEARCH
+            | LMN.LITELLM_OPENAI_GPT_4_1_MINI
+            | LMN.LITELLM_OPENAI_GPT_4_1_NANO
+            | LMN.LITELLM_OPENAI_O3_PRO
         ):
             return EncoderName.O200K_BASE
         case _:
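
All of the newly added entries resolve to the `o200k_base` tokenizer via the branch above. A minimal sketch of looking the encoder up, assuming `LanguageModelName`, `EncoderName`, and `get_encoder_name` are importable from `unique_toolkit.language_model.infos` (the module this hunk appears to belong to, per the RECORD changes at the bottom of the diff):

```python
# Sketch only: import path assumed from the RECORD section of this diff.
from unique_toolkit.language_model.infos import (
    EncoderName,
    LanguageModelName,
    get_encoder_name,
)

for model in (
    LanguageModelName.LITELLM_OPENAI_O3,
    LanguageModelName.LITELLM_OPENAI_O4_MINI,
    LanguageModelName.LITELLM_OPENAI_GPT_4_1_MINI,
):
    # Each of the new o-series and gpt-4.1 entries maps to o200k_base.
    assert get_encoder_name(model) == EncoderName.O200K_BASE
```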
@@ -879,6 +895,146 @@ class LanguageModelInfo(BaseModel):
                     deprecated_at=date(2026, 8, 7),
                     retirement_at=date(2026, 8, 7),
                 )
+            case LanguageModelName.LITELLM_OPENAI_O1:
+                return cls(
+                    name=model_name,
+                    provider=LanguageModelProvider.LITELLM,
+                    version="2024-12-17",
+                    encoder_name=EncoderName.O200K_BASE,
+                    capabilities=[
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.VISION,
+                        ModelCapabilities.REASONING,
+                    ],
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=200_000, token_limit_output=100_000
+                    ),
+                    info_cutoff_at=date(2023, 10, 1),
+                    published_at=date(2024, 12, 17),
+                    temperature_bounds=TemperatureBounds(
+                        min_temperature=1.0, max_temperature=1.0
+                    ),
+                )
+            case LanguageModelName.LITELLM_OPENAI_O3:
+                return cls(
+                    name=model_name,
+                    provider=LanguageModelProvider.LITELLM,
+                    version="2025-04-16",
+                    encoder_name=EncoderName.O200K_BASE,
+                    capabilities=[
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.REASONING,
+                    ],
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=200_000, token_limit_output=100_000
+                    ),
+                    temperature_bounds=TemperatureBounds(
+                        min_temperature=1.0, max_temperature=1.0
+                    ),
+                    published_at=date(2025, 4, 16),
+                    info_cutoff_at=date(2024, 6, 1),
+                )
+            case LanguageModelName.LITELLM_OPENAI_O3_DEEP_RESEARCH:
+                return cls(
+                    name=model_name,
+                    provider=LanguageModelProvider.LITELLM,
+                    version="2025-06-26",
+                    encoder_name=EncoderName.O200K_BASE,
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=200_000, token_limit_output=100_000
+                    ),
+                    published_at=date(2025, 4, 16),
+                    capabilities=[ModelCapabilities.STREAMING],
+                    info_cutoff_at=date(2024, 6, 1),
+                )
+            case LanguageModelName.LITELLM_OPENAI_O3_PRO:
+                return cls(
+                    name=model_name,
+                    provider=LanguageModelProvider.LITELLM,
+                    version="2025-06-10",
+                    encoder_name=EncoderName.O200K_BASE,
+                    capabilities=[
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.REASONING,
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                    ],
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=200_000, token_limit_output=100_000
+                    ),
+                    published_at=date(2025, 6, 10),
+                    info_cutoff_at=date(2024, 6, 1),
+                )
+            case LanguageModelName.LITELLM_OPENAI_O4_MINI:
+                return cls(
+                    name=model_name,
+                    provider=LanguageModelProvider.LITELLM,
+                    version="2025-04-16",
+                    encoder_name=EncoderName.O200K_BASE,
+                    capabilities=[
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                    ],
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=200_000, token_limit_output=100_000
+                    ),
+                    published_at=date(2025, 4, 16),
+                    info_cutoff_at=date(2024, 6, 1),
+                    temperature_bounds=TemperatureBounds(
+                        min_temperature=1.0, max_temperature=1.0
+                    ),
+                )
+            case LanguageModelName.LITELLM_OPENAI_O4_MINI_DEEP_RESEARCH:
+                return cls(
+                    name=model_name,
+                    provider=LanguageModelProvider.LITELLM,
+                    version="2025-06-26",
+                    encoder_name=EncoderName.O200K_BASE,
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=200_000, token_limit_output=100_000
+                    ),
+                    published_at=date(2025, 4, 16),
+                    capabilities=[ModelCapabilities.STREAMING],
+                    info_cutoff_at=date(2024, 6, 1),
+                )
+            case LanguageModelName.LITELLM_OPENAI_GPT_4_1_MINI:
+                return cls(
+                    name=model_name,
+                    provider=LanguageModelProvider.LITELLM,
+                    version="2025-04-14",
+                    encoder_name=EncoderName.O200K_BASE,
+                    published_at=date(2025, 4, 14),
+                    info_cutoff_at=date(2024, 6, 1),
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=1_047_576, token_limit_output=32_768
+                    ),
+                    capabilities=[
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                    ],
+                )
+            case LanguageModelName.LITELLM_OPENAI_GPT_4_1_NANO:
+                return cls(
+                    name=model_name,
+                    provider=LanguageModelProvider.LITELLM,
+                    version="2025-04-14",
+                    encoder_name=EncoderName.O200K_BASE,
+                    published_at=date(2025, 4, 14),
+                    info_cutoff_at=date(2024, 6, 1),
+                    token_limits=LanguageModelTokenLimits(
+                        token_limit_input=1_047_576, token_limit_output=32_768
+                    ),
+                    capabilities=[
+                        ModelCapabilities.STREAMING,
+                        ModelCapabilities.FUNCTION_CALLING,
+                        ModelCapabilities.STRUCTURED_OUTPUT,
+                    ],
+                )
             case LanguageModelName.LITELLM_DEEPSEEK_R1:
                 return cls(
                     name=model_name,
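
Note that every reasoning model added above pins `TemperatureBounds` to `min_temperature=max_temperature=1.0`, while the two gpt-4-1 entries get the 1,047,576-token input window. A hedged sketch of reading one of these entries, assuming the `case` arms above sit inside the `LanguageModelInfo.from_name` classmethod (the method signature is outside this hunk, so treat the name as an assumption):

```python
from unique_toolkit.language_model.infos import (
    LanguageModelInfo,
    LanguageModelName,
    ModelCapabilities,
)

# Assumption: the match statement shown above lives in `from_name`;
# substitute the real constructor if the toolkit names it differently.
info = LanguageModelInfo.from_name(LanguageModelName.LITELLM_OPENAI_O4_MINI)

print(info.token_limits.token_limit_input)                      # 200_000
print(ModelCapabilities.FUNCTION_CALLING in info.capabilities)  # True
print(info.temperature_bounds)                                  # fixed at 1.0
```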
@@ -0,0 +1,4 @@
+from unique_toolkit.tools.a2a.config import SubAgentToolConfig
+from unique_toolkit.tools.a2a.service import SubAgentTool
+
+__all__ = ["SubAgentToolConfig", "SubAgentTool"]
@@ -0,0 +1,27 @@
+from unique_toolkit.tools.config import get_configuration_dict
+from unique_toolkit.tools.schemas import BaseToolConfig
+
+DEFAULT_PARAM_DESCRIPTION_SUB_AGENT_USER_MESSAGE = """
+This is the message that will be sent to the sub-agent.
+""".strip()
+
+
+class SubAgentToolConfig(BaseToolConfig):
+    model_config = get_configuration_dict()
+
+    name: str = "default_name"
+    assistant_id: str = ""
+    chat_id: str | None = None
+    reuse_chat: bool = True
+    tool_description_for_system_prompt: str = ""
+    tool_description: str = ""
+    param_description_sub_agent_user_message: str = (
+        DEFAULT_PARAM_DESCRIPTION_SUB_AGENT_USER_MESSAGE
+    )
+    tool_format_information_for_system_prompt: str = ""
+
+    tool_description_for_user_prompt: str = ""
+    tool_format_information_for_user_prompt: str = ""
+
+    poll_interval: float = 1.0
+    max_wait: float = 120.0
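
The new file above (apparently `unique_toolkit/tools/a2a/config.py`, per the RECORD entries at the end of the diff) defines a plain Pydantic config for a sub-agent tool. A hedged sketch of filling it in; every value below is an illustrative placeholder, not a package default:

```python
from unique_toolkit.tools.a2a.config import SubAgentToolConfig

research_agent_config = SubAgentToolConfig(
    name="ResearchSubAgent",           # exposed to the calling model as the tool name
    assistant_id="assistant_123",      # placeholder id of the sub-agent assistant
    reuse_chat=True,                   # keep one sub-agent chat per assistant
    tool_description="Delegates research questions to a dedicated assistant.",
    poll_interval=2.0,                 # seconds between completion polls
    max_wait=180.0,                    # give up after three minutes
)
```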
@@ -0,0 +1,49 @@
+from logging import Logger
+
+from unique_toolkit.app.schemas import ChatEvent
+from unique_toolkit.tools.a2a.config import SubAgentToolConfig
+from unique_toolkit.tools.a2a.service import SubAgentTool, ToolProgressReporter
+from unique_toolkit.tools.config import ToolBuildConfig
+from unique_toolkit.tools.schemas import BaseToolConfig
+from unique_toolkit.tools.tool import Tool
+
+
+class A2AManager:
+    def __init__(
+        self,
+        logger: Logger,
+        tool_progress_reporter: ToolProgressReporter,
+    ):
+        self._logger = logger
+        self._tool_progress_reporter = tool_progress_reporter
+
+    def get_all_sub_agents(
+        self, tool_configs: list[ToolBuildConfig], event: ChatEvent
+    ) -> tuple[list[ToolBuildConfig], list[Tool[BaseToolConfig]]]:
+        sub_agents = []
+
+        for tool_config in tool_configs:
+            if not tool_config.is_sub_agent:
+                continue
+
+            if not isinstance(tool_config.configuration, SubAgentToolConfig):
+                self._logger.error(
+                    "tool_config.configuration must be of type SubAgentToolConfig"
+                )
+                continue
+
+            sub_agent_tool_config: SubAgentToolConfig = tool_config.configuration
+
+            sub_agents.append(
+                SubAgentTool(
+                    configuration=sub_agent_tool_config,
+                    event=event,
+                    tool_progress_reporter=self._tool_progress_reporter,
+                )
+            )
+
+        filtered_tool_config = [
+            tool_config for tool_config in tool_configs if not tool_config.is_sub_agent
+        ]
+
+        return filtered_tool_config, sub_agents
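
`A2AManager.get_all_sub_agents` partitions the configured tools: entries flagged `is_sub_agent` become `SubAgentTool` instances, and everything else is returned unchanged. A hedged usage sketch; the event, reporter, and configs are assumed to come from the caller:

```python
import logging

from unique_toolkit.app.schemas import ChatEvent
from unique_toolkit.tools.a2a.manager import A2AManager
from unique_toolkit.tools.config import ToolBuildConfig
from unique_toolkit.tools.tool_progress_reporter import ToolProgressReporter


def split_sub_agents(
    tool_configs: list[ToolBuildConfig],
    event: ChatEvent,
    reporter: ToolProgressReporter,
):
    """Separate sub-agent entries from ordinary tool configs (sketch)."""
    manager = A2AManager(
        logger=logging.getLogger(__name__),
        tool_progress_reporter=reporter,
    )
    remaining_configs, sub_agent_tools = manager.get_all_sub_agents(tool_configs, event)
    return remaining_configs, sub_agent_tools
```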
@@ -0,0 +1,26 @@
+from unique_toolkit import ShortTermMemoryService
+from unique_toolkit.short_term_memory.persistent_short_term_memory_manager import (
+    PersistentShortMemoryManager,
+)
+from unique_toolkit.tools.a2a.schema import SubAgentShortTermMemorySchema
+
+
+def _get_short_term_memory_name(assistant_id: str) -> str:
+    return f"sub_agent_chat_id_{assistant_id}"
+
+
+def get_sub_agent_short_term_memory_manager(
+    company_id: str, user_id: str, chat_id: str, assistant_id: str
+) -> PersistentShortMemoryManager[SubAgentShortTermMemorySchema]:
+    short_term_memory_service = ShortTermMemoryService(
+        company_id=company_id,
+        user_id=user_id,
+        chat_id=chat_id,
+        message_id=None,
+    )
+    short_term_memory_manager = PersistentShortMemoryManager(
+        short_term_memory_service=short_term_memory_service,
+        short_term_memory_schema=SubAgentShortTermMemorySchema,
+        short_term_memory_name=_get_short_term_memory_name(assistant_id),
+    )
+    return short_term_memory_manager
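
The memory helper keeps one record per sub-agent assistant under the key `sub_agent_chat_id_<assistant_id>`, which is what lets `reuse_chat` resume the same sub-agent conversation later. A hedged sketch of loading and saving it; all ids below are placeholders:

```python
import asyncio

from unique_toolkit.tools.a2a.memory import get_sub_agent_short_term_memory_manager
from unique_toolkit.tools.a2a.schema import SubAgentShortTermMemorySchema


async def remember_sub_agent_chat() -> None:
    # Placeholder ids throughout; a real backend connection is required.
    manager = get_sub_agent_short_term_memory_manager(
        company_id="company_1",
        user_id="user_1",
        chat_id="parent_chat_1",
        assistant_id="assistant_123",
    )
    stored = await manager.load_async()  # None until a sub-agent chat exists
    if stored is None:
        await manager.save_async(SubAgentShortTermMemorySchema(chat_id="sub_chat_1"))


asyncio.run(remember_sub_agent_chat())
```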
@@ -0,0 +1,15 @@
+from pydantic import BaseModel
+
+from unique_toolkit.tools.schemas import ToolCallResponse
+
+
+class SubAgentToolInput(BaseModel):
+    user_message: str
+
+
+class SubAgentToolCallResponse(ToolCallResponse):
+    assistant_message: str
+
+
+class SubAgentShortTermMemorySchema(BaseModel):
+    chat_id: str
@@ -0,0 +1,152 @@
+from pydantic import Field, create_model
+from unique_sdk.utils.chat_in_space import send_message_and_wait_for_completion
+
+from unique_toolkit.app import ChatEvent
+from unique_toolkit.evaluators.schemas import EvaluationMetricName
+from unique_toolkit.language_model import (
+    LanguageModelFunction,
+    LanguageModelMessage,
+    LanguageModelToolDescription,
+)
+from unique_toolkit.tools.a2a.config import SubAgentToolConfig
+from unique_toolkit.tools.a2a.memory import (
+    get_sub_agent_short_term_memory_manager,
+)
+from unique_toolkit.tools.a2a.schema import (
+    SubAgentShortTermMemorySchema,
+    SubAgentToolInput,
+)
+from unique_toolkit.tools.schemas import ToolCallResponse
+from unique_toolkit.tools.tool import Tool
+from unique_toolkit.tools.tool_progress_reporter import (
+    ProgressState,
+    ToolProgressReporter,
+)
+
+
+class SubAgentTool(Tool[SubAgentToolConfig]):
+    name: str = "SubAgentTool"
+
+    def __init__(
+        self,
+        configuration: SubAgentToolConfig,
+        event: ChatEvent,
+        tool_progress_reporter: ToolProgressReporter | None = None,
+    ):
+        super().__init__(configuration, event, tool_progress_reporter)
+        self._user_id = event.user_id
+        self._company_id = event.company_id
+        self.name = configuration.name
+
+        self._short_term_memory_manager = get_sub_agent_short_term_memory_manager(
+            company_id=self._company_id,
+            user_id=self._user_id,
+            chat_id=event.payload.chat_id,
+            assistant_id=self.config.assistant_id,
+        )
+
+    def tool_description(self) -> LanguageModelToolDescription:
+        tool_input_model_with_description = create_model(
+            "SubAgentToolInput",
+            user_message=(
+                str,
+                Field(description=self.config.param_description_sub_agent_user_message),
+            ),
+        )
+
+        return LanguageModelToolDescription(
+            name=self.name,
+            description=self.config.tool_description,
+            parameters=tool_input_model_with_description,
+        )
+
+    def tool_description_for_system_prompt(self) -> str:
+        return self.config.tool_description_for_system_prompt
+
+    def tool_format_information_for_system_prompt(self) -> str:
+        return self.config.tool_format_information_for_system_prompt
+
+    def tool_description_for_user_prompt(self) -> str:
+        return self.config.tool_description_for_user_prompt
+
+    def tool_format_information_for_user_prompt(self) -> str:
+        return self.config.tool_format_information_for_user_prompt
+
+    def evaluation_check_list(self) -> list[EvaluationMetricName]:
+        return []
+
+    def get_evaluation_checks_based_on_tool_response(
+        self,
+        tool_response: ToolCallResponse,
+    ) -> list[EvaluationMetricName]:
+        return []
+
+    async def _get_chat_id(self) -> str | None:
+        if not self.config.reuse_chat:
+            return None
+
+        if self.config.chat_id is not None:
+            return self.config.chat_id
+
+        # Check if there is a saved chat id in short term memory
+        short_term_memory = await self._short_term_memory_manager.load_async()
+
+        if short_term_memory is not None:
+            return short_term_memory.chat_id
+
+        return None
+
+    async def _save_chat_id(self, chat_id: str) -> None:
+        if not self.config.reuse_chat:
+            return
+
+        await self._short_term_memory_manager.save_async(
+            SubAgentShortTermMemorySchema(chat_id=chat_id)
+        )
+
+    async def run(self, tool_call: LanguageModelFunction) -> ToolCallResponse:
+        tool_input = SubAgentToolInput.model_validate(tool_call.arguments)
+
+        if self.tool_progress_reporter:
+            await self.tool_progress_reporter.notify_from_tool_call(
+                tool_call=tool_call,
+                name=f"{self.name}",
+                message=f"{tool_input.user_message}",
+                state=ProgressState.RUNNING,
+            )
+
+        # Check if there is a saved chat id in short term memory
+        chat_id = await self._get_chat_id()
+
+        response = await send_message_and_wait_for_completion(
+            user_id=self._user_id,
+            assistant_id=self.config.assistant_id,
+            company_id=self._company_id,
+            text=tool_input.user_message,  # type: ignore
+            chat_id=chat_id,  # type: ignore
+            poll_interval=self.config.poll_interval,
+            max_wait=self.config.max_wait,
+        )
+
+        if chat_id is None:
+            await self._save_chat_id(response["chatId"])
+
+        if response["text"] is None:
+            raise ValueError("No response returned from sub agent")
+
+        self._text = response["text"]
+        return ToolCallResponse(
+            id=tool_call.id,  # type: ignore
+            name=tool_call.name,
+            content=response["text"],
+        )
+
+ def get_tool_call_result_for_loop_history(
145
+ self,
146
+ tool_response: ToolCallResponse,
147
+ ) -> LanguageModelMessage:
148
+ return ToolCallResponse(
149
+ id=tool_response.id,
150
+ name=tool_response.name,
151
+ content=tool_response["content"],
152
+ )
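
`run` above drives the sub-agent through `unique_sdk.utils.chat_in_space.send_message_and_wait_for_completion` and, when `reuse_chat` is on and no chat id was known yet, caches the returned `chatId` for subsequent calls. A hedged sketch of invoking the tool directly; the `ChatEvent` comes from the caller, the assistant id is a placeholder, and the `LanguageModelFunction` fields used here are the ones `run` reads in the diff:

```python
from unique_toolkit.app import ChatEvent
from unique_toolkit.language_model import LanguageModelFunction
from unique_toolkit.tools.a2a.config import SubAgentToolConfig
from unique_toolkit.tools.a2a.service import SubAgentTool


async def ask_sub_agent(event: ChatEvent) -> str:
    tool = SubAgentTool(
        configuration=SubAgentToolConfig(
            name="ResearchSubAgent",
            assistant_id="assistant_123",  # placeholder
            tool_description="Delegates questions to a research assistant.",
        ),
        event=event,
    )
    tool_call = LanguageModelFunction(
        id="call_1",
        name="ResearchSubAgent",
        arguments={"user_message": "Summarize the latest report."},
    )
    response = await tool.run(tool_call)  # ToolCallResponse with the sub-agent's text
    return response.content
```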
@@ -72,6 +72,7 @@ class ToolBuildConfig(BaseModel):
         default=False,
         description="This tool must be chosen by the user and no other tools are used for this iteration.",
     )
+    is_sub_agent: bool = False
 
     is_enabled: bool = Field(default=True)
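
This one-line flag is what `A2AManager` keys on: a `ToolBuildConfig` counts as a sub-agent only if `is_sub_agent=True` and its `configuration` is a `SubAgentToolConfig`. A hedged sketch of such an entry; `configuration` and `is_sub_agent` appear in this diff, while any other `ToolBuildConfig` fields used here (e.g. `name`) are assumptions:

```python
from unique_toolkit.tools.a2a.config import SubAgentToolConfig
from unique_toolkit.tools.config import ToolBuildConfig

sub_agent_entry = ToolBuildConfig(
    name="ResearchSubAgent",           # assumed field; adjust to the real model
    is_sub_agent=True,                 # flag added in this release
    configuration=SubAgentToolConfig(
        name="ResearchSubAgent",
        assistant_id="assistant_123",  # placeholder id
    ),
)
```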
 
@@ -7,6 +7,7 @@ from pydantic import BaseModel
 from tests.test_obj_factory import get_event_obj
 from unique_toolkit.app.schemas import McpServer, McpTool
 from unique_toolkit.chat.service import ChatService
+from unique_toolkit.tools.a2a.manager import A2AManager
 from unique_toolkit.tools.config import ToolBuildConfig, ToolIcon, ToolSelectionPolicy
 from unique_toolkit.tools.factory import ToolFactory
 from unique_toolkit.tools.mcp.manager import MCPManager
@@ -162,25 +163,38 @@ class TestMCPManager:
             tool_progress_reporter=tool_progress_reporter,
         )
 
+    @pytest.fixture
+    def a2a_manager(self, tool_progress_reporter):
+        """Create A2A manager fixture"""
+        return A2AManager(
+            logger=self.logger,
+            tool_progress_reporter=tool_progress_reporter,
+        )
+
     @pytest.fixture
     def tool_manager_config(self, internal_tools):
         """Create tool manager configuration fixture"""
         return ToolManagerConfig(tools=internal_tools, max_tool_calls=10)
 
     @pytest.fixture
-    def tool_manager(self, tool_manager_config, mcp_manager, tool_progress_reporter):
+    def tool_manager(
+        self, tool_manager_config, mcp_manager, a2a_manager, tool_progress_reporter
+    ):
         """Create tool manager fixture"""
+
         return ToolManager(
             logger=self.logger,
             config=tool_manager_config,
             event=self.event,
             tool_progress_reporter=tool_progress_reporter,
             mcp_manager=mcp_manager,
+            a2a_manager=a2a_manager,
         )
 
     def test_tool_manager_initialization(self, tool_manager):
         """Test tool manager is initialized correctly"""
         assert tool_manager is not None
+
         assert (
             len(tool_manager.get_tools()) >= 2
         )  # Should have both internal and MCP tools
@@ -255,12 +269,19 @@ class TestMCPManager:
         """Test the _init__tools method behavior with different scenarios"""
 
         # Test 1: Normal initialization with both tool types
+
+        a2a_manager = A2AManager(
+            logger=self.logger,
+            tool_progress_reporter=tool_progress_reporter,
+        )
+
         tool_manager = ToolManager(
             logger=self.logger,
             config=tool_manager_config,
             event=self.event,
             tool_progress_reporter=tool_progress_reporter,
             mcp_manager=mcp_manager,
+            a2a_manager=a2a_manager,
         )
 
         # Verify both tools are loaded
@@ -285,12 +306,18 @@ class TestMCPManager:
         event_with_disabled.payload.tool_choices = ["internal_search", "mcp_test_tool"]
         event_with_disabled.payload.disabled_tools = ["internal_search"]
 
+        a2a_manager = A2AManager(
+            logger=self.logger,
+            tool_progress_reporter=tool_progress_reporter,
+        )
+
         tool_manager = ToolManager(
             logger=self.logger,
             config=tool_manager_config,
             event=event_with_disabled,
             tool_progress_reporter=tool_progress_reporter,
             mcp_manager=mcp_manager,
+            a2a_manager=a2a_manager,
         )
 
         # Should only have MCP tool, internal tool should be filtered out
@@ -315,12 +342,18 @@ class TestMCPManager:
         event_with_limited_choices.payload.tool_choices = ["internal_search"]
         event_with_limited_choices.payload.disabled_tools = []
 
+        a2a_manager = A2AManager(
+            logger=self.logger,
+            tool_progress_reporter=tool_progress_reporter,
+        )
+
         tool_manager = ToolManager(
             logger=self.logger,
             config=tool_manager_config,
             event=event_with_limited_choices,
             tool_progress_reporter=tool_progress_reporter,
             mcp_manager=mcp_manager,
+            a2a_manager=a2a_manager,
         )
 
         # Should only have internal search tool
@@ -348,12 +381,18 @@ class TestMCPManager:
             tools=[exclusive_tool_config], max_tool_calls=10
         )
 
+        a2a_manager = A2AManager(
+            logger=self.logger,
+            tool_progress_reporter=tool_progress_reporter,
+        )
+
         tool_manager = ToolManager(
             logger=self.logger,
             config=config_with_exclusive,
             event=self.event,
             tool_progress_reporter=tool_progress_reporter,
             mcp_manager=mcp_manager,
+            a2a_manager=a2a_manager,
         )
 
         # Should only have the exclusive tool, MCP tools should be ignored
@@ -383,12 +422,18 @@ class TestMCPManager:
             tools=[disabled_tool_config], max_tool_calls=10
         )
 
+        a2a_manager = A2AManager(
+            logger=self.logger,
+            tool_progress_reporter=tool_progress_reporter,
+        )
+
         tool_manager = ToolManager(
             logger=self.logger,
             config=config_with_disabled,
             event=self.event,
             tool_progress_reporter=tool_progress_reporter,
             mcp_manager=mcp_manager,
+            a2a_manager=a2a_manager,
        )
 
         # Should only have MCP tool, disabled internal tool should be filtered out
@@ -11,6 +11,7 @@ from unique_toolkit.language_model.schemas import (
     LanguageModelTool,
     LanguageModelToolDescription,
 )
+from unique_toolkit.tools.a2a.manager import A2AManager
 from unique_toolkit.tools.config import ToolBuildConfig, _rebuild_config_model
 from unique_toolkit.tools.factory import ToolFactory
 from unique_toolkit.tools.mcp.manager import MCPManager
@@ -71,6 +72,7 @@ class ToolManager:
         event: ChatEvent,
         tool_progress_reporter: ToolProgressReporter,
         mcp_manager: MCPManager,
+        a2a_manager: A2AManager,
     ):
         self._logger = logger
         self._config = config
@@ -81,6 +83,7 @@ class ToolManager:
         # this needs to be a set of strings to avoid duplicates
         self._tool_evaluation_check_list: set[EvaluationMetricName] = set()
         self._mcp_manager = mcp_manager
+        self._a2a_manager = a2a_manager
         self._init__tools(event)
 
     def _init__tools(self, event: ChatEvent) -> None:
@@ -90,6 +93,10 @@ class ToolManager:
         self._logger.info(f"Tool choices: {tool_choices}")
         self._logger.info(f"Tool configs: {tool_configs}")
 
+        tool_configs, sub_agents = self._a2a_manager.get_all_sub_agents(
+            tool_configs, event
+        )
+
         # Build internal tools from configurations
         internal_tools = [
             ToolFactory.build_tool_with_settings(
@@ -105,7 +112,7 @@ class ToolManager:
         # Get MCP tools (these are already properly instantiated)
         mcp_tools = self._mcp_manager.get_all_mcp_tools()
         # Combine both types of tools
-        self.available_tools = internal_tools + mcp_tools
+        self.available_tools = internal_tools + mcp_tools + sub_agents
 
         for t in self.available_tools:
             if t.is_exclusive():
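
With this change `ToolManager` takes an `A2AManager` alongside the existing `MCPManager`, and the resulting sub-agent tools are appended to `available_tools`. A hedged wiring sketch mirroring the test fixtures above; the config, event, reporter, and MCP manager are assumed to be provided by the caller, and `ToolManagerConfig`'s import path is not shown in this diff, so it is left untyped:

```python
import logging

from unique_toolkit.app.schemas import ChatEvent
from unique_toolkit.tools.a2a.manager import A2AManager
from unique_toolkit.tools.mcp.manager import MCPManager
from unique_toolkit.tools.tool_manager import ToolManager
from unique_toolkit.tools.tool_progress_reporter import ToolProgressReporter


def build_tool_manager(
    config,  # the usual ToolManagerConfig; import path not visible in this diff
    event: ChatEvent,
    reporter: ToolProgressReporter,
    mcp_manager: MCPManager,
) -> ToolManager:
    """Wire ToolManager with the new A2AManager dependency (sketch)."""
    logger = logging.getLogger(__name__)
    a2a_manager = A2AManager(logger=logger, tool_progress_reporter=reporter)
    return ToolManager(
        logger=logger,
        config=config,
        event=event,
        tool_progress_reporter=reporter,
        mcp_manager=mcp_manager,
        a2a_manager=a2a_manager,
    )
```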
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: unique_toolkit
-Version: 0.8.29
+Version: 0.8.31
 Summary:
 License: Proprietary
 Author: Martin Fadler
@@ -114,6 +114,22 @@ All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+
+## [0.8.31] - 2025-08-29
+- Add various openai models to supported model list
+  - o1
+  - o3
+  - o3-deep-research
+  - o3-pro
+  - o4-mini
+  - o4-mini-deep-research
+  - gpt-4-1-mini
+  - gpt-4-1-nano
+
+## [0.8.30] - 2025-08-28
+- Added A2A manager
+
 ## [0.8.29] - 2025-08-27
 - Include `MessageExecution` and `MessageLog` in toolkit
 
@@ -126,6 +142,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [0.8.26] - 2025-08-27
 - Optimized MCP manager
 
+## [0.8.26] - 2025-08-27
+- Optimized MCP manager
+
 ## [0.8.25] - 2025-08-27
 - Load environment variables automatically from plattform dirs or environment
 - General Endpoint definition utility
@@ -81,7 +81,7 @@ unique_toolkit/language_model/__init__.py,sha256=lRQyLlbwHbNFf4-0foBU13UGb09lwEe
 unique_toolkit/language_model/builder.py,sha256=4OKfwJfj3TrgO1ezc_ewIue6W7BCQ2ZYQXUckWVPPTA,3369
 unique_toolkit/language_model/constants.py,sha256=B-topqW0r83dkC_25DeQfnPk3n53qzIHUCBS7YJ0-1U,119
 unique_toolkit/language_model/functions.py,sha256=4-zOzLsdjcfeTy6alqkYEBl-oVWptz9xLi8C5vdbWEg,16769
-unique_toolkit/language_model/infos.py,sha256=2roY4jqVd5p11Cavc4JLqh8uqpynOKjs2_eb5OJhqkk,48810
+unique_toolkit/language_model/infos.py,sha256=fKmpTEvrHotGFo-PHkLpdlmk57UyFLCkuPeTic-Zohg,56304
 unique_toolkit/language_model/prompt.py,sha256=JSawaLjQg3VR-E2fK8engFyJnNdk21zaO8pPIodzN4Q,3991
 unique_toolkit/language_model/reference.py,sha256=nkX2VFz-IrUz8yqyc3G5jUMNwrNpxITBrMEKkbqqYoI,8583
 unique_toolkit/language_model/schemas.py,sha256=EOgy-p1GRcS46Sq0qEsN8MfOMl-KCcvEd9aCmqm9d08,16497
@@ -99,23 +99,29 @@ unique_toolkit/short_term_memory/service.py,sha256=5PeVBu1ZCAfyDb2HLVvlmqSbyzBBu
 unique_toolkit/smart_rules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 unique_toolkit/smart_rules/compile.py,sha256=cxWjb2dxEI2HGsakKdVCkSNi7VK9mr08w5sDcFCQyWI,9553
 unique_toolkit/thinking_manager/thinking_manager.py,sha256=AJfmrTXTr-DxBnJ2_zYYpYo57kr5deqT0LiZb8UdaDQ,4175
-unique_toolkit/tools/config.py,sha256=E4lFQA4gCi3_j4hcdnd5YUHhbaWTyIGWrgD18QardQw,3796
+unique_toolkit/tools/a2a/__init__.py,sha256=BIPxaqnfTD9IywxrqTLbGnkqNQrfLnKbdzF5ELYzCxM,171
+unique_toolkit/tools/a2a/config.py,sha256=DKerB7gJ0oUFfqdvVN4I174EZYfYzt7ET2NYGkMLd0Q,864
+unique_toolkit/tools/a2a/manager.py,sha256=-Vt0pL886zMaaG4sGQhXmbwOhOL928WbZzm4FRZuZMM,1666
+unique_toolkit/tools/a2a/memory.py,sha256=F18kUA3m3NqoKdKAJSwDv8JQneHvZTGOkcZTLNMXAYs,1004
+unique_toolkit/tools/a2a/schema.py,sha256=1R7qIu2l2qnUJDGRuUnZLqPPkHnT3x4d3d4PALoBzcY,296
+unique_toolkit/tools/a2a/service.py,sha256=8yiG2zYqHva4fQJX4dfnE9Bm4vDnbK0k6YRyCSQy9RQ,5147
+unique_toolkit/tools/config.py,sha256=nYwglwUSb3fxhqYxI83k-qoecuF5Zcol5FMOxHvTNeE,3827
 unique_toolkit/tools/factory.py,sha256=w3uNHuYBIJ330Xi8PTdAkr8G3OMbQH2cBgvk5UT16oE,1253
 unique_toolkit/tools/mcp/__init__.py,sha256=RLF_p-LDRC7GhiB3fdCi4u3bh6V9PY_w26fg61BLyco,122
 unique_toolkit/tools/mcp/manager.py,sha256=lQcCsfCMqW2j6uCwNmgYoQDwjm8YosvCh4-apw3KZKs,2683
 unique_toolkit/tools/mcp/models.py,sha256=f9OXcGadBx2tDlhYFu-7oEkl5p9ppoDKTqZbca_NblQ,714
 unique_toolkit/tools/mcp/tool_wrapper.py,sha256=w7Fbo4FSMYvtgSq7Sqt1dmAPvqHjoBQS-TwnSp0p6j0,9927
 unique_toolkit/tools/schemas.py,sha256=rArQccbfIv7CWcozClAZ-BVlOwAsjpgL8KUab_WeO3k,4817
-unique_toolkit/tools/test/test_mcp_manager.py,sha256=jnVKxqJyeJBDb6LvI8bM9dd8XPxrbXV0VCOHMhCdlug,14398
+unique_toolkit/tools/test/test_mcp_manager.py,sha256=dySiytBUfRjTOzwY_oGKi_jT6BNVgzZuh1du-EvbcJ4,15627
 unique_toolkit/tools/test/test_tool_progress_reporter.py,sha256=GTtmBqOUo0-4fh_q0lRgxDhwKeankc3FHFD5ULZAm4Y,6299
 unique_toolkit/tools/tool.py,sha256=E0qORpQBrdksAylf_RuPaEbXKi_AlswVGv-67t3DNZI,5879
-unique_toolkit/tools/tool_manager.py,sha256=_Qt5fUGvA82YHn5LvqNJurLYW41jH9endNwsiSeyXyM,9948
+unique_toolkit/tools/tool_manager.py,sha256=uW1uf8mYvbnBN_TVa4L79p_8Sf9HyY4V_nXZqrh3fPM,10206
 unique_toolkit/tools/tool_progress_reporter.py,sha256=ixud9VoHey1vlU1t86cW0-WTvyTwMxNSWBon8I11SUk,7955
 unique_toolkit/tools/utils/execution/execution.py,sha256=vjG2Y6awsGNtlvyQAGCTthQ5thWHYnn-vzZXaYLb3QE,7922
 unique_toolkit/tools/utils/source_handling/schema.py,sha256=vzAyf6ZWNexjMO0OrnB8y2glGkvAilmGGQXd6zcDaKw,870
 unique_toolkit/tools/utils/source_handling/source_formatting.py,sha256=C7uayNbdkNVJdEARA5CENnHtNY1SU6etlaqbgHNyxaQ,9152
 unique_toolkit/tools/utils/source_handling/tests/test_source_formatting.py,sha256=oM5ZxEgzROrnX1229KViCAFjRxl9wCTzWZoinYSHleM,6979
-unique_toolkit-0.8.29.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
-unique_toolkit-0.8.29.dist-info/METADATA,sha256=14-dyP0BCriYbAGs3mQBbGUG-SfuOmJDX6lq1Puf2Gg,29245
-unique_toolkit-0.8.29.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-unique_toolkit-0.8.29.dist-info/RECORD,,
+unique_toolkit-0.8.31.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
+unique_toolkit-0.8.31.dist-info/METADATA,sha256=Kcu6gzn5fnXVRnZIHaplzTJuHArVWkhOH1Z7kCgBeUk,29539
+unique_toolkit-0.8.31.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+unique_toolkit-0.8.31.dist-info/RECORD,,