unique_toolkit 0.8.22__py3-none-any.whl → 0.8.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -78,11 +78,9 @@ class EvaluationManager:
78
78
  self,
79
79
  logger: Logger,
80
80
  chat_service: ChatService,
81
- assistant_message_id: str,
82
81
  ):
83
82
  self._logger = logger
84
83
  self._chat_service = chat_service
85
- self._assistant_message_id = assistant_message_id
86
84
  self._evaluations: dict[EvaluationMetricName, Evaluation] = {}
87
85
  self._evaluation_passed: bool = True
88
86
 
@@ -96,6 +94,7 @@ class EvaluationManager:
96
94
  self,
97
95
  selected_evaluation_names: list[EvaluationMetricName],
98
96
  loop_response: LanguageModelStreamResponse,
97
+ assistant_message_id: str,
99
98
  ) -> list[EvaluationMetricResult]:
100
99
  task_executor = SafeTaskExecutor(
101
100
  logger=self._logger,
@@ -106,6 +105,7 @@ class EvaluationManager:
106
105
  self.execute_evaluation_call,
107
106
  loop_response=loop_response,
108
107
  evaluation_name=evaluation_name,
108
+ assistant_message_id=assistant_message_id,
109
109
  )
110
110
  for evaluation_name in selected_evaluation_names
111
111
  ]
@@ -126,6 +126,7 @@ class EvaluationManager:
126
126
  self,
127
127
  evaluation_name: EvaluationMetricName,
128
128
  loop_response: LanguageModelStreamResponse,
129
+ assistant_message_id: str,
129
130
  ) -> EvaluationMetricResult:
130
131
  self._logger.info(f"Processing tool call: {evaluation_name}")
131
132
 
@@ -133,13 +134,15 @@ class EvaluationManager:
133
134
 
134
135
  if evaluation_instance:
135
136
  # Execute the evaluation
136
- await self._create_assistant_message(evaluation_instance)
137
+ await self._create_assistant_message(
138
+ evaluation_instance, assistant_message_id
139
+ )
137
140
  evaluation_metric_result: EvaluationMetricResult = (
138
141
  await evaluation_instance.run(loop_response)
139
142
  )
140
143
  # show results to the user
141
144
  await self._show_message_assessment(
142
- evaluation_instance, evaluation_metric_result
145
+ evaluation_instance, evaluation_metric_result, assistant_message_id
143
146
  )
144
147
 
145
148
  return evaluation_metric_result
@@ -182,6 +185,7 @@ class EvaluationManager:
182
185
  self,
183
186
  evaluation_instance: Evaluation,
184
187
  evaluation_metric_result: EvaluationMetricResult,
188
+ assistant_message_id: str,
185
189
  ) -> None:
186
190
  evaluation_assessment_message = (
187
191
  await evaluation_instance.evaluation_metric_to_assessment(
@@ -189,7 +193,7 @@ class EvaluationManager:
189
193
  )
190
194
  )
191
195
  await self._chat_service.modify_message_assessment_async(
192
- assistant_message_id=self._assistant_message_id,
196
+ assistant_message_id=assistant_message_id,
193
197
  status=evaluation_assessment_message.status,
194
198
  title=evaluation_assessment_message.title,
195
199
  explanation=evaluation_assessment_message.explanation,
@@ -197,9 +201,11 @@ class EvaluationManager:
197
201
  type=evaluation_assessment_message.type,
198
202
  )
199
203
 
200
    async def _create_assistant_message(
        self, evaluation_instance: Evaluation, assistant_message_id: str
    ):
        """Create a PENDING message assessment for *assistant_message_id*.

        The assessment's type is taken from the evaluation instance; the
        entry is later updated via ``modify_message_assessment_async`` once
        the evaluation has produced a result.
        """
        await self._chat_service.create_message_assessment_async(
            assistant_message_id=assistant_message_id,
            status=ChatMessageAssessmentStatus.PENDING,
            type=evaluation_instance.get_assessment_type(),
        )
@@ -85,6 +85,22 @@ class ToolBuildConfig(BaseModel):
85
85
  if not isinstance(value, dict):
86
86
  return value
87
87
 
88
+ is_mcp_tool = value.get("mcp_source_id", "") != ""
89
+ mcp_configuration = value.get("configuration", {})
90
+
91
+ # Import at runtime to avoid circular imports
92
+ from unique_toolkit.tools.mcp.models import MCPToolConfig
93
+
94
+ if (
95
+ isinstance(mcp_configuration, MCPToolConfig)
96
+ and mcp_configuration.mcp_source_id
97
+ ):
98
+ return value
99
+ if is_mcp_tool:
100
+ # For MCP tools, skip ToolFactory validation
101
+ # Configuration can remain as a dict
102
+ return value
103
+
88
104
  configuration = value.get("configuration", {})
89
105
  if isinstance(configuration, dict):
90
106
  # Local import to avoid circular import at module import time
@@ -105,3 +121,11 @@ class ToolBuildConfig(BaseModel):
105
121
  config = configuration
106
122
  value["configuration"] = config
107
123
  return value
124
+
125
+
126
+ def _rebuild_config_model():
127
+ """Rebuild the ToolBuildConfig model to resolve forward references."""
128
+ # Import here to avoid circular imports
129
+ from unique_toolkit.tools.schemas import BaseToolConfig # noqa: F401
130
+
131
+ ToolBuildConfig.model_rebuild()
@@ -0,0 +1,4 @@
1
"""Public interface of the MCP tools subpackage."""

from .models import EnrichedMCPTool, MCPToolConfig
from .tool_wrapper import MCPToolWrapper

__all__ = ["MCPToolWrapper", "MCPToolConfig", "EnrichedMCPTool"]
@@ -0,0 +1,82 @@
1
+ import logging
2
+
3
+ from unique_toolkit.app.schemas import ChatEvent, McpServer, McpTool
4
+ from unique_toolkit.tools.mcp.models import EnrichedMCPTool, MCPToolConfig
5
+ from unique_toolkit.tools.mcp.tool_wrapper import MCPToolWrapper
6
+ from unique_toolkit.tools.tool_progress_reporter import ToolProgressReporter
7
+
8
+
9
+ class MCPManager:
10
+ def __init__(
11
+ self,
12
+ mcp_servers: list[McpServer],
13
+ event: ChatEvent,
14
+ tool_progress_reporter: ToolProgressReporter,
15
+ ):
16
+ self._mcp_servers = mcp_servers
17
+ self._event = event
18
+ self._tool_progress_reporter = tool_progress_reporter
19
+
20
+ def get_mcp_servers(self):
21
+ return self._mcp_servers
22
+
23
+ def get_mcp_server_by_id(self, id: str):
24
+ return next((server for server in self._mcp_servers if server.id == id), None)
25
+
26
+ def _enrich_tool_with_mcp_info(
27
+ self, mcp_tool: McpTool, server: McpServer
28
+ ) -> EnrichedMCPTool:
29
+ enriched_tool = type("EnrichedMcpTool", (), {})()
30
+
31
+ # Copy all attributes from the original tool
32
+ for attr in dir(mcp_tool):
33
+ if not attr.startswith("_"):
34
+ setattr(enriched_tool, attr, getattr(mcp_tool, attr))
35
+
36
+ # Add server-specific attributes
37
+ enriched_tool.server_id = server.id
38
+ enriched_tool.server_name = server.name
39
+ enriched_tool.server_system_prompt = getattr(server, "system_prompt", None)
40
+ enriched_tool.server_user_prompt = getattr(server, "user_prompt", None)
41
+ enriched_tool.mcp_source_id = server.id
42
+
43
+ return enriched_tool
44
+
45
+ def create_mcp_tool_wrapper(
46
+ self, mcp_tool: EnrichedMCPTool, tool_progress_reporter: ToolProgressReporter
47
+ ) -> MCPToolWrapper:
48
+ """Create MCP tool wrapper that behave like internal tools"""
49
+ try:
50
+ config = MCPToolConfig(
51
+ server_id=mcp_tool.server_id,
52
+ server_name=mcp_tool.server_name,
53
+ server_system_prompt=mcp_tool.server_system_prompt,
54
+ server_user_prompt=mcp_tool.server_user_prompt,
55
+ mcp_source_id=mcp_tool.mcp_source_id,
56
+ )
57
+ wrapper = MCPToolWrapper(
58
+ mcp_tool=mcp_tool,
59
+ config=config,
60
+ event=self._event,
61
+ tool_progress_reporter=tool_progress_reporter,
62
+ )
63
+ return wrapper
64
+ except Exception as e:
65
+ import traceback
66
+
67
+ logging.error(f"Error creating MCP tool wrapper for {mcp_tool.name}: {e}")
68
+ logging.error(f"Full traceback: {traceback.format_exc()}")
69
+ return None
70
+
71
+ def get_all_mcp_tools(self, selected_by_user: list[str]) -> list[MCPToolWrapper]:
72
+ selected_tools = []
73
+ for server in self._mcp_servers:
74
+ if hasattr(server, "tools") and server.tools:
75
+ for tool in server.tools:
76
+ enriched_tool = self._enrich_tool_with_mcp_info(tool, server)
77
+ wrapper = self.create_mcp_tool_wrapper(
78
+ enriched_tool, self._tool_progress_reporter
79
+ )
80
+ if wrapper is not None:
81
+ selected_tools.append(wrapper)
82
+ return selected_tools
@@ -0,0 +1,38 @@
1
+ from typing import Any, Dict, Optional, Protocol
2
+
3
+ from unique_toolkit.tools.schemas import BaseToolConfig
4
+
5
+
6
+ class MCPTool(Protocol):
7
+ """Protocol defining the expected structure of an MCP tool."""
8
+
9
+ name: str
10
+ description: Optional[str]
11
+ input_schema: Dict[str, Any]
12
+ output_schema: Optional[Dict[str, Any]]
13
+ annotations: Optional[Dict[str, Any]]
14
+ title: Optional[str]
15
+ icon: Optional[str]
16
+ system_prompt: Optional[str]
17
+ user_prompt: Optional[str]
18
+ is_connected: bool
19
+
20
+
21
class EnrichedMCPTool(MCPTool, Protocol):
    """Protocol for MCP tools enriched with originating-server information.

    Extends :class:`MCPTool` with the attributes added by
    ``MCPManager._enrich_tool_with_mcp_info``.
    """

    server_id: str
    server_name: str
    server_system_prompt: str | None
    server_user_prompt: str | None
    mcp_source_id: str
29
+
30
+
31
class MCPToolConfig(BaseToolConfig):
    """Configuration for MCP tools: per-tool server metadata.

    Populated by ``MCPManager.create_mcp_tool_wrapper`` from an enriched tool.
    """

    server_id: str
    server_name: str
    server_system_prompt: str | None = None
    server_user_prompt: str | None = None
    mcp_source_id: str
@@ -0,0 +1,278 @@
1
+ import json
2
+ from typing import Any, Dict
3
+
4
+ import unique_sdk
5
+ from pydantic import BaseModel, Field, create_model
6
+
7
+ from unique_toolkit.app.schemas import ChatEvent
8
+ from unique_toolkit.evals.schemas import EvaluationMetricName
9
+ from unique_toolkit.language_model import LanguageModelMessage
10
+ from unique_toolkit.language_model.schemas import (
11
+ LanguageModelFunction,
12
+ LanguageModelToolDescription,
13
+ LanguageModelToolMessage,
14
+ )
15
+ from unique_toolkit.tools.mcp.models import EnrichedMCPTool, MCPToolConfig
16
+ from unique_toolkit.tools.schemas import ToolCallResponse
17
+ from unique_toolkit.tools.tool import Tool
18
+ from unique_toolkit.tools.tool_progress_reporter import (
19
+ ProgressState,
20
+ ToolProgressReporter,
21
+ )
22
+
23
+
24
+ class MCPToolWrapper(Tool[MCPToolConfig]):
25
+ """Wrapper class for MCP tools that implements the Tool interface"""
26
+
27
+ def __init__(
28
+ self,
29
+ mcp_tool: EnrichedMCPTool,
30
+ config: MCPToolConfig,
31
+ event: ChatEvent,
32
+ tool_progress_reporter: ToolProgressReporter | None = None,
33
+ ):
34
+ self.name = mcp_tool.name
35
+ super().__init__(config, event, tool_progress_reporter)
36
+ self.mcp_tool = mcp_tool
37
+ self._tool_description = mcp_tool.description or ""
38
+ self._parameters_schema = mcp_tool.input_schema
39
+
40
+ # Set the display name for user-facing messages
41
+ # Priority: title > annotations.title > name
42
+ self.display_name = (
43
+ getattr(mcp_tool, "title", None)
44
+ or (getattr(mcp_tool, "annotations", {}) or {}).get("title")
45
+ or mcp_tool.name
46
+ )
47
+
48
+ def tool_description(self) -> LanguageModelToolDescription:
49
+ """Convert MCP tool schema to LanguageModelToolDescription"""
50
+ # Create a Pydantic model from the MCP tool's input schema
51
+ parameters_model = self._create_parameters_model()
52
+
53
+ return LanguageModelToolDescription(
54
+ name=self.name,
55
+ description=self._tool_description,
56
+ parameters=parameters_model,
57
+ )
58
+
59
+ def _create_parameters_model(self) -> type[BaseModel]:
60
+ """Create a Pydantic model from MCP tool's input schema"""
61
+ properties = self._parameters_schema.get("properties", {})
62
+ required_fields = self._parameters_schema.get("required", [])
63
+
64
+ # Convert JSON schema properties to Pydantic fields
65
+ fields = {}
66
+ for prop_name, prop_schema in properties.items():
67
+ field_type = self._json_schema_to_python_type(prop_schema)
68
+ field_description = prop_schema.get("description", "")
69
+
70
+ if prop_name in required_fields:
71
+ fields[prop_name] = (
72
+ field_type,
73
+ Field(description=field_description),
74
+ )
75
+ else:
76
+ fields[prop_name] = (
77
+ field_type,
78
+ Field(default=None, description=field_description),
79
+ )
80
+
81
+ # Create dynamic model
82
+ return create_model(f"{self.name}Parameters", **fields)
83
+
84
+ def _json_schema_to_python_type(self, schema: Dict[str, Any]) -> type:
85
+ """Convert JSON schema type to Python type"""
86
+ json_type = schema.get("type", "string")
87
+
88
+ type_mapping = {
89
+ "string": str,
90
+ "integer": int,
91
+ "number": float,
92
+ "boolean": bool,
93
+ "array": list,
94
+ "object": dict,
95
+ }
96
+
97
+ return type_mapping.get(json_type, str)
98
+
99
+ def display_name(self) -> str:
100
+ """The display name of the tool."""
101
+ return self._display_name
102
+
103
+ def tool_description_for_system_prompt(self) -> str:
104
+ """Return tool description for system prompt"""
105
+ return self._tool_description
106
+
107
+ def tool_format_information_for_system_prompt(self) -> str:
108
+ """Return formatting information for system prompt"""
109
+ return f"Use this MCP tool to {self._tool_description.lower()}"
110
+
111
+ def evaluation_check_list(self) -> list[EvaluationMetricName]:
112
+ """Return evaluation check list - empty for MCP tools for now"""
113
+ return []
114
+
115
+ def get_evaluation_checks_based_on_tool_response(
116
+ self,
117
+ tool_response: ToolCallResponse,
118
+ ) -> list[EvaluationMetricName]:
119
+ """Return evaluation checks based on tool response"""
120
+ return []
121
+
122
+ def get_tool_call_result_for_loop_history(
123
+ self,
124
+ tool_response: ToolCallResponse,
125
+ ) -> LanguageModelMessage:
126
+ """Convert tool response to message for loop history"""
127
+ # Convert the tool response to a message for the conversation history
128
+ content = (
129
+ tool_response.error_message
130
+ if tool_response.error_message
131
+ else "Tool executed successfully"
132
+ )
133
+
134
+ if hasattr(tool_response, "content") and tool_response.content:
135
+ content = str(tool_response.content)
136
+ elif tool_response.debug_info:
137
+ content = json.dumps(tool_response.debug_info)
138
+
139
+ return LanguageModelToolMessage(
140
+ content=content,
141
+ tool_call_id=tool_response.id,
142
+ name=tool_response.name,
143
+ )
144
+
145
+ async def run(self, tool_call: LanguageModelFunction) -> ToolCallResponse:
146
+ """Execute the MCP tool using SDK to call public API"""
147
+ self.logger.info(f"Running MCP tool: {self.name}")
148
+
149
+ # Notify progress if reporter is available
150
+ if self.tool_progress_reporter:
151
+ await self.tool_progress_reporter.notify_from_tool_call(
152
+ tool_call=tool_call,
153
+ name=f"**{self.display_name}**",
154
+ message=f"Executing MCP tool: {self.display_name}",
155
+ state=ProgressState.RUNNING,
156
+ )
157
+
158
+ try:
159
+ # Robust argument extraction and validation
160
+ arguments = self._extract_and_validate_arguments(tool_call)
161
+
162
+ # Use SDK to call the public API
163
+ result = await self._call_mcp_tool_via_sdk(arguments)
164
+
165
+ # Create successful response
166
+ tool_response = ToolCallResponse(
167
+ id=tool_call.id or "",
168
+ name=self.name,
169
+ debug_info={
170
+ "mcp_tool": self.name,
171
+ "arguments": arguments,
172
+ "result": result,
173
+ },
174
+ error_message="",
175
+ )
176
+
177
+ # Notify completion
178
+ if self.tool_progress_reporter:
179
+ await self.tool_progress_reporter.notify_from_tool_call(
180
+ tool_call=tool_call,
181
+ name=f"**{self.display_name}**",
182
+ message=f"MCP tool completed: {self.display_name}",
183
+ state=ProgressState.FINISHED,
184
+ )
185
+
186
+ return tool_response
187
+
188
+ except Exception as e:
189
+ self.logger.error(f"Error executing MCP tool {self.name}: {e}")
190
+
191
+ # Notify failure
192
+ if self.tool_progress_reporter:
193
+ await self.tool_progress_reporter.notify_from_tool_call(
194
+ tool_call=tool_call,
195
+ name=f"**{self.display_name}**",
196
+ message=f"MCP tool failed: {str(e)}",
197
+ state=ProgressState.FAILED,
198
+ )
199
+
200
+ return ToolCallResponse(
201
+ id=tool_call.id or "",
202
+ name=self.name,
203
+ debug_info={
204
+ "mcp_tool": self.name,
205
+ "error": str(e),
206
+ "original_arguments": getattr(tool_call, "arguments", None),
207
+ },
208
+ error_message=str(e),
209
+ )
210
+
211
+ def _extract_and_validate_arguments(
212
+ self, tool_call: LanguageModelFunction
213
+ ) -> Dict[str, Any]:
214
+ """
215
+ Extract and validate arguments from tool call, handling various formats robustly.
216
+
217
+ The arguments field can come in different formats:
218
+ 1. As a JSON string (expected format from OpenAI API)
219
+ 2. As a dictionary (from internal processing)
220
+ 3. As None or empty (edge cases)
221
+ """
222
+ raw_arguments = tool_call.arguments
223
+
224
+ # Handle None or empty arguments
225
+ if not raw_arguments:
226
+ self.logger.warning(f"MCP tool {self.name} called with empty arguments")
227
+ return {}
228
+
229
+ # Handle string arguments (JSON format)
230
+ if isinstance(raw_arguments, str):
231
+ try:
232
+ parsed_arguments = json.loads(raw_arguments)
233
+ if not isinstance(parsed_arguments, dict):
234
+ self.logger.warning(
235
+ f"MCP tool {self.name}: arguments JSON parsed to non-dict: {type(parsed_arguments)}"
236
+ )
237
+ return {}
238
+ return parsed_arguments
239
+ except json.JSONDecodeError as e:
240
+ self.logger.error(
241
+ f"MCP tool {self.name}: failed to parse arguments JSON '{raw_arguments}': {e}"
242
+ )
243
+ raise ValueError(
244
+ f"Invalid JSON arguments for MCP tool {self.name}: {e}"
245
+ )
246
+
247
+ # Handle dictionary arguments (already parsed)
248
+ if isinstance(raw_arguments, dict):
249
+ self.logger.debug(f"MCP tool {self.name}: arguments already in dict format")
250
+ return raw_arguments
251
+
252
+ # Handle unexpected argument types
253
+ self.logger.error(
254
+ f"MCP tool {self.name}: unexpected arguments type {type(raw_arguments)}: {raw_arguments}"
255
+ )
256
+ raise ValueError(
257
+ f"Unexpected arguments type for MCP tool {self.name}: {type(raw_arguments)}"
258
+ )
259
+
260
+ async def _call_mcp_tool_via_sdk(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
261
+ """Call MCP tool via SDK to public API"""
262
+ try:
263
+ result = unique_sdk.MCP.call_tool(
264
+ user_id=self.event.user_id,
265
+ company_id=self.event.company_id,
266
+ name=self.name,
267
+ arguments=arguments,
268
+ )
269
+
270
+ self.logger.info(
271
+ f"Calling MCP tool {self.name} with arguments: {arguments}"
272
+ )
273
+ self.logger.debug(f"Result: {result}")
274
+
275
+ return result
276
+ except Exception as e:
277
+ self.logger.error(f"SDK call failed for MCP tool {self.name}: {e}")
278
+ raise
@@ -0,0 +1,399 @@
1
+ import logging
2
+ from unittest.mock import Mock
3
+
4
+ import pytest
5
+ from pydantic import BaseModel
6
+
7
+ from tests.test_obj_factory import get_event_obj
8
+ from unique_toolkit.app.schemas import McpServer, McpTool
9
+ from unique_toolkit.chat.service import ChatService
10
+ from unique_toolkit.tools.config import ToolBuildConfig, ToolIcon, ToolSelectionPolicy
11
+ from unique_toolkit.tools.factory import ToolFactory
12
+ from unique_toolkit.tools.mcp.manager import MCPManager
13
+ from unique_toolkit.tools.schemas import BaseToolConfig
14
+ from unique_toolkit.tools.tool import Tool
15
+ from unique_toolkit.tools.tool_manager import ToolManager, ToolManagerConfig
16
+ from unique_toolkit.tools.tool_progress_reporter import ToolProgressReporter
17
+
18
+
19
class MockParameters(BaseModel):
    """Empty parameter model: the mock tool accepts no arguments."""

    pass
21
+
22
+
23
class MockInternalSearchTool(Tool[BaseToolConfig]):
    """Mock internal search tool for testing"""

    # Registered under this name with the ToolFactory in the test setup.
    name = "internal_search"

    def __init__(self, config, event, tool_progress_reporter=None):
        super().__init__(config, event, tool_progress_reporter)

    def tool_description(self):
        """Return a minimal function definition with an empty parameter model."""
        # Local import mirrors the production tools and avoids import cycles.
        from unique_toolkit.language_model.schemas import LanguageModelToolDescription

        return LanguageModelToolDescription(
            name="internal_search",
            description="Internal search tool for testing",
            parameters=MockParameters,
        )

    def tool_description_for_system_prompt(self) -> str:
        """Static description injected into the system prompt."""
        return "Internal search tool for searching content"

    def tool_format_information_for_system_prompt(self) -> str:
        """Static usage hint injected into the system prompt."""
        return "Use this tool to search for content"

    def get_tool_call_result_for_loop_history(self, tool_response):
        """Return a canned tool-role message for the loop history."""
        from unique_toolkit.language_model.schemas import LanguageModelMessage

        return LanguageModelMessage(role="tool", content="Mock search result")

    def evaluation_check_list(self):
        # No evaluations for the mock tool.
        return []

    def get_evaluation_checks_based_on_tool_response(self, tool_response):
        # No response-dependent evaluations either.
        return []

    def get_tool_prompts(self):
        """Return default (empty) tool prompts."""
        from unique_toolkit.tools.schemas import ToolPrompts

        return ToolPrompts()

    async def run(self, tool_call):
        """Echo back an empty successful tool response."""
        from unique_toolkit.tools.schemas import ToolCallResponse

        return ToolCallResponse(id=tool_call.id, name=tool_call.name, content_chunks=[])
66
+
67
+
68
class MockInternalSearchConfig(BaseToolConfig):
    """Mock configuration for internal search tool"""

    # No extra settings: the mock accepts the base configuration unchanged.
    pass
72
+
73
+
74
class TestMCPManager:
    """Integration tests: MCP tools and internal tools flow through ToolManager together."""

    @pytest.fixture(autouse=True)
    def setup(self):
        """Setup test environment"""
        self.logger = logging.getLogger(__name__)

        # Register mock internal tool
        ToolFactory.register_tool(MockInternalSearchTool, MockInternalSearchConfig)

        self.event = get_event_obj(
            user_id="test_user",
            company_id="test_company",
            assistant_id="test_assistant",
            chat_id="test_chat",
        )

        # Set tool choices to include both internal and MCP tools
        self.event.payload.tool_choices = ["internal_search", "mcp_test_tool"]
        self.event.payload.disabled_tools = []

    @pytest.fixture
    def mock_chat_service(self):
        """Create mock chat service for tool progress reporter"""
        return Mock(spec=ChatService)

    @pytest.fixture
    def tool_progress_reporter(self, mock_chat_service):
        """Create tool progress reporter fixture"""
        return ToolProgressReporter(mock_chat_service)

    @pytest.fixture
    def mcp_tools(self):
        """Create mock MCP tools fixture"""
        # One tool with a single required string parameter "query".
        mcp_tool = McpTool(
            id="mcp_test_tool_id",
            name="mcp_test_tool",
            description="Test MCP tool",
            input_schema={
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "Search query"}
                },
                "required": ["query"],
            },
            output_schema=None,
            annotations=None,
            title="Test MCP Tool",
            icon=None,
            system_prompt=None,
            user_prompt=None,
            is_connected=True,
        )
        return [mcp_tool]

    @pytest.fixture
    def mcp_servers(self, mcp_tools):
        """Create mock MCP servers fixture"""
        server = McpServer(
            id="test_server_id",
            name="test_server",
            description="Test MCP server",
            tools=mcp_tools,
            system_prompt="Test system prompt",
            user_prompt="Test user prompt",
            is_connected=True,
        )
        return [server]

    @pytest.fixture
    def internal_tools(self):
        """Create internal tools fixture"""
        internal_tool_config = ToolBuildConfig(
            name="internal_search",
            configuration=MockInternalSearchConfig(),
            display_name="Internal Search",
            icon=ToolIcon.BOOK,
            selection_policy=ToolSelectionPolicy.BY_USER,
            is_exclusive=False,
            is_enabled=True,
        )
        return [internal_tool_config]

    @pytest.fixture
    def mcp_manager(self, mcp_servers, tool_progress_reporter):
        """Create MCP manager fixture"""
        return MCPManager(
            mcp_servers=mcp_servers,
            event=self.event,
            tool_progress_reporter=tool_progress_reporter,
        )

    @pytest.fixture
    def tool_manager_config(self, internal_tools):
        """Create tool manager configuration fixture"""
        return ToolManagerConfig(tools=internal_tools, max_tool_calls=10)

    @pytest.fixture
    def tool_manager(self, tool_manager_config, mcp_manager, tool_progress_reporter):
        """Create tool manager fixture"""
        return ToolManager(
            logger=self.logger,
            config=tool_manager_config,
            event=self.event,
            tool_progress_reporter=tool_progress_reporter,
            mcp_manager=mcp_manager,
        )

    def test_tool_manager_initialization(self, tool_manager):
        """Test tool manager is initialized correctly"""
        assert tool_manager is not None
        assert (
            len(tool_manager.get_tools()) >= 2
        )  # Should have both internal and MCP tools

    def test_tool_manager_has_both_tool_types(self, tool_manager):
        """Test that tool manager contains both MCP and internal tools"""
        tools = tool_manager.get_tools()
        tool_names = [tool.name for tool in tools]

        # Should contain internal search tool
        assert "internal_search" in tool_names

        # Should contain MCP tool (wrapped)
        assert "mcp_test_tool" in tool_names

        # Should have at least 2 tools total
        assert len(tools) >= 2

    def test_tool_manager_can_get_tools_by_name(self, tool_manager):
        """Test that tool manager can retrieve tools by name"""
        # Test getting internal tool
        internal_tool = tool_manager.get_tool_by_name("internal_search")
        assert internal_tool is not None
        assert internal_tool.name == "internal_search"

        # Test getting MCP tool
        mcp_tool = tool_manager.get_tool_by_name("mcp_test_tool")
        assert mcp_tool is not None
        assert mcp_tool.name == "mcp_test_tool"

    def test_tool_manager_tools_property_contains_both_types(self, tool_manager):
        """Test that the _tools property contains both internal and MCP tools"""
        # Access the private _tools attribute directly to verify integration
        tools = tool_manager._tools
        tool_names = [tool.name for tool in tools]

        # Verify both tool types are present
        assert "internal_search" in tool_names, (
            f"Internal tool missing. Available tools: {tool_names}"
        )
        assert "mcp_test_tool" in tool_names, (
            f"MCP tool missing. Available tools: {tool_names}"
        )

        # Verify we have the expected number of tools
        assert len(tools) == 2, f"Expected 2 tools, got {len(tools)}: {tool_names}"

    def test_tool_manager_logs_loaded_tools(self, tool_manager, caplog):
        """Test that tool manager logs the loaded tools correctly"""
        with caplog.at_level(logging.INFO):
            tool_manager.log_loaded_tools()

        # Check that both tools are mentioned in the logs
        log_output = caplog.text
        assert "internal_search" in log_output
        assert "mcp_test_tool" in log_output

    def test_tool_manager_gets_tool_definitions(self, tool_manager):
        """Test that tool manager returns tool definitions for both tool types"""
        definitions = tool_manager.get_tool_definitions()

        # Should have definitions for both tools
        assert len(definitions) == 2

        definition_names = [defn.name for defn in definitions]
        assert "internal_search" in definition_names
        assert "mcp_test_tool" in definition_names

    def test_init_tools_method(
        self, tool_manager_config, mcp_manager, tool_progress_reporter
    ):
        """Test the _init__tools method behavior with different scenarios"""

        # Test 1: Normal initialization with both tool types
        tool_manager = ToolManager(
            logger=self.logger,
            config=tool_manager_config,
            event=self.event,
            tool_progress_reporter=tool_progress_reporter,
            mcp_manager=mcp_manager,
        )

        # Verify both tools are loaded
        tools = tool_manager.get_tools()
        tool_names = [tool.name for tool in tools]
        assert "internal_search" in tool_names
        assert "mcp_test_tool" in tool_names
        assert len(tools) == 2

    def test_init_tools_with_disabled_tools(
        self, tool_manager_config, mcp_manager, tool_progress_reporter
    ):
        """Test _init__tools method when some tools are disabled"""

        # Modify event to disable the internal search tool
        event_with_disabled = get_event_obj(
            user_id="test_user",
            company_id="test_company",
            assistant_id="test_assistant",
            chat_id="test_chat",
        )
        event_with_disabled.payload.tool_choices = ["internal_search", "mcp_test_tool"]
        event_with_disabled.payload.disabled_tools = ["internal_search"]

        tool_manager = ToolManager(
            logger=self.logger,
            config=tool_manager_config,
            event=event_with_disabled,
            tool_progress_reporter=tool_progress_reporter,
            mcp_manager=mcp_manager,
        )

        # Should only have MCP tool, internal tool should be filtered out
        tools = tool_manager.get_tools()
        tool_names = [tool.name for tool in tools]
        assert "internal_search" not in tool_names
        assert "mcp_test_tool" in tool_names
        assert len(tools) == 1

    def test_init_tools_with_limited_tool_choices(
        self, tool_manager_config, mcp_manager, tool_progress_reporter
    ):
        """Test _init__tools method when only specific tools are chosen"""

        # Modify event to only choose internal search tool
        event_with_limited_choices = get_event_obj(
            user_id="test_user",
            company_id="test_company",
            assistant_id="test_assistant",
            chat_id="test_chat",
        )
        event_with_limited_choices.payload.tool_choices = ["internal_search"]
        event_with_limited_choices.payload.disabled_tools = []

        tool_manager = ToolManager(
            logger=self.logger,
            config=tool_manager_config,
            event=event_with_limited_choices,
            tool_progress_reporter=tool_progress_reporter,
            mcp_manager=mcp_manager,
        )

        # Should only have internal search tool
        tools = tool_manager.get_tools()
        tool_names = [tool.name for tool in tools]
        assert "internal_search" in tool_names
        assert "mcp_test_tool" not in tool_names
        assert len(tools) == 1

    def test_init_tools_with_exclusive_tool(self, mcp_manager, tool_progress_reporter):
        """Test _init__tools method when an exclusive tool is present"""

        # Create an exclusive tool configuration
        exclusive_tool_config = ToolBuildConfig(
            name="internal_search",
            configuration=MockInternalSearchConfig(),
            display_name="Internal Search",
            icon=ToolIcon.BOOK,
            selection_policy=ToolSelectionPolicy.BY_USER,
            is_exclusive=True,  # Make it exclusive
            is_enabled=True,
        )

        config_with_exclusive = ToolManagerConfig(
            tools=[exclusive_tool_config], max_tool_calls=10
        )

        tool_manager = ToolManager(
            logger=self.logger,
            config=config_with_exclusive,
            event=self.event,
            tool_progress_reporter=tool_progress_reporter,
            mcp_manager=mcp_manager,
        )

        # Should only have the exclusive tool, MCP tools should be ignored
        tools = tool_manager.get_tools()
        tool_names = [tool.name for tool in tools]
        assert "internal_search" in tool_names
        assert "mcp_test_tool" not in tool_names
        assert len(tools) == 1

    def test_init_tools_with_disabled_tool_config(
        self, mcp_manager, tool_progress_reporter
    ):
        """Test _init__tools method when a tool is disabled in its configuration"""

        # Create a disabled tool configuration
        disabled_tool_config = ToolBuildConfig(
            name="internal_search",
            configuration=MockInternalSearchConfig(),
            display_name="Internal Search",
            icon=ToolIcon.BOOK,
            selection_policy=ToolSelectionPolicy.BY_USER,
            is_exclusive=False,
            is_enabled=False,  # Disable the tool
        )

        config_with_disabled = ToolManagerConfig(
            tools=[disabled_tool_config], max_tool_calls=10
        )

        tool_manager = ToolManager(
            logger=self.logger,
            config=config_with_disabled,
            event=self.event,
            tool_progress_reporter=tool_progress_reporter,
            mcp_manager=mcp_manager,
        )

        # Should only have MCP tool, disabled internal tool should be filtered out
        tools = tool_manager.get_tools()
        tool_names = [tool.name for tool in tools]
        assert "internal_search" not in tool_names
        assert "mcp_test_tool" in tool_names
        assert len(tools) == 1
@@ -11,13 +11,24 @@ from unique_toolkit.language_model.schemas import (
11
11
  LanguageModelTool,
12
12
  LanguageModelToolDescription,
13
13
  )
14
- from unique_toolkit.tools.config import ToolBuildConfig
14
+ from unique_toolkit.tools.config import ToolBuildConfig, _rebuild_config_model
15
15
  from unique_toolkit.tools.factory import ToolFactory
16
+ from unique_toolkit.tools.mcp.manager import MCPManager
16
17
  from unique_toolkit.tools.schemas import ToolCallResponse, ToolPrompts
17
18
  from unique_toolkit.tools.tool import Tool
18
19
  from unique_toolkit.tools.tool_progress_reporter import ToolProgressReporter
19
20
  from unique_toolkit.tools.utils.execution.execution import Result, SafeTaskExecutor
20
21
 
22
+ # Rebuild the config model now that all imports are resolved
23
+ _rebuild_config_model()
24
+
25
+
26
+ class ForcedToolOption:
27
+ type: str = "function"
28
+
29
+ def __init__(self, name: str):
30
+ self.name = name
31
+
21
32
 
22
33
  class ToolManagerConfig(BaseModel):
23
34
  tools: list[ToolBuildConfig] = Field(
@@ -59,6 +70,7 @@ class ToolManager:
59
70
  config: ToolManagerConfig,
60
71
  event: ChatEvent,
61
72
  tool_progress_reporter: ToolProgressReporter,
73
+ mcp_manager: MCPManager,
62
74
  ):
63
75
  self._logger = logger
64
76
  self._config = config
@@ -68,6 +80,7 @@ class ToolManager:
68
80
  self._disabled_tools = event.payload.disabled_tools
69
81
  # this needs to be a set of strings to avoid duplicates
70
82
  self._tool_evaluation_check_list: set[EvaluationMetricName] = set()
83
+ self._mcp_manager = mcp_manager
71
84
  self._init__tools(event)
72
85
 
73
86
  def _init__tools(self, event: ChatEvent) -> None:
@@ -77,7 +90,8 @@ class ToolManager:
77
90
  self._logger.info(f"Tool choices: {tool_choices}")
78
91
  self._logger.info(f"Tool configs: {tool_configs}")
79
92
 
80
- self.available_tools = [
93
+ # Build internal tools from configurations
94
+ internal_tools = [
81
95
  ToolFactory.build_tool_with_settings(
82
96
  t.name,
83
97
  t,
@@ -88,6 +102,11 @@ class ToolManager:
88
102
  for t in tool_configs
89
103
  ]
90
104
 
105
+ # Get MCP tools (these are already properly instantiated)
106
+ mcp_tools = self._mcp_manager.get_all_mcp_tools(tool_choices)
107
+ # Combine both types of tools
108
+ self.available_tools = internal_tools + mcp_tools
109
+
91
110
  for t in self.available_tools:
92
111
  if t.is_exclusive():
93
112
  self._tools = [t]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: unique_toolkit
3
- Version: 0.8.22
3
+ Version: 0.8.24
4
4
  Summary:
5
5
  License: Proprietary
6
6
  Author: Martin Fadler
@@ -114,6 +114,12 @@ All notable changes to this project will be documented in this file.
114
114
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
115
115
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
116
116
 
117
+ ## [0.8.24] - 2025-08-25
118
+ - Optimized hallucination manager
119
+
120
+ ## [0.8.23] - 2025-08-27
121
+ - Add MCP manager that handles MCP related logic
122
+
117
123
 
118
124
  ## [0.8.22] - 2025-08-25
119
125
  - Add DeepSeek-R1, DeepSeek-V3.1, Qwen3-235B-A22B and Qwen3-235B-A22B-Thinking-2507 to supported model list
@@ -40,7 +40,7 @@ unique_toolkit/evals/config.py,sha256=ywHIrJs5SFdKr1WXfrofWuFfzb0iPQw8iZDpq5oEug
40
40
  unique_toolkit/evals/context_relevancy/prompts.py,sha256=EdHFUOB581yVxcOL8482KUv_LzaRjuiem71EF8udYMc,1331
41
41
  unique_toolkit/evals/context_relevancy/schema.py,sha256=lm9x0jExOinUk9itqC8ZpgReC7yj1VDwEMppxlZGqpY,2923
42
42
  unique_toolkit/evals/context_relevancy/service.py,sha256=txTWIhV65QGFhxG1jCb5TTqZc_c7K9I8pi2HVDTAfm8,8384
43
- unique_toolkit/evals/evaluation_manager.py,sha256=luavQrMTrxEi7hRhpUSlGLYpEza7eQCg5Nk46ogbabw,7721
43
+ unique_toolkit/evals/evaluation_manager.py,sha256=g-8qa_6_p53C9Okx8iNkuoIXYSJrf-6sQ-xku7bo9kI,7895
44
44
  unique_toolkit/evals/exception.py,sha256=7lcVbCyoN4Md1chNJDFxpUYyWbVrcr9dcc3TxWykJTc,115
45
45
  unique_toolkit/evals/hallucination/constants.py,sha256=FLcXl5XU07jCvS8YPX9l6UjTaqyQ8YvnSKpx4Z6wZ2Y,1997
46
46
  unique_toolkit/evals/hallucination/hallucination_evaluation.py,sha256=TuZ88jeVn0tVr9d0GhWyJSxKNA16nhvr2xRPo-yK8OM,3063
@@ -96,18 +96,23 @@ unique_toolkit/short_term_memory/service.py,sha256=5PeVBu1ZCAfyDb2HLVvlmqSbyzBBu
96
96
  unique_toolkit/smart_rules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
97
97
  unique_toolkit/smart_rules/compile.py,sha256=cxWjb2dxEI2HGsakKdVCkSNi7VK9mr08w5sDcFCQyWI,9553
98
98
  unique_toolkit/thinking_manager/thinking_manager.py,sha256=AJfmrTXTr-DxBnJ2_zYYpYo57kr5deqT0LiZb8UdaDQ,4175
99
- unique_toolkit/tools/config.py,sha256=CHelTOEd2uqktK-bSYIyaqb9M1hKz8fy_DAR2mVzXpc,2985
99
+ unique_toolkit/tools/config.py,sha256=E4lFQA4gCi3_j4hcdnd5YUHhbaWTyIGWrgD18QardQw,3796
100
100
  unique_toolkit/tools/factory.py,sha256=w3uNHuYBIJ330Xi8PTdAkr8G3OMbQH2cBgvk5UT16oE,1253
101
+ unique_toolkit/tools/mcp/__init__.py,sha256=_2_im3F4IgAwKyHysndhQSV1GbfoRviNOZlAS5vkF3Q,158
102
+ unique_toolkit/tools/mcp/manager.py,sha256=7J9oE_4WxYcPDXx8RcWP67pt5oVMCUvxk_FDsu2J3YY,3218
103
+ unique_toolkit/tools/mcp/models.py,sha256=4SFE12Y8xoJ0GBaXdz8oylhUBMHrrm66BRurCm3tCAI,987
104
+ unique_toolkit/tools/mcp/tool_wrapper.py,sha256=lclzLZ9q4q6dLg7wUJq_aYjV0sTlQwk5abB9DeXJsr8,10287
101
105
  unique_toolkit/tools/schemas.py,sha256=PJBWN9NcEBSUmhrAMznbnw06ordrf4lfbhcxYZFmAOc,4720
106
+ unique_toolkit/tools/test/test_mcp_manager.py,sha256=jnVKxqJyeJBDb6LvI8bM9dd8XPxrbXV0VCOHMhCdlug,14398
102
107
  unique_toolkit/tools/test/test_tool_progress_reporter.py,sha256=GTtmBqOUo0-4fh_q0lRgxDhwKeankc3FHFD5ULZAm4Y,6299
103
108
  unique_toolkit/tools/tool.py,sha256=zreY6fGBAfOIPNujWkl4cV5fBojjzGzVyvYuNLDrbzQ,5626
104
- unique_toolkit/tools/tool_manager.py,sha256=vYGmNF9IvQoAG3N20TIbCpzmsJfoTFn_SVaENTJIk1w,9358
109
+ unique_toolkit/tools/tool_manager.py,sha256=-DTfX608iPSHo2_UlC_lVG8DGJfWhuzjAk_Wshc3YTQ,9988
105
110
  unique_toolkit/tools/tool_progress_reporter.py,sha256=ixud9VoHey1vlU1t86cW0-WTvyTwMxNSWBon8I11SUk,7955
106
111
  unique_toolkit/tools/utils/execution/execution.py,sha256=vjG2Y6awsGNtlvyQAGCTthQ5thWHYnn-vzZXaYLb3QE,7922
107
112
  unique_toolkit/tools/utils/source_handling/schema.py,sha256=vzAyf6ZWNexjMO0OrnB8y2glGkvAilmGGQXd6zcDaKw,870
108
113
  unique_toolkit/tools/utils/source_handling/source_formatting.py,sha256=C7uayNbdkNVJdEARA5CENnHtNY1SU6etlaqbgHNyxaQ,9152
109
114
  unique_toolkit/tools/utils/source_handling/tests/test_source_formatting.py,sha256=oM5ZxEgzROrnX1229KViCAFjRxl9wCTzWZoinYSHleM,6979
110
- unique_toolkit-0.8.22.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
111
- unique_toolkit-0.8.22.dist-info/METADATA,sha256=yFz8RDjtGih0zLFKHQNfgyWM1P0W1URIBGu2EjK3ZWc,28453
112
- unique_toolkit-0.8.22.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
113
- unique_toolkit-0.8.22.dist-info/RECORD,,
115
+ unique_toolkit-0.8.24.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
116
+ unique_toolkit-0.8.24.dist-info/METADATA,sha256=0vvS5x-HsApDZlOrB7vnLQPal3u7Ucqxv7hZa35ebOo,28588
117
+ unique_toolkit-0.8.24.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
118
+ unique_toolkit-0.8.24.dist-info/RECORD,,