mcp-use 1.3.1__py3-none-any.whl → 1.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of mcp-use might be problematic.

mcp_use/__init__.py CHANGED
@@ -7,6 +7,7 @@ to MCP tools through existing LangChain adapters.
 
 from importlib.metadata import version
 
+from . import observability
 from .agents.mcpagent import MCPAgent
 from .client import MCPClient
 from .config import load_config_file
@@ -30,6 +31,7 @@ __all__ = [
     "MCP_USE_DEBUG",
     "Logger",
     "set_debug",
+    "observability",
 ]
 
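The net effect of this hunk is that the new observability submodule is importable straight from the package namespace. A minimal sketch based only on what the diff shows; the submodule's contents are not part of this diff:

import mcp_use

# `from . import observability` in __init__.py guarantees the attribute exists
# after importing the package; what it actually provides is not visible here.
print(mcp_use.observability)
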
mcp_use/adapters/base.py CHANGED
@@ -34,9 +34,7 @@ class BaseAdapter(ABC):
         self._connector_tool_map: dict[BaseConnector, list[T]] = {}
 
     @classmethod
-    async def create_tools(
-        cls, client: "MCPClient", disallowed_tools: list[str] | None = None
-    ) -> list[T]:
+    async def create_tools(cls, client: "MCPClient", disallowed_tools: list[str] | None = None) -> list[T]:
         """Create tools from an MCPClient instance.
 
         This is the recommended way to create tools from an MCPClient, as it handles
@@ -86,9 +84,7 @@ class BaseAdapter(ABC):
         """
         # Check if we already have tools for this connector
         if connector in self._connector_tool_map:
-            logger.debug(
-                f"Returning {len(self._connector_tool_map[connector])} existing tools for connector"
-            )
+            logger.debug(f"Returning {len(self._connector_tool_map[connector])} existing tools for connector")
             return self._connector_tool_map[connector]
 
         # Create tools for this connector
@@ -89,9 +89,7 @@ class LangChainAdapter(BaseAdapter):
             elif hasattr(resource, "blob"):
                 # Assuming blob needs decoding or specific handling; adjust as needed
                 decoded_result += (
-                    resource.blob.decode()
-                    if isinstance(resource.blob, bytes)
-                    else str(resource.blob)
+                    resource.blob.decode() if isinstance(resource.blob, bytes) else str(resource.blob)
                 )
             else:
                 raise ToolException(f"Unexpected resource type: {resource.type}")
@@ -154,9 +152,7 @@ class LangChainAdapter(BaseAdapter):
                 logger.debug(f'MCP tool: "{self.name}" received input: {kwargs}')
 
                 try:
-                    tool_result: CallToolResult = await self.tool_connector.call_tool(
-                        self.name, kwargs
-                    )
+                    tool_result: CallToolResult = await self.tool_connector.call_tool(self.name, kwargs)
                     try:
                         # Use the helper function to parse the result
                         return adapter_self._parse_mcp_tool_result(tool_result)
@@ -185,8 +181,7 @@ class LangChainAdapter(BaseAdapter):
         class ResourceTool(BaseTool):
             name: str = _sanitize(mcp_resource.name or f"resource_{mcp_resource.uri}")
             description: str = (
-                mcp_resource.description
-                or f"Return the content of the resource located at URI {mcp_resource.uri}."
+                mcp_resource.description or f"Return the content of the resource located at URI {mcp_resource.uri}."
             )
             args_schema: type[BaseModel] = ReadResourceRequestParams
             tool_connector: BaseConnector = connector
@@ -243,9 +238,7 @@ class LangChainAdapter(BaseAdapter):
                 Field(None, description=arg.description),
             )
 
-            InputSchema = create_model(
-                dynamic_model_name, **field_definitions_for_create, __base__=BaseModel
-            )
+            InputSchema = create_model(dynamic_model_name, **field_definitions_for_create, __base__=BaseModel)
         else:
             # Create an empty Pydantic model if there are no arguments
             InputSchema = create_model(dynamic_model_name, __base__=BaseModel)
mcp_use/agents/base.py CHANGED
@@ -48,9 +48,7 @@ class BaseAgent(ABC):
         pass
 
     @abstractmethod
-    async def step(
-        self, query: str, previous_steps: list[dict[str, Any]] | None = None
-    ) -> dict[str, Any]:
+    async def step(self, query: str, previous_steps: list[dict[str, Any]] | None = None) -> dict[str, Any]:
         """Perform a single step of the agent.
 
         Args:
@@ -23,7 +23,7 @@ from langchain_core.utils.input import get_color_mapping
 
 from mcp_use.client import MCPClient
 from mcp_use.connectors.base import BaseConnector
-from mcp_use.telemetry.posthog import Telemetry
+from mcp_use.telemetry.telemetry import Telemetry
 from mcp_use.telemetry.utils import extract_model_info
 
 from ..adapters.langchain_adapter import LangChainAdapter
@@ -122,9 +122,7 @@ class MCPAgent:
             # Get server management tools
             management_tools = self.server_manager.tools
             self._tools = management_tools
-            logger.info(
-                f"🔧 Server manager mode active with {len(management_tools)} management tools"
-            )
+            logger.info(f"🔧 Server manager mode active with {len(management_tools)} management tools")
 
             # Create the system message based on available tools
             await self._create_system_message_from_tools(self._tools)
@@ -190,9 +188,7 @@ class MCPAgent:
 
         # Update conversation history if memory is enabled
         if self.memory_enabled:
-            history_without_system = [
-                msg for msg in self._conversation_history if not isinstance(msg, SystemMessage)
-            ]
+            history_without_system = [msg for msg in self._conversation_history if not isinstance(msg, SystemMessage)]
             self._conversation_history = [self._system_message] + history_without_system
 
     def _create_agent(self) -> AgentExecutor:
@@ -223,9 +219,7 @@ class MCPAgent:
         agent = create_tool_calling_agent(llm=self.llm, tools=self._tools, prompt=prompt)
 
         # Use the standard AgentExecutor
-        executor = AgentExecutor(
-            agent=agent, tools=self._tools, max_iterations=self.max_steps, verbose=self.verbose
-        )
+        executor = AgentExecutor(agent=agent, tools=self._tools, max_iterations=self.max_steps, verbose=self.verbose)
         logger.debug(f"Created agent executor with max_iterations={self.max_steps}")
         return executor
 
@@ -273,9 +267,7 @@ class MCPAgent:
         # Update conversation history if memory is enabled
         if self.memory_enabled:
             # Remove old system message if it exists
-            history_without_system = [
-                msg for msg in self._conversation_history if not isinstance(msg, SystemMessage)
-            ]
+            history_without_system = [msg for msg in self._conversation_history if not isinstance(msg, SystemMessage)]
             self._conversation_history = history_without_system
 
             # Add new system message
@@ -300,9 +292,7 @@ class MCPAgent:
         # If the agent is already initialized, we need to reinitialize it
         # to apply the changes to the available tools
         if self._initialized:
-            logger.debug(
-                "Agent already initialized. Changes will take effect on next initialization."
-            )
+            logger.debug("Agent already initialized. Changes will take effect on next initialization.")
             # We don't automatically reinitialize here as it could be disruptive
             # to ongoing operations. The user can call initialize() explicitly if needed.
 
@@ -335,9 +325,7 @@ class MCPAgent:
 
         # 1. Initialise on-demand ------------------------------------------------
         initialised_here = False
-        if (manage_connector and not self._initialized) or (
-            not self._initialized and self.auto_initialize
-        ):
+        if (manage_connector and not self._initialized) or (not self._initialized and self.auto_initialize):
             await self.initialize()
             initialised_here = True
 
@@ -351,9 +339,7 @@ class MCPAgent:
         if self.memory_enabled:
             self.add_to_history(HumanMessage(content=query))
 
-        history_to_use = (
-            external_history if external_history is not None else self._conversation_history
-        )
+        history_to_use = external_history if external_history is not None else self._conversation_history
         inputs = {"input": query, "chat_history": history_to_use}
 
         # 3. Stream & diff -------------------------------------------------------
@@ -365,9 +351,12 @@ class MCPAgent:
             if not isinstance(message, ToolAgentAction):
                 self.add_to_history(message)
             yield event
-
         # 5. House-keeping -------------------------------------------------------
-        if initialised_here and manage_connector:
+        # Restrict agent cleanup in _generate_response_chunks_async to only occur
+        # when the agent was initialized in this generator and is not client-managed
+        # and the user does want us to manage the connection.
+        if not self.client and initialised_here and manage_connector:
+            logger.info("🧹 Closing agent after generator completion")
             await self.close()
 
     async def astream(
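The house-keeping change above is the behavioral one in this hunk: when an MCPClient owns the connection, the streaming generator no longer tears the agent down on completion. A hedged sketch of what that means for callers, assuming astream() takes the query string the way run() does elsewhere in this file:

from mcp_use import MCPAgent  # exported by the package __init__

async def stream_once(agent: MCPAgent) -> None:
    # Illustrative only; constructing the agent and client is out of scope here.
    async for event in agent.astream("What tools do you have?"):
        ...  # consume streamed events
    # 1.3.1 could close a client-backed agent when the generator finished;
    # in 1.3.3 cleanup runs only if the agent initialized itself inside the
    # generator and no client manages the connection, so close explicitly.
    await agent.close()
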
@@ -411,9 +400,7 @@ class MCPAgent:
         elif self.connectors:
             server_count = len(self.connectors)
 
-        conversation_history_length = (
-            len(self._conversation_history) if self.memory_enabled else 0
-        )
+        conversation_history_length = len(self._conversation_history) if self.memory_enabled else 0
 
         self.telemetry.track_agent_execution(
             execution_method="astream",
@@ -486,11 +473,7 @@ class MCPAgent:
         if self._agent_executor:
             self._agent_executor.max_iterations = steps
 
-        display_query = (
-            query[:50].replace("\n", " ") + "..."
-            if len(query) > 50
-            else query.replace("\n", " ")
-        )
+        display_query = query[:50].replace("\n", " ") + "..." if len(query) > 50 else query.replace("\n", " ")
         logger.info(f"💬 Received query: '{display_query}'")
 
         # Add the user query to conversation history if memory is enabled
@@ -498,9 +481,7 @@ class MCPAgent:
             self.add_to_history(HumanMessage(content=query))
 
         # Use the provided history or the internal history
-        history_to_use = (
-            external_history if external_history is not None else self._conversation_history
-        )
+        history_to_use = external_history if external_history is not None else self._conversation_history
 
         # Convert messages to format expected by LangChain agent input
         # Exclude the main system message as it's part of the agent's prompt
@@ -516,9 +497,7 @@ class MCPAgent:
 
         # Construct a mapping of tool name to tool for easy lookup
         name_to_tool_map = {tool.name: tool for tool in self._tools}
-        color_mapping = get_color_mapping(
-            [tool.name for tool in self._tools], excluded_colors=["green", "red"]
-        )
+        color_mapping = get_color_mapping([tool.name for tool in self._tools], excluded_colors=["green", "red"])
 
         logger.info(f"🏁 Starting agent execution with max_steps={steps}")
 
@@ -532,7 +511,7 @@ class MCPAgent:
 
             if current_tool_names != existing_tool_names:
                 logger.info(
-                    f"🔄 Tools changed before step {step_num + 1}, updating agent. "
+                    f"🔄 Tools changed before step {step_num + 1}, updating agent."
                     f"New tools: {', '.join(current_tool_names)}"
                 )
                 self._tools = current_tools
@@ -637,9 +616,7 @@ class MCPAgent:
         elif self.connectors:
             server_count = len(self.connectors)
 
-        conversation_history_length = (
-            len(self._conversation_history) if self.memory_enabled else 0
-        )
+        conversation_history_length = len(self._conversation_history) if self.memory_enabled else 0
         self.telemetry.track_agent_execution(
             execution_method="run",
             query=query,
@@ -2,9 +2,7 @@ from langchain.schema import SystemMessage
 from langchain_core.tools import BaseTool
 
 
-def generate_tool_descriptions(
-    tools: list[BaseTool], disallowed_tools: list[str] | None = None
-) -> list[str]:
+def generate_tool_descriptions(tools: list[BaseTool], disallowed_tools: list[str] | None = None) -> list[str]:
     """
     Generates a list of formatted tool descriptions, excluding disallowed tools.
 
mcp_use/client.py CHANGED
@@ -77,9 +77,7 @@ class MCPClient:
             sandbox: Whether to use sandboxed execution mode for running MCP servers.
             sandbox_options: Optional sandbox configuration options.
         """
-        return cls(
-            config=load_config_file(filepath), sandbox=sandbox, sandbox_options=sandbox_options
-        )
+        return cls(config=load_config_file(filepath), sandbox=sandbox, sandbox_options=sandbox_options)
 
     def add_server(
         self,
@@ -30,7 +30,7 @@ class BaseConnector(ABC):
         self._resources: list[Resource] | None = None
         self._prompts: list[Prompt] | None = None
         self._connected = False
-        self.auto_reconnect = True  # Whether to automatically reconnect on connection loss (not configurable for now), may be made configurable through the connector_config
+        self.auto_reconnect = True  # Whether to automatically reconnect on connection loss (not configurable for now)
 
     @abstractmethod
     async def connect(self) -> None:
@@ -123,7 +123,11 @@ class BaseConnector(ABC):
         else:
             self._prompts = []
 
-        logger.debug(f"MCP session initialized with {len(self._tools)} tools, " f"{len(self._resources)} resources, " f"and {len(self._prompts)} prompts")
+        logger.debug(
+            f"MCP session initialized with {len(self._tools)} tools, "
+            f"{len(self._resources)} resources, "
+            f"and {len(self._prompts)} prompts"
+        )
 
         return result
 
@@ -222,7 +226,9 @@ class BaseConnector(ABC):
             except Exception as e:
                 raise RuntimeError(f"Failed to reconnect to MCP server: {e}") from e
         else:
-            raise RuntimeError("Connection to MCP server has been lost. " "Auto-reconnection is disabled. Please reconnect manually.")
+            raise RuntimeError(
+                "Connection to MCP server has been lost. Auto-reconnection is disabled. Please reconnect manually."
+            )
 
     async def call_tool(self, name: str, arguments: dict[str, Any]) -> CallToolResult:
         """Call an MCP tool with automatic reconnection handling.
@@ -111,9 +111,7 @@ class HttpConnector(BaseConnector):
         if isinstance(streamable_error, httpx.HTTPStatusError):
             if streamable_error.response.status_code in [404, 405]:
                 should_fallback = True
-            elif "405 Method Not Allowed" in str(streamable_error) or "404 Not Found" in str(
-                streamable_error
-            ):
+            elif "405 Method Not Allowed" in str(streamable_error) or "404 Not Found" in str(streamable_error):
                 should_fallback = True
         else:
             # For other errors, still try fallback but they might indicate
@@ -131,16 +129,13 @@ class HttpConnector(BaseConnector):
             read_stream, write_stream = await connection_manager.start()
 
             # Create the client session for SSE
-            self.client_session = ClientSession(
-                read_stream, write_stream, sampling_callback=None
-            )
+            self.client_session = ClientSession(read_stream, write_stream, sampling_callback=None)
             await self.client_session.__aenter__()
             self.transport_type = "SSE"
 
         except Exception as sse_error:
             logger.error(
-                f"Both transport methods failed. Streamable HTTP: {streamable_error}, "
-                f"SSE: {sse_error}"
+                f"Both transport methods failed. Streamable HTTP: {streamable_error}, SSE: {sse_error}"
             )
             raise sse_error
         else:
@@ -149,10 +144,7 @@ class HttpConnector(BaseConnector):
         # Store the successful connection manager and mark as connected
         self._connection_manager = connection_manager
         self._connected = True
-        logger.debug(
-            f"Successfully connected to MCP implementation via"
-            f" {self.transport_type}: {self.base_url}"
-        )
+        logger.debug(f"Successfully connected to MCP implementation via {self.transport_type}: {self.base_url}")
 
     @property
     def public_identifier(self) -> str:
@@ -84,9 +84,7 @@ class SandboxConnector(BaseConnector):
         )
 
         self.sandbox_template_id = _e2b_options.get("sandbox_template_id", "base")
-        self.supergateway_cmd_parts = _e2b_options.get(
-            "supergateway_command", "npx -y supergateway"
-        )
+        self.supergateway_cmd_parts = _e2b_options.get("supergateway_command", "npx -y supergateway")
 
         self.sandbox: Sandbox | None = None
         self.process: CommandHandle | None = None
@@ -138,10 +136,7 @@ class SandboxConnector(BaseConnector):
             async with session.get(ping_url, timeout=2) as response:
                 if response.status == 200:
                     elapsed = time.time() - start_time
-                    logger.info(
-                        f"Server is ready! "
-                        f"SSE endpoint responded with 200 after {elapsed:.1f}s"
-                    )
+                    logger.info(f"Server is ready! SSE endpoint responded with 200 after {elapsed:.1f}s")
                     return True
         except Exception:
             # If sse endpoint doesn't work, try the base URL
@@ -149,8 +144,7 @@ class SandboxConnector(BaseConnector):
                 if response.status < 500:  # Accept any non-server error
                     elapsed = time.time() - start_time
                     logger.info(
-                        f"Server is ready! Base URL responded with "
-                        f"{response.status} after {elapsed:.1f}s"
+                        f"Server is ready! Base URL responded with {response.status} after {elapsed:.1f}s"
                     )
                     return True
         except Exception:
@@ -220,9 +214,7 @@ class SandboxConnector(BaseConnector):
         sse_url = f"{self.base_url}/sse"
 
         # Create and start the connection manager
-        self._connection_manager = SseConnectionManager(
-            sse_url, self.headers, self.timeout, self.sse_read_timeout
-        )
+        self._connection_manager = SseConnectionManager(sse_url, self.headers, self.timeout, self.sse_read_timeout)
         read_stream, write_stream = await self._connection_manager.start()
 
         # Create the client session
@@ -231,9 +223,7 @@ class SandboxConnector(BaseConnector):
 
         # Mark as connected
         self._connected = True
-        logger.debug(
-            f"Successfully connected to MCP implementation via HTTP/SSE: {self.base_url}"
-        )
+        logger.debug(f"Successfully connected to MCP implementation via HTTP/SSE: {self.base_url}")
 
     except Exception as e:
         logger.error(f"Failed to connect to MCP implementation: {e}")
@@ -52,9 +52,7 @@ class StdioConnector(BaseConnector):
         logger.debug(f"Connecting to MCP implementation: {self.command}")
         try:
             # Create server parameters
-            server_params = StdioServerParameters(
-                command=self.command, args=self.args, env=self.env
-            )
+            server_params = StdioServerParameters(command=self.command, args=self.args, env=self.env)
 
             # Create and start the connection manager
             self._connection_manager = StdioConnectionManager(server_params, self.errlog)
@@ -64,9 +64,7 @@ class WebSocketConnector(BaseConnector):
         self.ws = await self._connection_manager.start()
 
         # Start the message receiver task
-        self._receiver_task = asyncio.create_task(
-            self._receive_messages(), name="websocket_receiver_task"
-        )
+        self._receiver_task = asyncio.create_task(self._receive_messages(), name="websocket_receiver_task")
 
         # Mark as connected
         self._connected = True
mcp_use/logging.py CHANGED
@@ -12,7 +12,7 @@ import sys
 from langchain.globals import set_debug as langchain_set_debug
 
 # Global debug flag - can be set programmatically or from environment
-MCP_USE_DEBUG = False
+MCP_USE_DEBUG = 1
 
 
 class Logger:
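The flip above (False to 1) reads as debug logging being enabled by default in this release. A minimal sketch of how a consumer could pin the old behaviour back, using only the module-level flag shown here; set_debug is exported from the package __init__ but its signature is not part of this diff:

import mcp_use.logging

# Restore the 1.3.1 default; whether downstream code re-reads the flag at call
# time is not shown in this diff.
mcp_use.logging.MCP_USE_DEBUG = False

# Alternatively (signature assumed, not confirmed by this diff):
# from mcp_use import set_debug
# set_debug(False)
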
@@ -49,19 +49,13 @@ class ServerManager:
         session = None
         try:
             session = self.client.get_session(server_name)
-            logger.debug(
-                f"Using existing session for server '{server_name}' to prefetch tools."
-            )
+            logger.debug(f"Using existing session for server '{server_name}' to prefetch tools.")
         except ValueError:
             try:
                 session = await self.client.create_session(server_name)
-                logger.debug(
-                    f"Temporarily created session for '{server_name}' to prefetch tools"
-                )
+                logger.debug(f"Temporarily created session for '{server_name}' to prefetch tools")
             except Exception:
-                logger.warning(
-                    f"Could not create session for '{server_name}' during prefetch"
-                )
+                logger.warning(f"Could not create session for '{server_name}' during prefetch")
                 continue
 
         # Fetch tools if session is available
@@ -70,17 +64,12 @@ class ServerManager:
             tools = await self.adapter._create_tools_from_connectors([connector])
 
             # Check if this server's tools have changed
-            if (
-                server_name not in self._server_tools
-                or self._server_tools[server_name] != tools
-            ):
+            if server_name not in self._server_tools or self._server_tools[server_name] != tools:
                 self._server_tools[server_name] = tools  # Cache tools
                 self.initialized_servers[server_name] = True  # Mark as initialized
                 logger.debug(f"Prefetched {len(tools)} tools for server '{server_name}'.")
             else:
-                logger.debug(
-                    f"Tools for server '{server_name}' unchanged, using cached version."
-                )
+                logger.debug(f"Tools for server '{server_name}' unchanged, using cached version.")
         except Exception as e:
             logger.error(f"Error prefetching tools for server '{server_name}': {e}")
 
@@ -17,9 +17,7 @@ class DisconnectServerTool(MCPServerTool):
     """Tool for disconnecting from the currently active MCP server."""
 
     name: ClassVar[str] = "disconnect_from_mcp_server"
-    description: ClassVar[str] = (
-        "Disconnect from the currently active MCP (Model Context Protocol) server"
-    )
+    description: ClassVar[str] = "Disconnect from the currently active MCP (Model Context Protocol) server"
     args_schema: ClassVar[type[BaseModel]] = DisconnectServerInput
 
     def _run(self, **kwargs) -> str:
@@ -21,10 +21,7 @@ class GetActiveServerTool(MCPServerTool):
     def _run(self, **kwargs) -> str:
         """Get the currently active MCP server."""
         if not self.server_manager.active_server:
-            return (
-                "No MCP server is currently active. "
-                "Use connect_to_mcp_server to connect to a server."
-            )
+            return "No MCP server is currently active. Use connect_to_mcp_server to connect to a server."
         return f"Currently active MCP server: {self.server_manager.active_server}"
 
     async def _arun(self, **kwargs) -> str:
@@ -48,36 +48,12 @@ class SearchToolsTool(MCPServerTool):
         results = await self._search_tool.search_tools(
             query, top_k=top_k, active_server=self.server_manager.active_server
         )
-        return self.format_search_results(results)
+        return results
 
     def _run(self, query: str, top_k: int = 100) -> str:
         """Synchronous version that raises a NotImplementedError - use _arun instead."""
         raise NotImplementedError("SearchToolsTool requires async execution. Use _arun instead.")
 
-    def format_search_results(self, results: list[tuple[BaseTool, str, float]]) -> str:
-        """Format search results in a consistent format."""
-
-        # Only show top_k results
-        results = results
-
-        formatted_output = "Search results\n\n"
-
-        for i, (tool, server_name, score) in enumerate(results):
-            # Format score as percentage
-            if i < 5:
-                score_pct = f"{score * 100:.1f}%"
-                logger.info(f"{i}: {tool.name} ({score_pct} match)")
-            formatted_output += f"[{i + 1}] Tool: {tool.name} ({score_pct} match)\n"
-            formatted_output += f"    Server: {server_name}\n"
-            formatted_output += f"    Description: {tool.description}\n\n"
-
-        # Add footer with information about how to use the results
-        formatted_output += (
-            "\nTo use a tool, connect to the appropriate server first, then invoke the tool."
-        )
-
-        return formatted_output
-
 
 class ToolSearchEngine:
     """
@@ -115,13 +91,18 @@ class ToolSearchEngine:
 
         try:
             from fastembed import TextEmbedding  # optional dependency install with [search]
-        except ImportError:
+        except ImportError as exc:
             logger.error(
                 "The 'fastembed' library is not installed. "
                 "To use the search functionality, please install it by running: "
                 "pip install mcp-use[search]"
            )
-            return False
+            raise ImportError(
+                "The 'fastembed' library is not installed. "
+                "To use the server_manager functionality, please install it by running: "
+                "pip install mcp-use[search] "
+                "or disable the server_manager by setting use_server_manager=False in the MCPAgent constructor."
+            ) from exc
 
         try:
             self.model = TextEmbedding(model_name="BAAI/bge-small-en-v1.5")
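The search-engine setup above now fails loudly: a missing fastembed raises ImportError instead of returning False. A hedged sketch of how a caller might adapt; the use_server_manager keyword comes from the new error message itself, the rest is illustrative:

# Probe for the optional extra before enabling the server manager (sketch).
try:
    import fastembed  # noqa: F401  # optional extra: pip install "mcp-use[search]"
    search_available = True
except ImportError:
    search_available = False

# agent = MCPAgent(llm=..., client=..., use_server_manager=search_available)
# In 1.3.3, enabling the server manager without fastembed surfaces the
# ImportError raised above rather than a silent False.
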
@@ -282,11 +263,7 @@ class ToolSearchEngine:
         )
 
         # If the server manager has an active server but it wasn't provided, use it
-        if (
-            active_server is None
-            and self.server_manager
-            and hasattr(self.server_manager, "active_server")
-        ):
+        if active_server is None and self.server_manager and hasattr(self.server_manager, "active_server"):
             active_server = self.server_manager.active_server
 
         results = self.search(query, top_k=top_k)
@@ -303,11 +280,27 @@ class ToolSearchEngine:
         marked_results = []
         for tool, server_name, score in results:
             # If this is the active server, add "(ACTIVE)" marker
-            display_server = (
-                f"{server_name} (ACTIVE)" if server_name == active_server else server_name
-            )
+            display_server = f"{server_name} (ACTIVE)" if server_name == active_server else server_name
             marked_results.append((tool, display_server, score))
         results = marked_results
 
         # Format and return the results
-        return results
+        return self._format_search_results(results)
+
+    def _format_search_results(self, results: list[tuple[BaseTool, str, float]]) -> str:
+        """Format search results in a consistent format."""
+        formatted_output = "Search results\n\n"
+
+        for i, (tool, server_name, score) in enumerate(results):
+            # Format score as percentage
+            score_pct = f"{score * 100:.1f}%"
+            if i < 5:
+                logger.info(f"{i}: {tool.name} ({score_pct} match)")
+            formatted_output += f"[{i + 1}] Tool: {tool.name} ({score_pct} match)\n"
+            formatted_output += f"    Server: {server_name}\n"
+            formatted_output += f"    Description: {tool.description}\n\n"
+
+        # Add footer with information about how to use the results
+        formatted_output += "\nTo use a tool, connect to the appropriate server first, then invoke the tool."
+
+        return formatted_output
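Taken together with the SearchToolsTool hunk earlier, the formatting now lives on the engine: search_tools() returns the finished string and the tool passes it straight through. A short sketch using only names from this diff; the engine construction is not shown here, so treat it as illustrative:

async def show_matches(engine) -> None:
    # `engine` stands for a configured ToolSearchEngine instance (not built in this diff).
    formatted = await engine.search_tools("read files", top_k=5)
    # 1.3.1: search_tools returned (tool, server, score) tuples and the tool formatted them.
    # 1.3.3: the engine formats them via _format_search_results, e.g.
    # "Search results\n\n[1] Tool: ... (93.0% match)\n    Server: ...\n    Description: ..."
    print(formatted)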