mcp-use 1.3.2__py3-none-any.whl → 1.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mcp-use might be problematic.

mcp_use/adapters/base.py CHANGED
@@ -33,8 +33,7 @@ class BaseAdapter(ABC):
         self.disallowed_tools = disallowed_tools or []
         self._connector_tool_map: dict[BaseConnector, list[T]] = {}
 
-    @classmethod
-    async def create_tools(cls, client: "MCPClient", disallowed_tools: list[str] | None = None) -> list[T]:
+    async def create_tools(self, client: "MCPClient") -> list[T]:
         """Create tools from an MCPClient instance.
 
         This is the recommended way to create tools from an MCPClient, as it handles
@@ -42,7 +41,6 @@ class BaseAdapter(ABC):
 
         Args:
             client: The MCPClient to extract tools from.
-            disallowed_tools: Optional list of tool names to exclude.
 
         Returns:
             A list of tools in the target framework's format.
@@ -56,9 +54,6 @@ class BaseAdapter(ABC):
             tools = await YourAdapter.create_tools(client)
             ```
         """
-        # Create the adapter
-        adapter = cls(disallowed_tools=disallowed_tools)
-
         # Ensure we have active sessions
         if not client.active_sessions:
             logger.info("No active sessions found, creating new ones...")
@@ -71,7 +66,7 @@ class BaseAdapter(ABC):
         connectors = [session.connector for session in sessions.values()]
 
         # Create tools from connectors
-        return await adapter._create_tools_from_connectors(connectors)
+        return await self._create_tools_from_connectors(connectors)
 
     async def load_tools_for_connector(self, connector: BaseConnector) -> list[T]:
         """Dynamically load tools for a specific connector.
mcp_use/agents/mcpagent.py CHANGED
@@ -7,7 +7,7 @@ to provide a simple interface for using MCP tools with different LLMs.
 
 import logging
 import time
-from collections.abc import AsyncIterator
+from collections.abc import AsyncGenerator, AsyncIterator
 
 from langchain.agents import AgentExecutor, create_tool_calling_agent
 from langchain.agents.output_parsers.tools import ToolAgentAction
@@ -106,9 +106,10 @@ class MCPAgent:
                 raise ValueError("Client must be provided when using server manager")
             self.server_manager = ServerManager(self.client, self.adapter)
 
-        # State tracking
+        # State tracking - initialize _tools as empty list
        self._agent_executor: AgentExecutor | None = None
        self._system_message: SystemMessage | None = None
+        self._tools: list[BaseTool] = []
 
        # Track model info for telemetry
        self._model_provider, self._model_name = extract_model_info(self.llm)
@@ -304,150 +305,54 @@ class MCPAgent:
         """
         return self.disallowed_tools
 
-    async def _generate_response_chunks_async(
+    async def _consume_and_return(
         self,
-        query: str,
-        max_steps: int | None = None,
-        manage_connector: bool = True,
-        external_history: list[BaseMessage] | None = None,
-    ) -> AsyncIterator[StreamEvent]:
-        """Internal async generator yielding response chunks.
-
-        The implementation purposefully keeps the logic compact:
-        1. Ensure the agent is initialised (optionally handling connector
-           lifecycle).
-        2. Forward the *same* inputs we use for ``run`` to LangChain's
-           ``AgentExecutor.astream``.
-        3. Diff the growing ``output`` field coming from LangChain and yield
-           only the new part so the caller receives *incremental* chunks.
-        4. Persist conversation history when memory is enabled.
-        """
-
-        # 1. Initialise on-demand ------------------------------------------------
-        initialised_here = False
-        if (manage_connector and not self._initialized) or (not self._initialized and self.auto_initialize):
-            await self.initialize()
-            initialised_here = True
-
-        if not self._agent_executor:
-            raise RuntimeError("MCP agent failed to initialise – call initialise() first?")
-
-        # 2. Build inputs --------------------------------------------------------
-        effective_max_steps = max_steps or self.max_steps
-        self._agent_executor.max_iterations = effective_max_steps
-
-        if self.memory_enabled:
-            self.add_to_history(HumanMessage(content=query))
-
-        history_to_use = external_history if external_history is not None else self._conversation_history
-        inputs = {"input": query, "chat_history": history_to_use}
+        generator: AsyncGenerator[tuple[AgentAction, str], str],
+    ) -> str:
+        """Consume the generator and return the final result.
 
-        # 3. Stream & diff -------------------------------------------------------
-        async for event in self._agent_executor.astream_events(inputs):
-            if event.get("event") == "on_chain_end":
-                output = event["data"]["output"]
-                if isinstance(output, list):
-                    for message in output:
-                        if not isinstance(message, ToolAgentAction):
-                            self.add_to_history(message)
-            yield event
-        # 5. House-keeping -------------------------------------------------------
-        # Restrict agent cleanup in _generate_response_chunks_async to only occur
-        # when the agent was initialized in this generator and is not client-managed
-        # and the user does want us to manage the connection.
-        if not self.client and initialised_here and manage_connector:
-            logger.info("🧹 Closing agent after generator completion")
-            await self.close()
+        This method manually iterates through the generator to consume the steps.
+        In Python, async generators cannot return values directly, so we expect
+        the final result to be yielded as a special marker.
 
-    async def astream(
-        self,
-        query: str,
-        max_steps: int | None = None,
-        manage_connector: bool = True,
-        external_history: list[BaseMessage] | None = None,
-    ) -> AsyncIterator[str]:
-        """Asynchronous streaming interface.
-
-        Example::
+        Args:
+            generator: The async generator that yields steps and a final result.
 
-            async for chunk in agent.astream("hello"):
-                print(chunk, end="|", flush=True)
+        Returns:
+            The final result from the generator.
         """
-        start_time = time.time()
-        success = False
-        chunk_count = 0
-        total_response_length = 0
-
-        try:
-            async for chunk in self._generate_response_chunks_async(
-                query=query,
-                max_steps=max_steps,
-                manage_connector=manage_connector,
-                external_history=external_history,
-            ):
-                chunk_count += 1
-                if isinstance(chunk, str):
-                    total_response_length += len(chunk)
-                yield chunk
-            success = True
-        finally:
-            # Track comprehensive execution data for streaming
-            execution_time_ms = int((time.time() - start_time) * 1000)
-
-            server_count = 0
-            if self.client:
-                server_count = len(self.client.get_all_active_sessions())
-            elif self.connectors:
-                server_count = len(self.connectors)
-
-            conversation_history_length = len(self._conversation_history) if self.memory_enabled else 0
-
-            self.telemetry.track_agent_execution(
-                execution_method="astream",
-                query=query,
-                success=success,
-                model_provider=self._model_provider,
-                model_name=self._model_name,
-                server_count=server_count,
-                server_identifiers=[connector.public_identifier for connector in self.connectors],
-                total_tools_available=len(self._tools) if self._tools else 0,
-                tools_available_names=[tool.name for tool in self._tools],
-                max_steps_configured=self.max_steps,
-                memory_enabled=self.memory_enabled,
-                use_server_manager=self.use_server_manager,
-                max_steps_used=max_steps,
-                manage_connector=manage_connector,
-                external_history_used=external_history is not None,
-                response=f"[STREAMED RESPONSE - {total_response_length} chars]",
-                execution_time_ms=execution_time_ms,
-                error_type=None if success else "streaming_error",
-                conversation_history_length=conversation_history_length,
-            )
-
-    async def run(
+        final_result = ""
+        steps_taken = 0
+        tools_used_names = []
+        async for item in generator:
+            # If it's a string, it's the final result
+            if isinstance(item, str):
+                final_result = item
+                break
+            # Otherwise it's a step tuple, just consume it
+            steps_taken += 1
+            tools_used_names.append(item[0].tool)
+        return final_result, steps_taken, tools_used_names
+
+    async def stream(
         self,
         query: str,
         max_steps: int | None = None,
         manage_connector: bool = True,
         external_history: list[BaseMessage] | None = None,
-    ) -> str:
-        """Run a query using the MCP tools with unified step-by-step execution.
-
-        This method handles connecting to the MCP server, initializing the agent,
-        running the query, and then cleaning up the connection.
+        track_execution: bool = True,
+    ) -> AsyncGenerator[tuple[AgentAction, str] | str, None]:
+        """Run the agent and yield intermediate steps as an async generator.
 
         Args:
             query: The query to run.
             max_steps: Optional maximum number of steps to take.
             manage_connector: Whether to handle the connector lifecycle internally.
-                If True, this method will connect, initialize, and disconnect from
-                the connector automatically. If False, the caller is responsible
-                for managing the connector lifecycle.
             external_history: Optional external history to use instead of the
                 internal conversation history.
 
-        Returns:
-            The result of running the query.
+        Yields:
+            Intermediate steps as (AgentAction, str) tuples, followed by the final result as a string.
         """
         result = ""
         initialized_here = False
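The new protocol: `stream` yields one `(AgentAction, str)` tuple per intermediate step and finally the result as a bare string, which `_consume_and_return` detects with an `isinstance(item, str)` check (the workaround for async generators not supporting return values). A consumption sketch, assuming an initialized `MCPAgent` named `agent` and an illustrative query (`AgentAction` is LangChain's type from `langchain_core.agents`):

```python
async for item in agent.stream("Find the README and summarize it"):
    if isinstance(item, str):
        # the bare-string sentinel is the final answer
        print("Result:", item)
    else:
        action, observation = item  # one intermediate (AgentAction, str) step
        print(f"step: {action.tool}({action.tool_input}) -> {str(observation)[:80]}")
```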
@@ -546,11 +451,13 @@ class MCPAgent:
                     result = next_step_output.return_values.get("output", "No output generated")
                     break
 
-                # If it's actions/steps, add to intermediate steps
+                # If it's actions/steps, add to intermediate steps and yield them
                 intermediate_steps.extend(next_step_output)
 
-                # Log tool calls and track tool usage
-                for action, output in next_step_output:
+                # Yield each step and track tool usage
+                for agent_step in next_step_output:
+                    yield agent_step
+                    action, observation = agent_step
                     tool_name = action.tool
                     tools_used_names.append(tool_name)
                     tool_input_str = str(action.tool_input)
@@ -559,11 +466,11 @@ class MCPAgent:
                         tool_input_str = tool_input_str[:97] + "..."
                     logger.info(f"🔧 Tool call: {tool_name} with input: {tool_input_str}")
                     # Truncate long outputs for readability
-                    output_str = str(output)
-                    if len(output_str) > 100:
-                        output_str = output_str[:97] + "..."
-                    output_str = output_str.replace("\n", " ")
-                    logger.info(f"📄 Tool result: {output_str}")
+                    observation_str = str(observation)
+                    if len(observation_str) > 100:
+                        observation_str = observation_str[:97] + "..."
+                    observation_str = observation_str.replace("\n", " ")
+                    logger.info(f"📄 Tool result: {observation_str}")
 
                 # Check for return_direct on the last action taken
                 if len(next_step_output) > 0:
@@ -597,12 +504,14 @@ class MCPAgent:
 
             logger.info(f"🎉 Agent execution complete in {time.time() - start_time} seconds")
             success = True
-            return result
+
+            # Yield the final result as a string
+            yield result
 
         except Exception as e:
             logger.error(f"❌ Error running query: {e}")
             if initialized_here and manage_connector:
-                logger.info("🧹 Cleaning up resources after initialization error in run")
+                logger.info("🧹 Cleaning up resources after initialization error in stream")
                 await self.close()
             raise
 
@@ -617,13 +526,84 @@ class MCPAgent:
                 server_count = len(self.connectors)
 
             conversation_history_length = len(self._conversation_history) if self.memory_enabled else 0
+
+            # Safely access _tools in case initialization failed
+            tools_available = getattr(self, "_tools", [])
+
+            if track_execution:
+                self.telemetry.track_agent_execution(
+                    execution_method="stream",
+                    query=query,
+                    success=success,
+                    model_provider=self._model_provider,
+                    model_name=self._model_name,
+                    server_count=server_count,
+                    server_identifiers=[connector.public_identifier for connector in self.connectors],
+                    total_tools_available=len(tools_available),
+                    tools_available_names=[tool.name for tool in tools_available],
+                    max_steps_configured=self.max_steps,
+                    memory_enabled=self.memory_enabled,
+                    use_server_manager=self.use_server_manager,
+                    max_steps_used=max_steps,
+                    manage_connector=manage_connector,
+                    external_history_used=external_history is not None,
+                    steps_taken=steps_taken,
+                    tools_used_count=len(tools_used_names),
+                    tools_used_names=tools_used_names,
+                    response=result,
+                    execution_time_ms=execution_time_ms,
+                    error_type=None if success else "execution_error",
+                    conversation_history_length=conversation_history_length,
+                )
+
+            # Clean up if necessary (e.g., if not using client-managed sessions)
+            if manage_connector and not self.client and initialized_here:
+                logger.info("🧹 Closing agent after stream completion")
+                await self.close()
+
+    async def run(
+        self,
+        query: str,
+        max_steps: int | None = None,
+        manage_connector: bool = True,
+        external_history: list[BaseMessage] | None = None,
+    ) -> str:
+        """Run a query using the MCP tools and return the final result.
+
+        This method uses the streaming implementation internally and returns
+        the final result after consuming all intermediate steps.
+
+        Args:
+            query: The query to run.
+            max_steps: Optional maximum number of steps to take.
+            manage_connector: Whether to handle the connector lifecycle internally.
+                If True, this method will connect, initialize, and disconnect from
+                the connector automatically. If False, the caller is responsible
+                for managing the connector lifecycle.
+            external_history: Optional external history to use instead of the
+                internal conversation history.
+
+        Returns:
+            The result of running the query.
+        """
+        success = True
+        start_time = time.time()
+        generator = self.stream(query, max_steps, manage_connector, external_history, track_execution=False)
+        try:
+            result, steps_taken, tools_used_names = await self._consume_and_return(generator)
+        except Exception as e:
+            success = False
+            error = str(e)
+            logger.error(f"❌ Error during agent execution: {e}")
+            raise
+        finally:
             self.telemetry.track_agent_execution(
                 execution_method="run",
                 query=query,
                 success=success,
                 model_provider=self._model_provider,
                 model_name=self._model_name,
-                server_count=server_count,
+                server_count=len(self.client.get_all_active_sessions()) if self.client else len(self.connectors),
                 server_identifiers=[connector.public_identifier for connector in self.connectors],
                 total_tools_available=len(self._tools) if self._tools else 0,
                 tools_available_names=[tool.name for tool in self._tools],
@@ -637,16 +617,132 @@ class MCPAgent:
                 tools_used_count=len(tools_used_names),
                 tools_used_names=tools_used_names,
                 response=result,
+                execution_time_ms=int((time.time() - start_time) * 1000),
+                error_type=error,
+                conversation_history_length=len(self._conversation_history),
+            )
+        return result
+
+    async def _generate_response_chunks_async(
+        self,
+        query: str,
+        max_steps: int | None = None,
+        manage_connector: bool = True,
+        external_history: list[BaseMessage] | None = None,
+    ) -> AsyncIterator[StreamEvent]:
+        """Internal async generator yielding response chunks.
+
+        The implementation purposefully keeps the logic compact:
+        1. Ensure the agent is initialised (optionally handling connector
+           lifecycle).
+        2. Forward the *same* inputs we use for ``run`` to LangChain's
+           ``AgentExecutor.astream``.
+        3. Diff the growing ``output`` field coming from LangChain and yield
+           only the new part so the caller receives *incremental* chunks.
+        4. Persist conversation history when memory is enabled.
+        """
+
+        # 1. Initialise on-demand ------------------------------------------------
+        initialised_here = False
+        if (manage_connector and not self._initialized) or (not self._initialized and self.auto_initialize):
+            await self.initialize()
+            initialised_here = True
+
+        if not self._agent_executor:
+            raise RuntimeError("MCP agent failed to initialise – call initialise() first?")
+
+        # 2. Build inputs --------------------------------------------------------
+        effective_max_steps = max_steps or self.max_steps
+        self._agent_executor.max_iterations = effective_max_steps
+
+        if self.memory_enabled:
+            self.add_to_history(HumanMessage(content=query))
+
+        history_to_use = external_history if external_history is not None else self._conversation_history
+        inputs = {"input": query, "chat_history": history_to_use}
+
+        # 3. Stream & diff -------------------------------------------------------
+        async for event in self._agent_executor.astream_events(inputs):
+            if event.get("event") == "on_chain_end":
+                output = event["data"]["output"]
+                if isinstance(output, list):
+                    for message in output:
+                        if not isinstance(message, ToolAgentAction):
+                            self.add_to_history(message)
+            yield event
+        # 5. House-keeping -------------------------------------------------------
+        # Restrict agent cleanup in _generate_response_chunks_async to only occur
+        # when the agent was initialized in this generator and is not client-managed
+        # and the user does want us to manage the connection.
+        if not self.client and initialised_here and manage_connector:
+            logger.info("🧹 Closing agent after generator completion")
+            await self.close()
+
+    async def stream_events(
+        self,
+        query: str,
+        max_steps: int | None = None,
+        manage_connector: bool = True,
+        external_history: list[BaseMessage] | None = None,
+    ) -> AsyncIterator[str]:
+        """Asynchronous streaming interface.
+
+        Example::
+
+            async for chunk in agent.astream("hello"):
+                print(chunk, end="|", flush=True)
+        """
+        start_time = time.time()
+        success = False
+        chunk_count = 0
+        total_response_length = 0
+
+        try:
+            async for chunk in self._generate_response_chunks_async(
+                query=query,
+                max_steps=max_steps,
+                manage_connector=manage_connector,
+                external_history=external_history,
+            ):
+                chunk_count += 1
+                if isinstance(chunk, str):
+                    total_response_length += len(chunk)
+                yield chunk
+            success = True
+        finally:
+            # Track comprehensive execution data for streaming
+            execution_time_ms = int((time.time() - start_time) * 1000)
+
+            server_count = 0
+            if self.client:
+                server_count = len(self.client.get_all_active_sessions())
+            elif self.connectors:
+                server_count = len(self.connectors)
+
+            conversation_history_length = len(self._conversation_history) if self.memory_enabled else 0
+
+            self.telemetry.track_agent_execution(
+                execution_method="stream_events",
+                query=query,
+                success=success,
+                model_provider=self._model_provider,
+                model_name=self._model_name,
+                server_count=server_count,
+                server_identifiers=[connector.public_identifier for connector in self.connectors],
+                total_tools_available=len(self._tools) if self._tools else 0,
+                tools_available_names=[tool.name for tool in self._tools],
+                max_steps_configured=self.max_steps,
+                memory_enabled=self.memory_enabled,
+                use_server_manager=self.use_server_manager,
+                max_steps_used=max_steps,
+                manage_connector=manage_connector,
+                external_history_used=external_history is not None,
+                response=f"[STREAMED RESPONSE - {total_response_length} chars]",
                 execution_time_ms=execution_time_ms,
-                error_type=None if success else "execution_error",
+                error_type=None if success else "streaming_error",
                 conversation_history_length=conversation_history_length,
             )
 
-        # Clean up if necessary (e.g., if not using client-managed sessions)
-        if manage_connector and not self.client and not initialized_here:
-            logger.info("🧹 Closing agent after query completion")
-            await self.close()
-
     async def close(self) -> None:
         """Close the MCP connection with improved error handling."""
         logger.info("🔌 Closing agent and cleaning up resources...")
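After this move, `run()` is a thin wrapper that drives the same generator as `stream()` (passing `track_execution=False` so the execution is counted once, under `execution_method="run"`), and the old `astream` survives unchanged as `stream_events`, still yielding whatever `AgentExecutor.astream_events` produces; note that its docstring example still says `agent.astream("hello")`. A hedged usage sketch, inside an async context with an initialized `agent`:

```python
# Final answer only: run() consumes stream() via _consume_and_return()
result = await agent.run("What tools are available?")

# Low-level event stream (the renamed astream); items are LangChain
# StreamEvent payloads rather than plain text chunks
async for event in agent.stream_events("What tools are available?"):
    ...
```

One thing to watch in the `run()` hunk above: `error` is assigned only in the `except` branch, yet the `finally` block passes `error_type=error` to telemetry, so the success path would hit a `NameError` unless `error` is initialized somewhere outside this hunk.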
@@ -659,7 +755,8 @@ class MCPAgent:
             if self.client:
                 logger.info("🔄 Closing sessions through client")
                 await self.client.close_all_sessions()
-                self._sessions = {}
+                if hasattr(self, "_sessions"):
+                    self._sessions = {}
             # If using direct connector, disconnect
             elif self.connectors:
                 for connector in self.connectors:
@@ -677,6 +774,8 @@ class MCPAgent:
             logger.error(f"❌ Error during agent closure: {e}")
             # Still try to clean up references even if there was an error
             self._agent_executor = None
-            self._tools = []
-            self._sessions = {}
+            if hasattr(self, "_tools"):
+                self._tools = []
+            if hasattr(self, "_sessions"):
+                self._sessions = {}
             self._initialized = False
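Together with the `self._tools: list[BaseTool] = []` initialization added in `__init__`, these `hasattr` guards make cleanup safe on a partially constructed agent. A minimal sketch, assuming a bare construction with no client or connectors:

```python
agent = MCPAgent(llm=llm)  # hypothetical minimal construction
# Even if initialize() was never called (or failed midway), close() no longer
# raises AttributeError for _tools/_sessions attributes that were never set:
await agent.close()
```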
mcp_use/connectors/base.py CHANGED
@@ -11,6 +11,7 @@ from typing import Any
 from mcp import ClientSession
 from mcp.shared.exceptions import McpError
 from mcp.types import CallToolResult, GetPromptResult, Prompt, ReadResourceResult, Resource, Tool
+from pydantic import AnyUrl
 
 from ..logging import logger
 from ..task_managers import ConnectionManager
@@ -287,10 +288,9 @@ class BaseConnector(ABC):
             logger.error(f"Error listing resources: {e}")
             return []
 
-    async def read_resource(self, uri: str) -> ReadResourceResult:
+    async def read_resource(self, uri: AnyUrl) -> ReadResourceResult:
         """Read a resource by URI."""
-        if not self.client_session:
-            raise RuntimeError("MCP client is not connected")
+        await self._ensure_connected()
 
         logger.debug(f"Reading resource: {uri}")
         result = await self.client_session.read_resource(uri)
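The signature now takes pydantic's `AnyUrl` instead of `str`, and the manual connection check is replaced by `_ensure_connected()`, which (re)establishes the session rather than raising. A caller-side sketch, with an illustrative URI:

```python
from pydantic import AnyUrl

# 1.3.2: read_resource("https://...") raised RuntimeError when disconnected;
# 1.3.4 types the parameter as AnyUrl and connects on demand
result = await connector.read_resource(AnyUrl("https://example.com/resource.txt"))
```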
@@ -298,7 +298,6 @@ class BaseConnector(ABC):
 
     async def list_prompts(self) -> list[Prompt]:
         """List all available prompts from the MCP implementation."""
-        # Ensure we're connected
         await self._ensure_connected()
 
         logger.debug("Listing prompts")
@@ -311,7 +310,6 @@ class BaseConnector(ABC):
 
     async def get_prompt(self, name: str, arguments: dict[str, Any] | None = None) -> GetPromptResult:
         """Get a prompt by name."""
-        # Ensure we're connected
         await self._ensure_connected()
 
         logger.debug(f"Getting prompt: {name}")
@@ -320,7 +318,6 @@ class BaseConnector(ABC):
 
     async def request(self, method: str, params: dict[str, Any] | None = None) -> Any:
         """Send a raw request to the MCP implementation."""
-        # Ensure we're connected
         await self._ensure_connected()
 
         logger.debug(f"Sending request: {method} with params: {params}")
mcp_use/connectors/sandbox.py CHANGED
@@ -65,9 +65,8 @@ class SandboxConnector(BaseConnector):
         super().__init__()
         if Sandbox is None:
             raise ImportError(
-                "E2B SDK (e2b-code-interpreter) not found. "
-                "Please install it with 'pip install mcp-use[e2b]' "
-                "(or 'pip install e2b-code-interpreter')."
+                "E2B SDK (e2b-code-interpreter) not found. Please install it with "
+                "'pip install mcp-use[e2b]' (or 'pip install e2b-code-interpreter')."
             )
 
         self.user_command = command
@@ -79,8 +78,8 @@ class SandboxConnector(BaseConnector):
         self.api_key = _e2b_options.get("api_key") or os.environ.get("E2B_API_KEY")
         if not self.api_key:
             raise ValueError(
-                "E2B API key is required. Provide it via 'sandbox_options.api_key' "
-                "or the E2B_API_KEY environment variable."
+                "E2B API key is required. Provide it via 'sandbox_options.api_key'"
+                " or the E2B_API_KEY environment variable."
             )
 
         self.sandbox_template_id = _e2b_options.get("sandbox_template_id", "base")
@@ -88,7 +87,7 @@ class SandboxConnector(BaseConnector):
 
         self.sandbox: Sandbox | None = None
         self.process: CommandHandle | None = None
-        self.client: ClientSession | None = None
+        self.client_session: ClientSession | None = None
         self.errlog = sys.stderr
         self.base_url: str | None = None
         self._connected = False
@@ -218,8 +217,8 @@ class SandboxConnector(BaseConnector):
         read_stream, write_stream = await self._connection_manager.start()
 
         # Create the client session
-        self.client = ClientSession(read_stream, write_stream, sampling_callback=None)
-        await self.client.__aenter__()
+        self.client_session = ClientSession(read_stream, write_stream, sampling_callback=None)
+        await self.client_session.__aenter__()
 
         # Mark as connected
         self._connected = True
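The rename matters beyond style: `BaseConnector` methods such as `read_resource` above operate on `self.client_session`, while `SandboxConnector` previously stored its session on `self.client`, so inherited calls could not see the live session. A hedged sketch of what now works (the constructor arguments and the resource listing are assumptions, not shown in this diff):

```python
connector = SandboxConnector(command="npx", args=["@playwright/mcp@latest"])  # hypothetical setup
await connector.connect()
# Inherited BaseConnector methods now find the session under client_session:
resources = await connector.list_resources()
```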
mcp_use/managers/tools/search_tools.py CHANGED
@@ -1,8 +1,8 @@
 import asyncio
+import math
 import time
 from typing import ClassVar
 
-import numpy as np
 from langchain_core.tools import BaseTool
 from pydantic import BaseModel, Field
 
@@ -205,10 +205,8 @@ class ToolSearchEngine:
         # Calculate similarity scores
         scores = {}
         for tool_name, embedding in self.tool_embeddings.items():
-            # Calculate cosine similarity
-            similarity = np.dot(query_embedding, embedding) / (
-                np.linalg.norm(query_embedding) * np.linalg.norm(embedding)
-            )
+            # Calculate cosine similarity using pure Python
+            similarity = self._cosine_similarity(query_embedding, embedding)
             scores[tool_name] = float(similarity)
 
         # Sort by score and get top_k results
@@ -304,3 +302,27 @@ class ToolSearchEngine:
         formatted_output += "\nTo use a tool, connect to the appropriate server first, then invoke the tool."
 
         return formatted_output
+
+    def _cosine_similarity(self, vec1: list[float], vec2: list[float]) -> float:
+        """Calculate cosine similarity between two vectors.
+
+        Args:
+            vec1: First vector
+            vec2: Second vector
+
+        Returns:
+            Cosine similarity between the vectors
+        """
+        # Calculate dot product
+        dot_product = sum(a * b for a, b in zip(vec1, vec2, strict=False))
+
+        # Calculate magnitudes
+        magnitude1 = math.sqrt(sum(a * a for a in vec1))
+        magnitude2 = math.sqrt(sum(b * b for b in vec2))
+
+        # Avoid division by zero
+        if magnitude1 == 0 or magnitude2 == 0:
+            return 0.0
+
+        # Calculate cosine similarity
+        return dot_product / (magnitude1 * magnitude2)
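This is the standard-library equivalent of cos θ = (v1 · v2) / (‖v1‖ ‖v2‖), removing numpy from this code path. One behavioral nuance: `zip(..., strict=False)` silently truncates to the shorter vector, where `np.dot` would raise on a shape mismatch. A quick sanity check against a standalone mirror of the method:

```python
import math


def cosine_similarity(vec1: list[float], vec2: list[float]) -> float:
    """Standalone mirror of ToolSearchEngine._cosine_similarity above."""
    dot_product = sum(a * b for a, b in zip(vec1, vec2, strict=False))
    magnitude1 = math.sqrt(sum(a * a for a in vec1))
    magnitude2 = math.sqrt(sum(b * b for b in vec2))
    if magnitude1 == 0 or magnitude2 == 0:
        return 0.0
    return dot_product / (magnitude1 * magnitude2)


assert abs(cosine_similarity([1.0, 2.0], [2.0, 4.0]) - 1.0) < 1e-9  # parallel -> 1.0
assert cosine_similarity([1.0, 0.0], [0.0, 1.0]) == 0.0             # orthogonal -> 0.0
assert cosine_similarity([0.0, 0.0], [1.0, 1.0]) == 0.0             # zero-vector guard
```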
mcp_use-1.3.2.dist-info/METADATA → mcp_use-1.3.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcp-use
-Version: 1.3.2
+Version: 1.3.4
 Summary: MCP Library for LLMs
 Author-email: Pietro Zullo <pietro.zullo@gmail.com>
 License: MIT
@@ -15,26 +15,20 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.11
 Requires-Dist: aiohttp>=3.9.0
-Requires-Dist: anyio>=4.0.0
-Requires-Dist: authlib>=1.5.2
-Requires-Dist: httpx>=0.28.1
 Requires-Dist: jsonschema-pydantic>=0.1.0
-Requires-Dist: langchain-community>=0.0.10
 Requires-Dist: langchain>=0.1.0
 Requires-Dist: mcp>=1.9.3
 Requires-Dist: posthog>=4.8.0
 Requires-Dist: pydantic>=2.0.0
 Requires-Dist: python-dotenv>=1.0.0
 Requires-Dist: scarf-sdk>=0.1.0
-Requires-Dist: starlette>=0.41.0
-Requires-Dist: typing-extensions>=4.8.0
-Requires-Dist: uvicorn>=0.32.0
 Requires-Dist: websockets>=12.0
 Provides-Extra: anthropic
 Requires-Dist: langchain-anthropic; extra == 'anthropic'
 Provides-Extra: dev
 Requires-Dist: black>=23.9.0; extra == 'dev'
-Requires-Dist: fastmcp; extra == 'dev'
+Requires-Dist: fastapi; extra == 'dev'
+Requires-Dist: fastmcp==2.8.0; extra == 'dev'
 Requires-Dist: isort>=5.12.0; extra == 'dev'
 Requires-Dist: mypy>=1.5.0; extra == 'dev'
 Requires-Dist: pytest-asyncio>=0.21.0; extra == 'dev'
@@ -49,6 +43,7 @@ Provides-Extra: search
 Requires-Dist: fastembed>=0.0.1; extra == 'search'
 Description-Content-Type: text/markdown
 
+<div align="center">
 <div align="center" style="margin: 0 auto; max-width: 80%;">
   <picture>
     <source media="(prefers-color-scheme: dark)" srcset="static/logo_white.svg">
@@ -57,24 +52,23 @@ Description-Content-Type: text/markdown
   </picture>
 </div>
 
-<h1 align="center">Unified MCP Client Library </h1>
+<br>
+
+# Connect any LLM to any MCP server
+
 <p align="center">
+    <a href="https://github.com/pietrozullo/mcp-use/stargazers" alt="GitHub stars">
+        <img src="https://img.shields.io/github/stars/pietrozullo/mcp-use?style=social" /></a>
     <a href="https://pypi.org/project/mcp_use/" alt="PyPI Version">
         <img src="https://img.shields.io/pypi/v/mcp_use.svg"/></a>
-    <a href="https://pypi.org/project/mcp_use/" alt="PyPI Downloads">
-        <img src="https://static.pepy.tech/badge/mcp-use" /></a>
-    <a href="https://pypi.org/project/mcp_use/" alt="Python Versions">
-        <img src="https://img.shields.io/pypi/pyversions/mcp_use.svg" /></a>
-    <a href="https://docs.mcp-use.io" alt="Documentation">
-        <img src="https://img.shields.io/badge/docs-mcp--use.io-blue" /></a>
-    <a href="https://mcp-use.io" alt="Website">
-        <img src="https://img.shields.io/badge/website-mcp--use.io-blue" /></a>
     <a href="https://github.com/pietrozullo/mcp-use/blob/main/LICENSE" alt="License">
         <img src="https://img.shields.io/github/license/pietrozullo/mcp-use" /></a>
-    <a href="https://github.com/astral-sh/ruff" alt="Code style: Ruff">
-        <img src="https://img.shields.io/badge/code%20style-ruff-000000.svg" /></a>
-    <a href="https://github.com/pietrozullo/mcp-use/stargazers" alt="GitHub stars">
-        <img src="https://img.shields.io/github/stars/pietrozullo/mcp-use?style=social" /></a>
+    <a href="https://pypi.org/project/mcp_use/" alt="PyPI Downloads">
+        <img src="https://static.pepy.tech/badge/mcp-use" /></a>
+    <a href="https://docs.mcp-use.com" alt="Documentation">
+        <img src="https://img.shields.io/badge/docs-mcp--use.com-blue" /></a>
+    <a href="https://mcp-use.com" alt="Website">
+        <img src="https://img.shields.io/badge/website-mcp--use.com-blue" /></a>
 </p>
 <p align="center">
     <a href="https://x.com/pietrozullo" alt="Twitter Follow - Pietro">
@@ -84,15 +78,19 @@ Description-Content-Type: text/markdown
     <a href="https://discord.gg/XkNkSkMz3V" alt="Discord">
         <img src="https://dcbadge.limes.pink/api/server/XkNkSkMz3V?style=flat" /></a>
 </p>
-🌐 MCP-Use is the open source way to connect **any LLM to any MCP server** and build custom agents that have tool access, without using closed source or application clients.
+</div>
 
-💬 Get started quickly - chat with your servers on our <b>hosted version</b>! <b>[Try mcp-use chat *(beta)* ](https://chat.mcp-use.io)</b>.
+🌐 MCP-Use is the open source way to connect **any LLM to any MCP server** and build custom MCP agents that have tool access, without using closed source or application clients.
 
 💡 Let developers easily connect any LLM to tools like web browsing, file operations, and more.
 
+- Visit the [mcp-use.com website](https://mcp-use.com/) to know how to build and deploy MCP agents.
+- Visit the [mcp-use docs](https://docs.mcp-use.com/) to get started with mcp-use library
+
+💬 Get started quickly - chat with your servers on our <b>hosted version</b>! [Try mcp-use chat (beta)](https://chat.mcp-use.com).
+
 # Features
 
-## ✨ Key Features
 <table>
   <tr>
     <th width="400">Feature</th>
@@ -107,8 +105,8 @@ Description-Content-Type: text/markdown
     <td>Works with any langchain supported LLM that supports tool calling (OpenAI, Anthropic, Groq, LLama etc.)</td>
   </tr>
   <tr>
-    <td>🌐 <a href="https://mcp-use.io/builder"><strong>Code Builder</strong></a></td>
-    <td>Explore MCP capabilities and generate starter code with the interactive <a href="https://mcp-use.io/builder">code builder</a>.</td>
+    <td>🌐 <a href="https://mcp-use.com/builder"><strong>Code Builder</strong></a></td>
+    <td>Explore MCP capabilities and generate starter code with the interactive <a href="https://mcp-use.com/builder">code builder</a>.</td>
   </tr>
   <tr>
     <td>🔗 <a href="#http-connection-example"><strong>HTTP Support</strong></a></td>
@@ -131,7 +129,7 @@ Description-Content-Type: text/markdown
     <td>Build your own agents with any framework using the LangChain adapter or create new adapters</td>
   </tr>
   <tr>
-    <td>❓ <a href="https://mcp-use.io/what-should-we-build-next"><strong>What should we build next</strong></a></td>
+    <td>❓ <a href="https://mcp-use.com/what-should-we-build-next"><strong>What should we build next</strong></a></td>
     <td>Let us know what you'd like us to build next</td>
   </tr>
 </table>
@@ -163,6 +161,7 @@ pip install langchain-openai
 # For Anthropic
 pip install langchain-anthropic
 ```
+
 For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/) and add your API keys for the provider you want to use to your `.env` file.
 
 ```bash
@@ -718,7 +717,7 @@ async def main():
 
     # Create a custom LangChain agent
     llm_with_tools = llm.bind_tools(tools)
-    result = await llm_with_tools.ainvoke("What tools do you have avilable ? ")
+    result = await llm_with_tools.ainvoke("What tools do you have available ? ")
     print(result)
 
 
@@ -799,7 +798,6 @@ Thanks to all our amazing contributors!
   <img src="https://contrib.rocks/image?repo=mcp-use/mcp-use" />
 </a>
 
-
 ## Top Starred Dependents
 
 <!-- gh-dependents-info-used-by-start -->
@@ -862,6 +860,7 @@ Thanks to all our amazing contributors!
 # License
 
 MIT
+
 # Citation
 
 If you use MCP-Use in your research or project, please cite:
@@ -875,3 +874,5 @@ If you use MCP-Use in your research or project, please cite:
   url = {https://github.com/pietrozullo/mcp-use}
 }
 ```
+
+<img referrerpolicy="no-referrer-when-downgrade" src="https://static.scarf.sh/a.png?x-pxid=732589b6-6850-4b8c-aa25-906c0979e426&page=README.md" />
mcp_use-1.3.2.dist-info/RECORD → mcp_use-1.3.4.dist-info/RECORD CHANGED
@@ -5,17 +5,17 @@ mcp_use/logging.py,sha256=CRtkPwR-bkXK_kQ0QOL86RikMWOHzEOi7A8VRHkNsZw,4270
 mcp_use/session.py,sha256=4kwcB_IkTt_3FiBSTI1H17KhL1W_6N5oai3HTxFrTH4,2496
 mcp_use/utils.py,sha256=QavJcVq2WxUUUCCpPCUeOB5bqIS0FFmpK-RAZkGc6aA,720
 mcp_use/adapters/__init__.py,sha256=-xCrgPThuX7x0PHGFDdjb7M-mgw6QV3sKu5PM7ShnRg,275
-mcp_use/adapters/base.py,sha256=tX5vNcz1HZtim4aID_KTfCeI6mQ6qI3bywTWJv0HaZs,7120
+mcp_use/adapters/base.py,sha256=U1z_UzojC-bytb4ZuKTRpEgEp-2F_BVBgqEXbUqLYB4,6901
 mcp_use/adapters/langchain_adapter.py,sha256=LdlpRyLORhl8NZvtAmisgPelXkhEbBErSNdGHb8SF18,10860
 mcp_use/agents/__init__.py,sha256=N3eVYP2PxqNO2KcQv5fY8UMUX2W3eLTNkkzuFIJ1DUA,261
 mcp_use/agents/base.py,sha256=EN-dRbwOi9vIqofFg3jmi5yT2VKlwEr9Cwi1DZgB3eE,1591
-mcp_use/agents/mcpagent.py,sha256=7AsU8NE7vWmtTp2N2t1N0j4D-gxBFpBlC3XAjyVULWE,31297
+mcp_use/agents/mcpagent.py,sha256=ke912HbpkXxcv1DpjiFTEg3hoIEMSFlTS62IRxYaTDs,35656
 mcp_use/agents/prompts/system_prompt_builder.py,sha256=E86STmxcl2Ic763_114awNqFB2RyLrQlbvgRmJajQjI,4116
 mcp_use/agents/prompts/templates.py,sha256=AZKrGWuI516C-PmyOPvxDBibNdqJtN24sOHTGR06bi4,1933
 mcp_use/connectors/__init__.py,sha256=cUF4yT0bNr8qeLkSzg28SHueiV5qDaHEB1l1GZ2K0dc,536
-mcp_use/connectors/base.py,sha256=yajYF5P924VzggCJ-kt37Rh_srCu__Seb7KUBhjvFdQ,12255
+mcp_use/connectors/base.py,sha256=c0Vj_hP9sw9YFHlap_W8OhGQy67OIpn-IpAdD0j3FD4,12128
 mcp_use/connectors/http.py,sha256=Aw0DVQN-7fYebDKuX3bvlH9GAFS7Mq9CM_HylXdNqEI,6274
-mcp_use/connectors/sandbox.py,sha256=Id6THKdBvPxFzOFJ2tSK300CiiRFE_EtN5Gu6NRfUTU,10870
+mcp_use/connectors/sandbox.py,sha256=cnybcNW55k-S0hUtRR1M3KcGXwnaeDMVm8wDTsfF1Mk,10875
 mcp_use/connectors/stdio.py,sha256=rnJoLaHf1cIjk1KqfxfSsUs-iGTJ7KZonxgIc3kXeCM,2791
 mcp_use/connectors/utils.py,sha256=zQ8GdNQx0Twz3by90BoU1RsWPf9wODGof4K3-NxPXeA,366
 mcp_use/connectors/websocket.py,sha256=G7ZeLJNPVl9AG6kCmiNJz1N2Ing_QxT7pSswigTKi8Y,9650
@@ -27,7 +27,7 @@ mcp_use/managers/tools/connect_server.py,sha256=MGYQCl11q-w6gSIYuT44dDk7ILV3Oh7k
 mcp_use/managers/tools/disconnect_server.py,sha256=Y3kJN31efzsjfJwxUhpBxS-bgU21DCfGbn_LgEbzyvI,1586
 mcp_use/managers/tools/get_active_server.py,sha256=tCaib76gYU3L5G82tEOTq4Io2cuCXWjOjPselb-92i8,964
 mcp_use/managers/tools/list_servers_tool.py,sha256=OPDSMNe-VuAhlUyhDnR4CiuZFpoMhnhWpAablwO5S0k,1897
-mcp_use/managers/tools/search_tools.py,sha256=Vxbi8j50q3DyV9dvWCdPeN4vWFG5ksuLG5sERpdlmWg,12226
+mcp_use/managers/tools/search_tools.py,sha256=4vso7ln-AfG6lQAMq9FA_CyeVtSEDYEWlHtdHtfnLps,12911
 mcp_use/managers/tools/use_tool.py,sha256=gMNjgJrI9XDitPyJglcJcAvowbEWkO5z57yt4DT2Lpc,6626
 mcp_use/observability/__init__.py,sha256=kTUcP0d6L5_3ktfldhdAk-3AWckzVHs7ztG-R6cye64,186
 mcp_use/observability/laminar.py,sha256=WWjmVXP55yCfAlqlayeuJmym1gdrv8is7UyrIp4Tbn0,839
@@ -43,7 +43,7 @@ mcp_use/telemetry/events.py,sha256=K5xqbmkum30r4gM2PWtTiUWGF8oZzGZw2DYwco1RfOQ,3
 mcp_use/telemetry/telemetry.py,sha256=ck2MDFMtooafriR1W_zi41dWq-0O-ucF89pCkdkyc9E,11724
 mcp_use/telemetry/utils.py,sha256=kDVTqt2oSeWNJbnTOlXOehr2yFO0PMyx2UGkrWkfJiw,1769
 mcp_use/types/sandbox.py,sha256=opJ9r56F1FvaqVvPovfAj5jZbsOexgwYx5wLgSlN8_U,712
-mcp_use-1.3.2.dist-info/METADATA,sha256=-74mIsLr1hd6BBPmXxpHKf7Yz_fk10W_kHmAQ6DK_K4,28683
-mcp_use-1.3.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-mcp_use-1.3.2.dist-info/licenses/LICENSE,sha256=7Pw7dbwJSBw8zH-WE03JnR5uXvitRtaGTP9QWPcexcs,1068
-mcp_use-1.3.2.dist-info/RECORD,,
+mcp_use-1.3.4.dist-info/METADATA,sha256=_RG0jvtG9ti0jnn2qQ8YfXZ1s0-_VdfniJ9EFNaJePY,28534
+mcp_use-1.3.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+mcp_use-1.3.4.dist-info/licenses/LICENSE,sha256=7Pw7dbwJSBw8zH-WE03JnR5uXvitRtaGTP9QWPcexcs,1068
+mcp_use-1.3.4.dist-info/RECORD,,