mcp-use 1.2.13__tar.gz → 1.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mcp-use might be problematic.

Files changed (91)
  1. {mcp_use-1.2.13 → mcp_use-1.3.0}/.github/workflows/unittests.yml +1 -1
  2. {mcp_use-1.2.13 → mcp_use-1.3.0}/PKG-INFO +146 -19
  3. {mcp_use-1.2.13 → mcp_use-1.3.0}/README.md +143 -18
  4. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/api-reference/introduction.mdx +157 -15
  5. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/essentials/configuration.mdx +87 -1
  6. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/essentials/connection-types.mdx +76 -2
  7. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/quickstart.mdx +73 -0
  8. mcp_use-1.3.0/examples/sandbox_everything.py +69 -0
  9. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/agents/mcpagent.py +8 -12
  10. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/client.py +18 -9
  11. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/config.py +30 -4
  12. mcp_use-1.3.0/mcp_use/connectors/__init__.py +20 -0
  13. mcp_use-1.3.0/mcp_use/connectors/sandbox.py +291 -0
  14. mcp_use-1.3.0/mcp_use/connectors/utils.py +13 -0
  15. mcp_use-1.3.0/mcp_use/types/clientoptions.py +23 -0
  16. mcp_use-1.3.0/mcp_use/types/sandbox.py +23 -0
  17. {mcp_use-1.2.13 → mcp_use-1.3.0}/pyproject.toml +4 -1
  18. {mcp_use-1.2.13 → mcp_use-1.3.0}/ruff.toml +1 -1
  19. {mcp_use-1.2.13 → mcp_use-1.3.0}/tests/unit/test_client.py +2 -2
  20. {mcp_use-1.2.13 → mcp_use-1.3.0}/tests/unit/test_config.py +79 -1
  21. mcp_use-1.3.0/tests/unit/test_sandbox_connector.py +308 -0
  22. mcp_use-1.2.13/mcp_use/connectors/__init__.py +0 -13
  23. {mcp_use-1.2.13 → mcp_use-1.3.0}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
  24. {mcp_use-1.2.13 → mcp_use-1.3.0}/.github/pull_request_template.md +0 -0
  25. {mcp_use-1.2.13 → mcp_use-1.3.0}/.github/workflows/publish.yml +0 -0
  26. {mcp_use-1.2.13 → mcp_use-1.3.0}/.gitignore +0 -0
  27. {mcp_use-1.2.13 → mcp_use-1.3.0}/.pre-commit-config.yaml +0 -0
  28. {mcp_use-1.2.13 → mcp_use-1.3.0}/CONTRIBUTING.md +0 -0
  29. {mcp_use-1.2.13 → mcp_use-1.3.0}/LICENSE +0 -0
  30. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/README.md +0 -0
  31. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/api-reference/mcpagent.mdx +0 -0
  32. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/building-custom-agents.mdx +0 -0
  33. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/development.mdx +0 -0
  34. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/docs.json +0 -0
  35. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/essentials/debugging.mdx +0 -0
  36. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/essentials/llm-integration.mdx +0 -0
  37. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/essentials/server-manager.mdx +0 -0
  38. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/favicon.svg +0 -0
  39. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/images/hero-dark.png +0 -0
  40. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/images/hero-light.png +0 -0
  41. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/introduction.mdx +0 -0
  42. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/logo/dark.svg +0 -0
  43. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/logo/light.svg +0 -0
  44. {mcp_use-1.2.13 → mcp_use-1.3.0}/docs/snippets/snippet-intro.mdx +0 -0
  45. {mcp_use-1.2.13 → mcp_use-1.3.0}/examples/airbnb_mcp.json +0 -0
  46. {mcp_use-1.2.13 → mcp_use-1.3.0}/examples/airbnb_use.py +0 -0
  47. {mcp_use-1.2.13 → mcp_use-1.3.0}/examples/blender_use.py +0 -0
  48. {mcp_use-1.2.13 → mcp_use-1.3.0}/examples/browser_mcp.json +0 -0
  49. {mcp_use-1.2.13 → mcp_use-1.3.0}/examples/browser_use.py +0 -0
  50. {mcp_use-1.2.13 → mcp_use-1.3.0}/examples/chat_example.py +0 -0
  51. {mcp_use-1.2.13 → mcp_use-1.3.0}/examples/filesystem_use.py +0 -0
  52. {mcp_use-1.2.13 → mcp_use-1.3.0}/examples/http_example.py +0 -0
  53. {mcp_use-1.2.13 → mcp_use-1.3.0}/examples/mcp_everything.py +0 -0
  54. {mcp_use-1.2.13 → mcp_use-1.3.0}/examples/multi_server_example.py +0 -0
  55. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/__init__.py +0 -0
  56. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/adapters/__init__.py +0 -0
  57. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/adapters/base.py +0 -0
  58. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/adapters/langchain_adapter.py +0 -0
  59. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/agents/__init__.py +0 -0
  60. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/agents/base.py +0 -0
  61. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/agents/prompts/system_prompt_builder.py +0 -0
  62. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/agents/prompts/templates.py +0 -0
  63. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/connectors/base.py +0 -0
  64. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/connectors/http.py +0 -0
  65. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/connectors/stdio.py +0 -0
  66. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/connectors/websocket.py +0 -0
  67. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/logging.py +0 -0
  68. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/managers/__init__.py +0 -0
  69. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/managers/server_manager.py +0 -0
  70. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/managers/tools/__init__.py +0 -0
  71. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/managers/tools/base_tool.py +0 -0
  72. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/managers/tools/connect_server.py +0 -0
  73. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/managers/tools/disconnect_server.py +0 -0
  74. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/managers/tools/get_active_server.py +0 -0
  75. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/managers/tools/list_servers_tool.py +0 -0
  76. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/managers/tools/search_tools.py +0 -0
  77. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/managers/tools/use_tool.py +0 -0
  78. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/session.py +0 -0
  79. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/task_managers/__init__.py +0 -0
  80. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/task_managers/base.py +0 -0
  81. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/task_managers/sse.py +0 -0
  82. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/task_managers/stdio.py +0 -0
  83. {mcp_use-1.2.13 → mcp_use-1.3.0}/mcp_use/task_managers/websocket.py +0 -0
  84. {mcp_use-1.2.13 → mcp_use-1.3.0}/pytest.ini +0 -0
  85. {mcp_use-1.2.13 → mcp_use-1.3.0}/static/logo_black.svg +0 -0
  86. {mcp_use-1.2.13 → mcp_use-1.3.0}/static/logo_white.svg +0 -0
  87. {mcp_use-1.2.13 → mcp_use-1.3.0}/tests/conftest.py +0 -0
  88. {mcp_use-1.2.13 → mcp_use-1.3.0}/tests/unit/test_http_connector.py +0 -0
  89. {mcp_use-1.2.13 → mcp_use-1.3.0}/tests/unit/test_logging.py +0 -0
  90. {mcp_use-1.2.13 → mcp_use-1.3.0}/tests/unit/test_session.py +0 -0
  91. {mcp_use-1.2.13 → mcp_use-1.3.0}/tests/unit/test_stdio_connector.py +0 -0
{mcp_use-1.2.13 → mcp_use-1.3.0}/.github/workflows/unittests.yml

@@ -22,7 +22,7 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install .[dev,anthropic,openai,search]
+          pip install .[dev,anthropic,openai,search,e2b]
       - name: Lint with ruff
         run: |
           ruff check .
{mcp_use-1.2.13 → mcp_use-1.3.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcp-use
-Version: 1.2.13
+Version: 1.3.0
 Summary: MCP Library for LLMs
 Author-email: Pietro Zullo <pietro.zullo@gmail.com>
 License: MIT
@@ -33,6 +33,8 @@ Requires-Dist: pytest-asyncio>=0.21.0; extra == 'dev'
 Requires-Dist: pytest-cov>=4.1.0; extra == 'dev'
 Requires-Dist: pytest>=7.4.0; extra == 'dev'
 Requires-Dist: ruff>=0.1.0; extra == 'dev'
+Provides-Extra: e2b
+Requires-Dist: e2b-code-interpreter>=1.5.0; extra == 'e2b'
 Provides-Extra: openai
 Requires-Dist: openai>=1.10.0; extra == 'openai'
 Provides-Extra: search
@@ -67,30 +69,64 @@ Description-Content-Type: text/markdown
   <img src="https://img.shields.io/github/stars/pietrozullo/mcp-use?style=social" /></a>
 </p>
 <p align="center">
-  <a href="https://x.com/pietrozullo" alt="Twitter Follow">
+  <a href="https://x.com/pietrozullo" alt="Twitter Follow - Pietro">
   <img src="https://img.shields.io/twitter/follow/Pietro?style=social" /></a>
+  <a href="https://x.com/pederzh" alt="Twitter Follow - Luigi">
+  <img src="https://img.shields.io/twitter/follow/Luigi?style=social" /></a>
   <a href="https://discord.gg/XkNkSkMz3V" alt="Discord">
   <img src="https://dcbadge.limes.pink/api/server/https://discord.gg/XkNkSkMz3V?style=flat" /></a>
 </p>
 🌐 MCP-Use is the open source way to connect **any LLM to any MCP server** and build custom agents that have tool access, without using closed source or application clients.

+💬 Get started quickly - chat with your servers on our <b>hosted version</b>! <b>[Try mcp-use chat *(beta)* ](https://chat.mcp-use.io)</b>.
+
 💡 Let developers easily connect any LLM to tools like web browsing, file operations, and more.

 # Features

 ## ✨ Key Features
-
-| Feature | Description |
-|---------|-------------|
-| 🔄 [**Ease of use**](#quick-start) | Create your first MCP capable agent you need only 6 lines of code |
-| 🤖 [**LLM Flexibility**](#installing-langchain-providers) | Works with any langchain supported LLM that supports tool calling (OpenAI, Anthropic, Groq, LLama etc.) |
-| 🌐 [**Code Builder**](https://mcp-use.io/builder) | Explore MCP capabilities and generate starter code with the interactive [code builder](https://mcp-use.io/builder). |
-| 🔗 [**HTTP Support**](#http-connection-example) | Direct connection to MCP servers running on specific HTTP ports |
-| ⚙️ [**Dynamic Server Selection**](#dynamic-server-selection-server-manager) | Agents can dynamically choose the most appropriate MCP server for a given task from the available pool |
-| 🧩 [**Multi-Server Support**](#multi-server-support) | Use multiple MCP servers simultaneously in a single agent |
-| 🛡️ [**Tool Restrictions**](#tool-access-control) | Restrict potentially dangerous tools like file system or network access |
-| 🔧 [**Custom Agents**](#build-a-custom-agent) | Build your own agents with any framework using the LangChain adapter or create new adapters |
-
+<table>
+  <tr>
+    <th width="400">Feature</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>🔄 <a href="#quick-start"><strong>Ease of use</strong></a></td>
+    <td>Create your first MCP capable agent you need only 6 lines of code</td>
+  </tr>
+  <tr>
+    <td>🤖 <a href="#installing-langchain-providers"><strong>LLM Flexibility</strong></a></td>
+    <td>Works with any langchain supported LLM that supports tool calling (OpenAI, Anthropic, Groq, LLama etc.)</td>
+  </tr>
+  <tr>
+    <td>🌐 <a href="https://mcp-use.io/builder"><strong>Code Builder</strong></a></td>
+    <td>Explore MCP capabilities and generate starter code with the interactive <a href="https://mcp-use.io/builder">code builder</a>.</td>
+  </tr>
+  <tr>
+    <td>🔗 <a href="#http-connection-example"><strong>HTTP Support</strong></a></td>
+    <td>Direct connection to MCP servers running on specific HTTP ports</td>
+  </tr>
+  <tr>
+    <td>⚙️ <a href="#dynamic-server-selection-server-manager"><strong>Dynamic Server Selection</strong></a></td>
+    <td>Agents can dynamically choose the most appropriate MCP server for a given task from the available pool</td>
+  </tr>
+  <tr>
+    <td>🧩 <a href="#multi-server-support"><strong>Multi-Server Support</strong></a></td>
+    <td>Use multiple MCP servers simultaneously in a single agent</td>
+  </tr>
+  <tr>
+    <td>🛡️ <a href="#tool-access-control"><strong>Tool Restrictions</strong></a></td>
+    <td>Restrict potentially dangerous tools like file system or network access</td>
+  </tr>
+  <tr>
+    <td>🔧 <a href="#build-a-custom-agent"><strong>Custom Agents</strong></a></td>
+    <td>Build your own agents with any framework using the LangChain adapter or create new adapters</td>
+  </tr>
+  <tr>
+    <td>❓ <a href="https://mcp-use.io/what-should-we-build-next"><strong>What should we build next</strong></a></td>
+    <td>Let us know what you'd like us to build next</td>
+  </tr>
+</table>

 # Quick start

@@ -118,11 +154,8 @@ pip install langchain-openai

 # For Anthropic
 pip install langchain-anthropic
-
-# For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/)
 ```
-
-and add your API keys for the provider you want to use to your `.env` file.
+For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/) and add your API keys for the provider you want to use to your `.env` file.

 ```bash
 OPENAI_API_KEY=
@@ -561,6 +594,101 @@ if __name__ == "__main__":
     asyncio.run(main())
 ```

+# Sandboxed Execution
+
+MCP-Use supports running MCP servers in a sandboxed environment using E2B's cloud infrastructure. This allows you to run MCP servers without having to install dependencies locally, making it easier to use tools that might have complex setups or system requirements.
+
+## Installation
+
+To use sandboxed execution, you need to install the E2B dependency:
+
+```bash
+# Install mcp-use with E2B support
+pip install "mcp-use[e2b]"
+
+# Or install the dependency directly
+pip install e2b-code-interpreter
+```
+
+You'll also need an E2B API key. You can sign up at [e2b.dev](https://e2b.dev) to get your API key.
+
+## Configuration
+
+To enable sandboxed execution, use the `ClientOptions` parameter when creating your `MCPClient`:
+
+```python
+import asyncio
+import os
+from dotenv import load_dotenv
+from langchain_openai import ChatOpenAI
+from mcp_use import MCPAgent, MCPClient
+from mcp_use.types.sandbox import SandboxOptions
+from mcp_use.types.clientoptions import ClientOptions
+
+async def main():
+    # Load environment variables (needs E2B_API_KEY)
+    load_dotenv()
+
+    # Define MCP server configuration
+    server_config = {
+        "mcpServers": {
+            "everything": {
+                "command": "npx",
+                "args": ["-y", "@modelcontextprotocol/server-everything"],
+            }
+        }
+    }
+
+    # Define sandbox options
+    sandbox_options: SandboxOptions = {
+        "api_key": os.getenv("E2B_API_KEY"),  # API key can also be provided directly
+        "sandbox_template_id": "base",  # Use base template
+    }
+
+    # Create client options for sandboxed mode
+    client_options: ClientOptions = {
+        "is_sandboxed": True,
+        "sandbox_options": sandbox_options
+    }
+
+    # Create client with sandboxed mode enabled
+    client = MCPClient(
+        config=server_config,
+        options=client_options
+    )
+
+    # Create agent with the sandboxed client
+    llm = ChatOpenAI(model="gpt-4o")
+    agent = MCPAgent(llm=llm, client=client)
+
+    # Run your agent
+    result = await agent.run("Use the command line tools to help me add 1+1")
+    print(result)
+
+    # Clean up
+    await client.close_all_sessions()
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+## Sandbox Options
+
+The `SandboxOptions` type provides configuration for the sandbox environment:
+
+| Option                 | Description                                                                               | Default               |
+| ---------------------- | ----------------------------------------------------------------------------------------- | --------------------- |
+| `api_key`              | E2B API key. Required - can be provided directly or via E2B_API_KEY environment variable | None                  |
+| `sandbox_template_id`  | Template ID for the sandbox environment                                                   | "base"                |
+| `supergateway_command` | Command to run supergateway                                                               | "npx -y supergateway" |
+
+## Benefits of Sandboxed Execution
+
+- **No local dependencies**: Run MCP servers without installing dependencies locally
+- **Isolation**: Execute code in a secure, isolated environment
+- **Consistent environment**: Ensure consistent behavior across different systems
+- **Resource efficiency**: Offload resource-intensive tasks to cloud infrastructure
+
 # Build a Custom Agent:

 You can also build your own custom agent using the LangChain adapter:
@@ -652,7 +780,6 @@ agent = MCPAgent(

 This is useful when you only need to see the agent's steps and decision-making process without all the low-level debug information from other components.

-
 # Roadmap

 <ul>
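
The new `mcp_use/types/clientoptions.py` and `mcp_use/types/sandbox.py` modules (23 added lines each in the file list above) back the options used in these examples, but their source is not shown in this diff. A rough sketch of what they plausibly contain, reconstructed only from the fields documented above and not from the actual package:

```python
# Hypothetical reconstruction of the new typing modules, based solely on the
# fields documented in this diff; the shipped code may differ.
from typing import TypedDict


class SandboxOptions(TypedDict, total=False):
    """Configuration for the E2B sandbox environment."""

    api_key: str  # E2B API key; may also come from the E2B_API_KEY env var
    sandbox_template_id: str  # documented default: "base"
    supergateway_command: str  # documented default: "npx -y supergateway"


class ClientOptions(TypedDict, total=False):
    """Options accepted by MCPClient(..., options=...)."""

    is_sandboxed: bool  # run configured stdio servers inside an E2B sandbox
    sandbox_options: SandboxOptions  # sandbox settings used when is_sandboxed is True
```
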
{mcp_use-1.2.13 → mcp_use-1.3.0}/README.md

@@ -26,30 +26,64 @@
   <img src="https://img.shields.io/github/stars/pietrozullo/mcp-use?style=social" /></a>
 </p>
 <p align="center">
-  <a href="https://x.com/pietrozullo" alt="Twitter Follow">
+  <a href="https://x.com/pietrozullo" alt="Twitter Follow - Pietro">
   <img src="https://img.shields.io/twitter/follow/Pietro?style=social" /></a>
+  <a href="https://x.com/pederzh" alt="Twitter Follow - Luigi">
+  <img src="https://img.shields.io/twitter/follow/Luigi?style=social" /></a>
   <a href="https://discord.gg/XkNkSkMz3V" alt="Discord">
   <img src="https://dcbadge.limes.pink/api/server/https://discord.gg/XkNkSkMz3V?style=flat" /></a>
 </p>
 🌐 MCP-Use is the open source way to connect **any LLM to any MCP server** and build custom agents that have tool access, without using closed source or application clients.

+💬 Get started quickly - chat with your servers on our <b>hosted version</b>! <b>[Try mcp-use chat *(beta)* ](https://chat.mcp-use.io)</b>.
+
 💡 Let developers easily connect any LLM to tools like web browsing, file operations, and more.

 # Features

 ## ✨ Key Features
-
-| Feature | Description |
-|---------|-------------|
-| 🔄 [**Ease of use**](#quick-start) | Create your first MCP capable agent you need only 6 lines of code |
-| 🤖 [**LLM Flexibility**](#installing-langchain-providers) | Works with any langchain supported LLM that supports tool calling (OpenAI, Anthropic, Groq, LLama etc.) |
-| 🌐 [**Code Builder**](https://mcp-use.io/builder) | Explore MCP capabilities and generate starter code with the interactive [code builder](https://mcp-use.io/builder). |
-| 🔗 [**HTTP Support**](#http-connection-example) | Direct connection to MCP servers running on specific HTTP ports |
-| ⚙️ [**Dynamic Server Selection**](#dynamic-server-selection-server-manager) | Agents can dynamically choose the most appropriate MCP server for a given task from the available pool |
-| 🧩 [**Multi-Server Support**](#multi-server-support) | Use multiple MCP servers simultaneously in a single agent |
-| 🛡️ [**Tool Restrictions**](#tool-access-control) | Restrict potentially dangerous tools like file system or network access |
-| 🔧 [**Custom Agents**](#build-a-custom-agent) | Build your own agents with any framework using the LangChain adapter or create new adapters |
-
+<table>
+  <tr>
+    <th width="400">Feature</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>🔄 <a href="#quick-start"><strong>Ease of use</strong></a></td>
+    <td>Create your first MCP capable agent you need only 6 lines of code</td>
+  </tr>
+  <tr>
+    <td>🤖 <a href="#installing-langchain-providers"><strong>LLM Flexibility</strong></a></td>
+    <td>Works with any langchain supported LLM that supports tool calling (OpenAI, Anthropic, Groq, LLama etc.)</td>
+  </tr>
+  <tr>
+    <td>🌐 <a href="https://mcp-use.io/builder"><strong>Code Builder</strong></a></td>
+    <td>Explore MCP capabilities and generate starter code with the interactive <a href="https://mcp-use.io/builder">code builder</a>.</td>
+  </tr>
+  <tr>
+    <td>🔗 <a href="#http-connection-example"><strong>HTTP Support</strong></a></td>
+    <td>Direct connection to MCP servers running on specific HTTP ports</td>
+  </tr>
+  <tr>
+    <td>⚙️ <a href="#dynamic-server-selection-server-manager"><strong>Dynamic Server Selection</strong></a></td>
+    <td>Agents can dynamically choose the most appropriate MCP server for a given task from the available pool</td>
+  </tr>
+  <tr>
+    <td>🧩 <a href="#multi-server-support"><strong>Multi-Server Support</strong></a></td>
+    <td>Use multiple MCP servers simultaneously in a single agent</td>
+  </tr>
+  <tr>
+    <td>🛡️ <a href="#tool-access-control"><strong>Tool Restrictions</strong></a></td>
+    <td>Restrict potentially dangerous tools like file system or network access</td>
+  </tr>
+  <tr>
+    <td>🔧 <a href="#build-a-custom-agent"><strong>Custom Agents</strong></a></td>
+    <td>Build your own agents with any framework using the LangChain adapter or create new adapters</td>
+  </tr>
+  <tr>
+    <td>❓ <a href="https://mcp-use.io/what-should-we-build-next"><strong>What should we build next</strong></a></td>
+    <td>Let us know what you'd like us to build next</td>
+  </tr>
+</table>

 # Quick start

@@ -77,11 +111,8 @@ pip install langchain-openai

 # For Anthropic
 pip install langchain-anthropic
-
-# For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/)
 ```
-
-and add your API keys for the provider you want to use to your `.env` file.
+For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/) and add your API keys for the provider you want to use to your `.env` file.

 ```bash
 OPENAI_API_KEY=
@@ -520,6 +551,101 @@ if __name__ == "__main__":
     asyncio.run(main())
 ```

+# Sandboxed Execution
+
+MCP-Use supports running MCP servers in a sandboxed environment using E2B's cloud infrastructure. This allows you to run MCP servers without having to install dependencies locally, making it easier to use tools that might have complex setups or system requirements.
+
+## Installation
+
+To use sandboxed execution, you need to install the E2B dependency:
+
+```bash
+# Install mcp-use with E2B support
+pip install "mcp-use[e2b]"
+
+# Or install the dependency directly
+pip install e2b-code-interpreter
+```
+
+You'll also need an E2B API key. You can sign up at [e2b.dev](https://e2b.dev) to get your API key.
+
+## Configuration
+
+To enable sandboxed execution, use the `ClientOptions` parameter when creating your `MCPClient`:
+
+```python
+import asyncio
+import os
+from dotenv import load_dotenv
+from langchain_openai import ChatOpenAI
+from mcp_use import MCPAgent, MCPClient
+from mcp_use.types.sandbox import SandboxOptions
+from mcp_use.types.clientoptions import ClientOptions
+
+async def main():
+    # Load environment variables (needs E2B_API_KEY)
+    load_dotenv()
+
+    # Define MCP server configuration
+    server_config = {
+        "mcpServers": {
+            "everything": {
+                "command": "npx",
+                "args": ["-y", "@modelcontextprotocol/server-everything"],
+            }
+        }
+    }
+
+    # Define sandbox options
+    sandbox_options: SandboxOptions = {
+        "api_key": os.getenv("E2B_API_KEY"),  # API key can also be provided directly
+        "sandbox_template_id": "base",  # Use base template
+    }
+
+    # Create client options for sandboxed mode
+    client_options: ClientOptions = {
+        "is_sandboxed": True,
+        "sandbox_options": sandbox_options
+    }
+
+    # Create client with sandboxed mode enabled
+    client = MCPClient(
+        config=server_config,
+        options=client_options
+    )
+
+    # Create agent with the sandboxed client
+    llm = ChatOpenAI(model="gpt-4o")
+    agent = MCPAgent(llm=llm, client=client)
+
+    # Run your agent
+    result = await agent.run("Use the command line tools to help me add 1+1")
+    print(result)
+
+    # Clean up
+    await client.close_all_sessions()
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+## Sandbox Options
+
+The `SandboxOptions` type provides configuration for the sandbox environment:
+
+| Option                 | Description                                                                               | Default               |
+| ---------------------- | ----------------------------------------------------------------------------------------- | --------------------- |
+| `api_key`              | E2B API key. Required - can be provided directly or via E2B_API_KEY environment variable | None                  |
+| `sandbox_template_id`  | Template ID for the sandbox environment                                                   | "base"                |
+| `supergateway_command` | Command to run supergateway                                                               | "npx -y supergateway" |
+
+## Benefits of Sandboxed Execution
+
+- **No local dependencies**: Run MCP servers without installing dependencies locally
+- **Isolation**: Execute code in a secure, isolated environment
+- **Consistent environment**: Ensure consistent behavior across different systems
+- **Resource efficiency**: Offload resource-intensive tasks to cloud infrastructure
+
 # Build a Custom Agent:

 You can also build your own custom agent using the LangChain adapter:
@@ -611,7 +737,6 @@ agent = MCPAgent(

 This is useful when you only need to see the agent's steps and decision-making process without all the low-level debug information from other components.

-
 # Roadmap

 <ul>
{mcp_use-1.2.13 → mcp_use-1.3.0}/docs/api-reference/introduction.mdx

@@ -49,6 +49,60 @@ client = MCPClient.from_dict(config=config)
 | --------- | ---- | -------- | ---------------------------------------------- |
 | `config`  | dict | Yes      | Dictionary containing MCP server configuration |

+#### Sandboxed Execution
+
+Both `from_config_file` and `from_dict` methods support the `options` parameter for configuring client features, including sandboxed execution:
+
+```python
+from mcp_use import MCPClient
+from mcp_use.types.sandbox import SandboxOptions
+from mcp_use.types.clientoptions import ClientOptions
+
+# Define sandbox options
+sandbox_options: SandboxOptions = {
+    "api_key": "your_e2b_api_key",
+    "sandbox_template_id": "code-interpreter-v1"
+}
+
+# Create client options for sandboxed mode
+client_options: ClientOptions = {
+    "is_sandboxed": True,
+    "sandbox_options": sandbox_options
+}
+
+# Create client with sandboxed mode enabled
+client = MCPClient.from_config_file(
+    config_path="config.json",
+    options=client_options
+)
+```
+
+| Parameter | Type          | Required | Default | Description                                           |
+| --------- | ------------- | -------- | ------- | ----------------------------------------------------- |
+| `options` | ClientOptions | No       | {}      | Client configuration options, including sandbox mode  |
+
+The `ClientOptions` type is a TypedDict with the following fields:
+
+| Field             | Type           | Required | Default | Description                                        |
+| ----------------- | -------------- | -------- | ------- | -------------------------------------------------- |
+| `is_sandboxed`    | bool           | No       | False   | Whether to run commands in a sandbox environment   |
+| `sandbox_options` | SandboxOptions | No       | None    | Configuration options for the sandbox environment  |
+
+The `SandboxOptions` type supports the following options:
+
+| Option                 | Type | Required | Default               | Description                                                                               |
+| ---------------------- | ---- | -------- | --------------------- | ----------------------------------------------------------------------------------------- |
+| `api_key`              | str  | Yes      | None                  | E2B API key. Required - can be provided directly or via E2B_API_KEY environment variable |
+| `sandbox_template_id`  | str  | No       | "base"                | Template ID for the sandbox environment                                                   |
+| `supergateway_command` | str  | No       | "npx -y supergateway" | Command to run supergateway                                                               |
+
+**When to use sandboxed execution**:
+
+- When you want to run MCP servers without installing their dependencies locally
+- To ensure consistent execution environments across different systems
+- For improved security through isolation
+- To leverage cloud resources for resource-intensive MCP servers
+
 ### Core Methods

 #### create_session
@@ -133,22 +187,22 @@ agent = MCPAgent(
 )
 ```

-| Parameter                 | Type                | Required | Default | Description                                                   |
-| ------------------------- | ------------------- | -------- | ------- | ------------------------------------------------------------ |
-| `llm`                     | BaseLanguageModel   | Yes      | -       | Any LangChain-compatible language model                       |
-| `client`                  | MCPClient           | No       | None    | The MCPClient instance                                        |
-| `connectors`              | list[BaseConnector] | No       | None    | List of connectors if not using client                        |
-| `server_name`             | str                 | No       | None    | Name of the server to use                                     |
-| `max_steps`               | int                 | No       | 5       | Maximum number of steps the agent can take                    |
-| `auto_initialize`         | bool                | No       | False   | Whether to initialize automatically                           |
-| `memory_enabled`          | bool                | No       | True    | Whether to enable memory                                      |
-| `system_prompt`           | str                 | No       | None    | Custom system prompt                                          |
-| `system_prompt_template`  | str                 | No       | None    | Custom system prompt template                                 |
-| `additional_instructions` | str                 | No       | None    | Additional instructions for the agent                         |
-| `session_options`         | dict                | No       | {}      | Additional options for session creation                       |
-| `output_parser`           | OutputParser        | No       | None    | Custom output parser for LLM responses                        |
+| Parameter                 | Type                | Required | Default | Description                                                                                                                                       |
+| ------------------------- | ------------------- | -------- | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `llm`                     | BaseLanguageModel   | Yes      | -       | Any LangChain-compatible language model                                                                                                           |
+| `client`                  | MCPClient           | No       | None    | The MCPClient instance                                                                                                                            |
+| `connectors`              | list[BaseConnector] | No       | None    | List of connectors if not using client                                                                                                            |
+| `server_name`             | str                 | No       | None    | Name of the server to use                                                                                                                         |
+| `max_steps`               | int                 | No       | 5       | Maximum number of steps the agent can take                                                                                                        |
+| `auto_initialize`         | bool                | No       | False   | Whether to initialize automatically                                                                                                               |
+| `memory_enabled`          | bool                | No       | True    | Whether to enable memory                                                                                                                          |
+| `system_prompt`           | str                 | No       | None    | Custom system prompt                                                                                                                              |
+| `system_prompt_template`  | str                 | No       | None    | Custom system prompt template                                                                                                                     |
+| `additional_instructions` | str                 | No       | None    | Additional instructions for the agent                                                                                                             |
+| `session_options`         | dict                | No       | {}      | Additional options for session creation                                                                                                           |
+| `output_parser`           | OutputParser        | No       | None    | Custom output parser for LLM responses                                                                                                            |
 | `use_server_manager`      | bool                | No       | False   | If `True`, enables automatic selection of the appropriate server based on the chosen tool when multiple servers are configured via `MCPClient`.  |
-| `disallowed_tools`        | list[str]           | No       | None    | List of tool names that should not be available to the agent |
+| `disallowed_tools`        | list[str]           | No       | None    | List of tool names that should not be available to the agent                                                                                     |

 **When to use different parameters**:
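
The realignment above only widens the table's Description column; the parameters themselves are unchanged. As a quick illustration of how a few of them combine (the model name and the blocked tool name below are placeholders, not values taken from the package):

```python
# Illustrative MCPAgent construction using parameters from the table above.
# "gpt-4o" and "file_delete" are example values, not defaults from mcp-use.
from langchain_openai import ChatOpenAI
from mcp_use import MCPAgent, MCPClient

client = MCPClient.from_config_file(config_path="config.json")

agent = MCPAgent(
    llm=ChatOpenAI(model="gpt-4o"),     # any LangChain-compatible model
    client=client,                      # the MCPClient instance
    max_steps=10,                       # raise the documented default of 5
    use_server_manager=True,            # pick the right server per tool call
    disallowed_tools=["file_delete"],   # block a hypothetical dangerous tool
)
```
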
@@ -459,3 +513,91 @@ This approach is useful when:
 - You want to limit the agent's capabilities for specific tasks
 - You're concerned about security implications of certain tools
 - You want to focus the agent on specific functionality
+
+### Sandboxed Execution with Multiple Servers
+
+Configure and use multiple sandboxed MCP servers:
+
+```python
+import os
+from dotenv import load_dotenv
+from mcp_use import MCPClient, MCPAgent
+from mcp_use.types.sandbox import SandboxOptions
+from langchain_anthropic import ChatAnthropic
+
+# Load environment variables
+load_dotenv()
+
+# Define sandbox options
+sandbox_options: SandboxOptions = {
+    "api_key": os.getenv("E2B_API_KEY"),
+    "sandbox_template_id": "code-interpreter-v1"
+}
+
+# Create client with multiple sandboxed servers
+client = MCPClient.from_dict(
+    {
+        "mcpServers": {
+            "browser": {
+                "command": "npx",
+                "args": ["@playwright/mcp@latest"]
+            },
+            "command": {
+                "command": "npx",
+                "args": ["-y", "@modelcontextprotocol/server-everything"]
+            }
+        }
+    },
+    is_sandboxed=True,
+    sandbox_options=sandbox_options
+)
+
+# Create agent with server manager enabled
+agent = MCPAgent(
+    llm=ChatAnthropic(model="claude-3-5-sonnet"),
+    client=client,
+    use_server_manager=True  # Automatically selects the appropriate server
+)
+
+# Run a task that will use tools from both servers
+result = await agent.run(
+    "Search for information about Python and then use the command line to check the latest version"
+)
+```
+
+This approach is useful when:
+
+- You need to use multiple MCP servers but don't want to install their dependencies locally
+- You want to ensure consistent execution environments for all servers
+- You need to leverage cloud resources for resource-intensive MCP servers
+
+## Error Handling
+
+mcp_use provides several exception types to handle different error scenarios:
+
+| Exception                | Description                       | When It Occurs                      |
+| ------------------------ | --------------------------------- | ----------------------------------- |
+| `MCPConnectionError`     | Connection to MCP server failed   | Network issues, server not running  |
+| `MCPAuthenticationError` | Authentication with server failed | Invalid credentials or tokens       |
+| `MCPTimeoutError`        | Operation timed out               | Server takes too long to respond    |
+| `MCPServerError`         | Server returned an error          | Internal server error               |
+| `MCPClientError`         | Client-side error                 | Invalid configuration or parameters |
+| `MCPError`               | Generic MCP-related error         | Any other MCP-related issue         |
+
+**Handling Strategies**:
+
+```python
+from mcp_use.exceptions import MCPConnectionError, MCPTimeoutError
+
+try:
+    result = await agent.run("Find information")
+except MCPConnectionError:
+    # Handle connection issues
+    print("Failed to connect to the MCP server")
+except MCPTimeoutError:
+    # Handle timeout issues
+    print("Operation timed out")
+except Exception as e:
+    # Handle other exceptions
+    print(f"An error occurred: {e}")
+```
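
Putting the new pieces together: a short sketch that combines the sandboxed client with the error-handling pattern documented above and guarantees cleanup. It assumes the exception classes live in `mcp_use.exceptions` as the diff states and that `E2B_API_KEY` is set in the environment.

```python
# Sketch combining sandboxed execution with the documented error handling.
# Assumes mcp_use.exceptions exposes these classes as described above.
import asyncio
import os

from langchain_openai import ChatOpenAI
from mcp_use import MCPAgent, MCPClient
from mcp_use.exceptions import MCPConnectionError, MCPTimeoutError


async def main() -> None:
    client = MCPClient(
        config={
            "mcpServers": {
                "everything": {
                    "command": "npx",
                    "args": ["-y", "@modelcontextprotocol/server-everything"],
                }
            }
        },
        options={
            "is_sandboxed": True,
            "sandbox_options": {"api_key": os.getenv("E2B_API_KEY")},
        },
    )
    agent = MCPAgent(llm=ChatOpenAI(model="gpt-4o"), client=client)
    try:
        print(await agent.run("List the tools you can see"))
    except MCPConnectionError:
        print("Failed to connect to the MCP server")
    except MCPTimeoutError:
        print("Operation timed out")
    finally:
        # Tear the sandbox session down even if the run fails.
        await client.close_all_sessions()


if __name__ == "__main__":
    asyncio.run(main())
```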