mcp-use 1.2.13-py3-none-any.whl → 1.3.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of mcp-use might be problematic.
- mcp_use/agents/mcpagent.py +117 -22
- mcp_use/client.py +35 -9
- mcp_use/config.py +30 -4
- mcp_use/connectors/__init__.py +12 -5
- mcp_use/connectors/base.py +135 -37
- mcp_use/connectors/http.py +108 -30
- mcp_use/connectors/sandbox.py +296 -0
- mcp_use/connectors/stdio.py +7 -2
- mcp_use/connectors/utils.py +13 -0
- mcp_use/connectors/websocket.py +7 -2
- mcp_use/session.py +1 -4
- mcp_use/task_managers/__init__.py +2 -1
- mcp_use/task_managers/base.py +10 -4
- mcp_use/task_managers/streamable_http.py +81 -0
- mcp_use/task_managers/websocket.py +5 -0
- mcp_use/telemetry/__init__.py +0 -0
- mcp_use/telemetry/events.py +93 -0
- mcp_use/telemetry/posthog.py +214 -0
- mcp_use/telemetry/utils.py +48 -0
- mcp_use/types/sandbox.py +23 -0
- mcp_use/utils.py +27 -0
- {mcp_use-1.2.13.dist-info → mcp_use-1.3.1.dist-info}/METADATA +209 -32
- mcp_use-1.3.1.dist-info/RECORD +46 -0
- mcp_use-1.2.13.dist-info/RECORD +0 -37
- {mcp_use-1.2.13.dist-info → mcp_use-1.3.1.dist-info}/WHEEL +0 -0
- {mcp_use-1.2.13.dist-info → mcp_use-1.3.1.dist-info}/licenses/LICENSE +0 -0
mcp_use/telemetry/posthog.py
ADDED

@@ -0,0 +1,214 @@
+import logging
+import os
+import platform
+import uuid
+from collections.abc import Callable
+from functools import wraps
+from pathlib import Path
+from typing import Any
+
+from posthog import Posthog
+
+from mcp_use.logging import MCP_USE_DEBUG
+from mcp_use.telemetry.events import (
+    BaseTelemetryEvent,
+    MCPAgentExecutionEvent,
+)
+from mcp_use.telemetry.utils import get_package_version
+from mcp_use.utils import singleton
+
+logger = logging.getLogger(__name__)
+
+
+def requires_telemetry(func: Callable) -> Callable:
+    """Decorator that skips function execution if telemetry is disabled"""
+
+    @wraps(func)
+    def wrapper(self, *args, **kwargs):
+        if not self._posthog_client:
+            return None
+        return func(self, *args, **kwargs)
+
+    return wrapper
+
+
+def get_cache_home() -> Path:
+    """Get platform-appropriate cache directory."""
+    # XDG_CACHE_HOME for Linux and manually set envs
+    env_var: str | None = os.getenv("XDG_CACHE_HOME")
+    if env_var and (path := Path(env_var)).is_absolute():
+        return path
+
+    system = platform.system()
+    if system == "Windows":
+        appdata = os.getenv("LOCALAPPDATA") or os.getenv("APPDATA")
+        if appdata:
+            return Path(appdata)
+        return Path.home() / "AppData" / "Local"
+    elif system == "Darwin":  # macOS
+        return Path.home() / "Library" / "Caches"
+    else:  # Linux or other Unix
+        return Path.home() / ".cache"
+
+
+@singleton
+class Telemetry:
+    """
+    Service for capturing anonymized telemetry data.
+    If the environment variable `MCP_USE_ANONYMIZED_TELEMETRY=false`, telemetry will be disabled.
+    """
+
+    USER_ID_PATH = str(get_cache_home() / "mcp_use" / "telemetry_user_id")
+    PROJECT_API_KEY = "phc_lyTtbYwvkdSbrcMQNPiKiiRWrrM1seyKIMjycSvItEI"
+    HOST = "https://eu.i.posthog.com"
+    UNKNOWN_USER_ID = "UNKNOWN_USER_ID"
+
+    _curr_user_id = None
+
+    def __init__(self):
+        telemetry_disabled = os.getenv("MCP_USE_ANONYMIZED_TELEMETRY", "true").lower() == "false"
+
+        if telemetry_disabled:
+            self._posthog_client = None
+            logger.debug("Telemetry disabled")
+        else:
+            logger.info(
+                "Anonymized telemetry enabled. Set MCP_USE_ANONYMIZED_TELEMETRY=false to disable."
+            )
+            try:
+                self._posthog_client = Posthog(
+                    project_api_key=self.PROJECT_API_KEY,
+                    host=self.HOST,
+                    disable_geoip=False,
+                    enable_exception_autocapture=True,
+                )
+
+                # Silence posthog's logging unless debug mode (level 2)
+                if MCP_USE_DEBUG < 2:
+                    posthog_logger = logging.getLogger("posthog")
+                    posthog_logger.disabled = True
+
+            except Exception as e:
+                logger.warning(f"Failed to initialize telemetry: {e}")
+                self._posthog_client = None
+
+    @property
+    def user_id(self) -> str:
+        """Get or create a persistent anonymous user ID"""
+        if self._curr_user_id:
+            return self._curr_user_id
+
+        try:
+            if not os.path.exists(self.USER_ID_PATH):
+                os.makedirs(os.path.dirname(self.USER_ID_PATH), exist_ok=True)
+                with open(self.USER_ID_PATH, "w") as f:
+                    new_user_id = str(uuid.uuid4())
+                    f.write(new_user_id)
+                self._curr_user_id = new_user_id
+            else:
+                with open(self.USER_ID_PATH) as f:
+                    self._curr_user_id = f.read().strip()
+        except Exception as e:
+            logger.debug(f"Failed to get/create user ID: {e}")
+            self._curr_user_id = self.UNKNOWN_USER_ID
+
+        return self._curr_user_id
+
+    @requires_telemetry
+    def capture(self, event: BaseTelemetryEvent) -> None:
+        """Capture a telemetry event"""
+        try:
+            # Add package version to all events
+            properties = event.properties.copy()
+            properties["mcp_use_version"] = get_package_version()
+
+            self._posthog_client.capture(
+                distinct_id=self.user_id, event=event.name, properties=properties
+            )
+        except Exception as e:
+            logger.debug(f"Failed to track event {event.name}: {e}")
+
+    @requires_telemetry
+    def track_event(self, event_name: str, properties: dict[str, Any] | None = None) -> None:
+        """Track a telemetry event with optional properties (legacy method)"""
+        try:
+            # Add package version to all events
+            event_properties = (properties or {}).copy()
+            event_properties["mcp_use_version"] = get_package_version()
+
+            self._posthog_client.capture(
+                distinct_id=self.user_id, event=event_name, properties=event_properties
+            )
+        except Exception as e:
+            logger.debug(f"Failed to track event {event_name}: {e}")
+
+    @requires_telemetry
+    def track_agent_execution(
+        self,
+        execution_method: str,
+        query: str,
+        success: bool,
+        model_provider: str,
+        model_name: str,
+        server_count: int,
+        server_identifiers: list[dict[str, str]],
+        total_tools_available: int,
+        tools_available_names: list[str],
+        max_steps_configured: int,
+        memory_enabled: bool,
+        use_server_manager: bool,
+        max_steps_used: int | None,
+        manage_connector: bool,
+        external_history_used: bool,
+        steps_taken: int | None = None,
+        tools_used_count: int | None = None,
+        tools_used_names: list[str] | None = None,
+        response: str | None = None,
+        execution_time_ms: int | None = None,
+        error_type: str | None = None,
+        conversation_history_length: int | None = None,
+    ) -> None:
+        """Track comprehensive agent execution"""
+        event = MCPAgentExecutionEvent(
+            execution_method=execution_method,
+            query=query,
+            success=success,
+            model_provider=model_provider,
+            model_name=model_name,
+            server_count=server_count,
+            server_identifiers=server_identifiers,
+            total_tools_available=total_tools_available,
+            tools_available_names=tools_available_names,
+            max_steps_configured=max_steps_configured,
+            memory_enabled=memory_enabled,
+            use_server_manager=use_server_manager,
+            max_steps_used=max_steps_used,
+            manage_connector=manage_connector,
+            external_history_used=external_history_used,
+            steps_taken=steps_taken,
+            tools_used_count=tools_used_count,
+            tools_used_names=tools_used_names,
+            response=response,
+            execution_time_ms=execution_time_ms,
+            error_type=error_type,
+            conversation_history_length=conversation_history_length,
+        )
+        self.capture(event)
+
+    @requires_telemetry
+    def flush(self) -> None:
+        """Flush any queued telemetry events"""
+        try:
+            self._posthog_client.flush()
+            logger.debug("PostHog client telemetry queue flushed")
+        except Exception as e:
+            logger.debug(f"Failed to flush PostHog client: {e}")
+
+    @requires_telemetry
+    def shutdown(self) -> None:
+        """Shutdown telemetry client and flush remaining events"""
+        try:
+            self._posthog_client.shutdown()
+            logger.debug("PostHog client shutdown successfully")
+        except Exception as e:
+            logger.debug(f"Error shutting down telemetry: {e}")
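For orientation, here is a minimal usage sketch of the telemetry service added above. It is not part of the package diff: the import path `mcp_use.telemetry.posthog` is inferred from the file list, and the event name and properties are illustrative.

```python
import os

# Opt out before the singleton is first constructed; __init__ reads this variable.
os.environ["MCP_USE_ANONYMIZED_TELEMETRY"] = "false"

from mcp_use.telemetry.posthog import Telemetry  # module path assumed from the file list

telemetry = Telemetry()
assert telemetry is Telemetry()  # @singleton: every call returns the same instance

# With telemetry disabled, @requires_telemetry turns these calls into no-ops.
telemetry.track_event("example_event", {"source": "sketch"})  # illustrative event name
telemetry.flush()
telemetry.shutdown()
```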
mcp_use/telemetry/utils.py
ADDED

@@ -0,0 +1,48 @@
+"""
+Utility functions for extracting model information from LangChain LLMs.
+
+This module provides utilities to extract provider and model information
+from LangChain language models for telemetry purposes.
+"""
+
+import importlib.metadata
+
+from langchain_core.language_models.base import BaseLanguageModel
+
+
+def get_package_version() -> str:
+    """Get the current mcp-use package version."""
+    try:
+        return importlib.metadata.version("mcp-use")
+    except importlib.metadata.PackageNotFoundError:
+        return "unknown"
+
+
+def get_model_provider(llm: BaseLanguageModel) -> str:
+    """Extract the model provider from LangChain LLM using BaseChatModel standards."""
+    # Use LangChain's standard _llm_type property for identification
+    return getattr(llm, "_llm_type", llm.__class__.__name__.lower())
+
+
+def get_model_name(llm: BaseLanguageModel) -> str:
+    """Extract the model name from LangChain LLM using BaseChatModel standards."""
+    # First try _identifying_params which may contain model info
+    if hasattr(llm, "_identifying_params"):
+        identifying_params = llm._identifying_params
+        if isinstance(identifying_params, dict):
+            # Common keys that contain model names
+            for key in ["model", "model_name", "model_id", "deployment_name"]:
+                if key in identifying_params:
+                    return str(identifying_params[key])
+
+    # Fallback to direct model attributes
+    return getattr(llm, "model", getattr(llm, "model_name", llm.__class__.__name__))
+
+
+def extract_model_info(llm: BaseLanguageModel) -> tuple[str, str]:
+    """Extract both provider and model name from LangChain LLM.
+
+    Returns:
+        Tuple of (provider, model_name)
+    """
+    return get_model_provider(llm), get_model_name(llm)
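The helpers above are easier to follow with a concrete call. Below is a small sketch, assuming `langchain-openai` is installed as in the README's quick start; the placeholder key is never sent anywhere because no request is made, and the exact provider string depends on the model class.

```python
from langchain_openai import ChatOpenAI

from mcp_use.telemetry.utils import extract_model_info, get_package_version

# Constructing the model makes no API call, so a placeholder key is enough here.
llm = ChatOpenAI(model="gpt-4o", api_key="sk-placeholder")

provider, model_name = extract_model_info(llm)
print(provider, model_name)   # e.g. "openai-chat" "gpt-4o" (provider comes from _llm_type)
print(get_package_version())  # "1.3.1" when the wheel is installed, otherwise "unknown"
```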
mcp_use/types/sandbox.py
ADDED

@@ -0,0 +1,23 @@
+"""Type definitions for sandbox-related configurations."""
+
+from typing import NotRequired, TypedDict
+
+
+class SandboxOptions(TypedDict):
+    """Configuration options for sandbox execution.
+
+    This type defines the configuration options available when running
+    MCP servers in a sandboxed environment (e.g., using E2B).
+    """
+
+    api_key: str
+    """Direct API key for sandbox provider (e.g., E2B API key).
+    If not provided, will use E2B_API_KEY environment variable."""
+
+    sandbox_template_id: NotRequired[str]
+    """Template ID for the sandbox environment.
+    Default: 'base'"""
+
+    supergateway_command: NotRequired[str]
+    """Command to run supergateway.
+    Default: 'npx -y supergateway'"""
mcp_use/utils.py
ADDED

@@ -0,0 +1,27 @@
+def singleton(cls):
+    """A decorator that implements the singleton pattern for a class.
+
+    This decorator ensures that only one instance of a class is ever created.
+    Subsequent attempts to create a new instance will return the existing one.
+
+    Usage:
+        @singleton
+        class MySingletonClass:
+            def __init__(self):
+                # ... initialization ...
+                pass
+
+    Args:
+        cls: The class to be decorated.
+
+    Returns:
+        A wrapper function that handles instance creation.
+    """
+    instance = [None]
+
+    def wrapper(*args, **kwargs):
+        if instance[0] is None:
+            instance[0] = cls(*args, **kwargs)
+        return instance[0]
+
+    return wrapper
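A short sketch of the closure-based singleton above; the `Counter` class is hypothetical and only demonstrates that a second call returns the first instance unchanged. Note that the decorator replaces the class name with a factory function, so the decorated name can no longer be used for `isinstance` checks.

```python
from mcp_use.utils import singleton


@singleton
class Counter:  # hypothetical example class
    def __init__(self) -> None:
        self.value = 0


a = Counter()
a.value += 1
b = Counter()  # returns the existing instance; __init__ does not run again
assert a is b and b.value == 1
```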
{mcp_use-1.2.13.dist-info → mcp_use-1.3.1.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcp-use
-Version: 1.2.13
+Version: 1.3.1
 Summary: MCP Library for LLMs
 Author-email: Pietro Zullo <pietro.zullo@gmail.com>
 License: MIT
@@ -18,7 +18,8 @@ Requires-Dist: aiohttp>=3.9.0
 Requires-Dist: jsonschema-pydantic>=0.1.0
 Requires-Dist: langchain-community>=0.0.10
 Requires-Dist: langchain>=0.1.0
-Requires-Dist: mcp>=1.
+Requires-Dist: mcp>=1.9.3
+Requires-Dist: posthog>=4.8.0
 Requires-Dist: pydantic>=2.0.0
 Requires-Dist: python-dotenv>=1.0.0
 Requires-Dist: typing-extensions>=4.8.0
@@ -27,12 +28,15 @@ Provides-Extra: anthropic
 Requires-Dist: anthropic>=0.15.0; extra == 'anthropic'
 Provides-Extra: dev
 Requires-Dist: black>=23.9.0; extra == 'dev'
+Requires-Dist: fastmcp; extra == 'dev'
 Requires-Dist: isort>=5.12.0; extra == 'dev'
 Requires-Dist: mypy>=1.5.0; extra == 'dev'
 Requires-Dist: pytest-asyncio>=0.21.0; extra == 'dev'
 Requires-Dist: pytest-cov>=4.1.0; extra == 'dev'
 Requires-Dist: pytest>=7.4.0; extra == 'dev'
 Requires-Dist: ruff>=0.1.0; extra == 'dev'
+Provides-Extra: e2b
+Requires-Dist: e2b-code-interpreter>=1.5.0; extra == 'e2b'
 Provides-Extra: openai
 Requires-Dist: openai>=1.10.0; extra == 'openai'
 Provides-Extra: search
@@ -67,30 +71,64 @@ Description-Content-Type: text/markdown
 <img src="https://img.shields.io/github/stars/pietrozullo/mcp-use?style=social" /></a>
 </p>
 <p align="center">
-<a href="https://x.com/pietrozullo" alt="Twitter Follow">
+<a href="https://x.com/pietrozullo" alt="Twitter Follow - Pietro">
 <img src="https://img.shields.io/twitter/follow/Pietro?style=social" /></a>
+<a href="https://x.com/pederzh" alt="Twitter Follow - Luigi">
+<img src="https://img.shields.io/twitter/follow/Luigi?style=social" /></a>
 <a href="https://discord.gg/XkNkSkMz3V" alt="Discord">
 <img src="https://dcbadge.limes.pink/api/server/https://discord.gg/XkNkSkMz3V?style=flat" /></a>
 </p>
 🌐 MCP-Use is the open source way to connect **any LLM to any MCP server** and build custom agents that have tool access, without using closed source or application clients.
 
+💬 Get started quickly - chat with your servers on our <b>hosted version</b>! <b>[Try mcp-use chat *(beta)* ](https://chat.mcp-use.io)</b>.
+
 💡 Let developers easily connect any LLM to tools like web browsing, file operations, and more.
 
 # Features
 
 ## ✨ Key Features
-… (12 removed lines, the previous feature list, not shown in this rendering)
+<table>
+<tr>
+<th width="400">Feature</th>
+<th>Description</th>
+</tr>
+<tr>
+<td>🔄 <a href="#quick-start"><strong>Ease of use</strong></a></td>
+<td>Create your first MCP capable agent you need only 6 lines of code</td>
+</tr>
+<tr>
+<td>🤖 <a href="#installing-langchain-providers"><strong>LLM Flexibility</strong></a></td>
+<td>Works with any langchain supported LLM that supports tool calling (OpenAI, Anthropic, Groq, LLama etc.)</td>
+</tr>
+<tr>
+<td>🌐 <a href="https://mcp-use.io/builder"><strong>Code Builder</strong></a></td>
+<td>Explore MCP capabilities and generate starter code with the interactive <a href="https://mcp-use.io/builder">code builder</a>.</td>
+</tr>
+<tr>
+<td>🔗 <a href="#http-connection-example"><strong>HTTP Support</strong></a></td>
+<td>Direct connection to MCP servers running on specific HTTP ports</td>
+</tr>
+<tr>
+<td>⚙️ <a href="#dynamic-server-selection-server-manager"><strong>Dynamic Server Selection</strong></a></td>
+<td>Agents can dynamically choose the most appropriate MCP server for a given task from the available pool</td>
+</tr>
+<tr>
+<td>🧩 <a href="#multi-server-support"><strong>Multi-Server Support</strong></a></td>
+<td>Use multiple MCP servers simultaneously in a single agent</td>
+</tr>
+<tr>
+<td>🛡️ <a href="#tool-access-control"><strong>Tool Restrictions</strong></a></td>
+<td>Restrict potentially dangerous tools like file system or network access</td>
+</tr>
+<tr>
+<td>🔧 <a href="#build-a-custom-agent"><strong>Custom Agents</strong></a></td>
+<td>Build your own agents with any framework using the LangChain adapter or create new adapters</td>
+</tr>
+<tr>
+<td>❓ <a href="https://mcp-use.io/what-should-we-build-next"><strong>What should we build next</strong></a></td>
+<td>Let us know what you'd like us to build next</td>
+</tr>
+</table>
 
 # Quick start
 
@@ -118,11 +156,8 @@ pip install langchain-openai
 
 # For Anthropic
 pip install langchain-anthropic
-
-# For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/)
 ```
-
-and add your API keys for the provider you want to use to your `.env` file.
+For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/) and add your API keys for the provider you want to use to your `.env` file.
 
 ```bash
 OPENAI_API_KEY=
@@ -561,6 +596,96 @@ if __name__ == "__main__":
     asyncio.run(main())
 ```
 
+# Sandboxed Execution
+
+MCP-Use supports running MCP servers in a sandboxed environment using E2B's cloud infrastructure. This allows you to run MCP servers without having to install dependencies locally, making it easier to use tools that might have complex setups or system requirements.
+
+## Installation
+
+To use sandboxed execution, you need to install the E2B dependency:
+
+```bash
+# Install mcp-use with E2B support
+pip install "mcp-use[e2b]"
+
+# Or install the dependency directly
+pip install e2b-code-interpreter
+```
+
+You'll also need an E2B API key. You can sign up at [e2b.dev](https://e2b.dev) to get your API key.
+
+## Configuration
+
+To enable sandboxed execution, use the sandbox parameter when creating your `MCPClient`:
+
+```python
+import asyncio
+import os
+from dotenv import load_dotenv
+from langchain_openai import ChatOpenAI
+from mcp_use import MCPAgent, MCPClient
+from mcp_use.types.sandbox import SandboxOptions
+
+async def main():
+    # Load environment variables (needs E2B_API_KEY)
+    load_dotenv()
+
+    # Define MCP server configuration
+    server_config = {
+        "mcpServers": {
+            "everything": {
+                "command": "npx",
+                "args": ["-y", "@modelcontextprotocol/server-everything"],
+            }
+        }
+    }
+
+    # Define sandbox options
+    sandbox_options: SandboxOptions = {
+        "api_key": os.getenv("E2B_API_KEY"),  # API key can also be provided directly
+        "sandbox_template_id": "base",  # Use base template
+    }
+
+    # Create client with sandboxed mode enabled
+    client = MCPClient(
+        config=server_config,
+        sandbox=True,
+        sandbox_options=sandbox_options,
+    )
+
+    # Create agent with the sandboxed client
+    llm = ChatOpenAI(model="gpt-4o")
+    agent = MCPAgent(llm=llm, client=client)
+
+    # Run your agent
+    result = await agent.run("Use the command line tools to help me add 1+1")
+    print(result)
+
+    # Clean up
+    await client.close_all_sessions()
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+## Sandbox Options
+
+The `SandboxOptions` type provides configuration for the sandbox environment:
+
+| Option | Description | Default |
+| ---------------------- | ---------------------------------------------------------------------------------------- | --------------------- |
+| `api_key` | E2B API key. Required - can be provided directly or via E2B_API_KEY environment variable | None |
+| `sandbox_template_id` | Template ID for the sandbox environment | "base" |
+| `supergateway_command` | Command to run supergateway | "npx -y supergateway" |
+
+## Benefits of Sandboxed Execution
+
+- **No local dependencies**: Run MCP servers without installing dependencies locally
+- **Isolation**: Execute code in a secure, isolated environment
+- **Consistent environment**: Ensure consistent behavior across different systems
+- **Resource efficiency**: Offload resource-intensive tasks to cloud infrastructure
+
 # Build a Custom Agent:
 
 You can also build your own custom agent using the LangChain adapter:
@@ -652,15 +777,6 @@ agent = MCPAgent(
 
 This is useful when you only need to see the agent's steps and decision-making process without all the low-level debug information from other components.
 
-
-# Roadmap
-
-<ul>
-<li>[x] Multiple Servers at once </li>
-<li>[x] Test remote connectors (http, ws)</li>
-<li>[ ] ... </li>
-</ul>
-
 ## Star History
 
 [](https://www.star-history.com/#pietrozullo/mcp-use&Date)
@@ -669,12 +785,77 @@ This is useful when you only need to see the agent's steps and decision-making p
 
 We love contributions! Feel free to open issues for bugs or feature requests. Look at [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
 
+## Contributors
+
+Thanks to all our amazing contributors!
+
+<a href="https://github.com/mcp-use/mcp-use/graphs/contributors">
+  <img src="https://contrib.rocks/image?repo=mcp-use/mcp-use" />
+</a>
+
+
+## Top Starred Dependents
+
+<!-- gh-dependents-info-used-by-start -->
+
+<table>
+<tr>
+<th width="400">Repository</th>
+<th>Stars</th>
+</tr>
+<tr>
+<td><img src="https://avatars.githubusercontent.com/u/170207473?s=40&v=4" width="20" height="20" style="vertical-align: middle; margin-right: 8px;"> <a href="https://github.com/tavily-ai/meeting-prep-agent"><strong>tavily-ai/meeting-prep-agent</strong></a></td>
+<td>⭐ 112</td>
+</tr>
+<tr>
+<td><img src="https://avatars.githubusercontent.com/u/20041231?s=40&v=4" width="20" height="20" style="vertical-align: middle; margin-right: 8px;"> <a href="https://github.com/krishnaik06/MCP-CRASH-Course"><strong>krishnaik06/MCP-CRASH-Course</strong></a></td>
+<td>⭐ 37</td>
+</tr>
+<tr>
+<td><img src="https://avatars.githubusercontent.com/u/892404?s=40&v=4" width="20" height="20" style="vertical-align: middle; margin-right: 8px;"> <a href="https://github.com/truemagic-coder/solana-agent-app"><strong>truemagic-coder/solana-agent-app</strong></a></td>
+<td>⭐ 29</td>
+</tr>
+<tr>
+<td><img src="https://avatars.githubusercontent.com/u/8344498?s=40&v=4" width="20" height="20" style="vertical-align: middle; margin-right: 8px;"> <a href="https://github.com/schogini/techietalksai"><strong>schogini/techietalksai</strong></a></td>
+<td>⭐ 21</td>
+</tr>
+<tr>
+<td><img src="https://avatars.githubusercontent.com/u/201161342?s=40&v=4" width="20" height="20" style="vertical-align: middle; margin-right: 8px;"> <a href="https://github.com/autometa-dev/whatsapp-mcp-voice-agent"><strong>autometa-dev/whatsapp-mcp-voice-agent</strong></a></td>
+<td>⭐ 18</td>
+</tr>
+<tr>
+<td><img src="https://avatars.githubusercontent.com/u/100749943?s=40&v=4" width="20" height="20" style="vertical-align: middle; margin-right: 8px;"> <a href="https://github.com/Deniscartin/mcp-cli"><strong>Deniscartin/mcp-cli</strong></a></td>
+<td>⭐ 17</td>
+</tr>
+<tr>
+<td><img src="https://avatars.githubusercontent.com/u/6764390?s=40&v=4" width="20" height="20" style="vertical-align: middle; margin-right: 8px;"> <a href="https://github.com/elastic/genai-workshops"><strong>elastic/genai-workshops</strong></a></td>
+<td>⭐ 9</td>
+</tr>
+<tr>
+<td><img src="https://avatars.githubusercontent.com/u/6688805?s=40&v=4" width="20" height="20" style="vertical-align: middle; margin-right: 8px;"> <a href="https://github.com/innovaccer/Healthcare-MCP"><strong>innovaccer/Healthcare-MCP</strong></a></td>
+<td>⭐ 6</td>
+</tr>
+<tr>
+<td><img src="https://avatars.githubusercontent.com/u/205593730?s=40&v=4" width="20" height="20" style="vertical-align: middle; margin-right: 8px;"> <a href="https://github.com/Qingyon-AI/Revornix"><strong>Qingyon-AI/Revornix</strong></a></td>
+<td>⭐ 5</td>
+</tr>
+<tr>
+<td><img src="https://avatars.githubusercontent.com/u/68845761?s=40&v=4" width="20" height="20" style="vertical-align: middle; margin-right: 8px;"> <a href="https://github.com/entbappy/MCP-Tutorials"><strong>entbappy/MCP-Tutorials</strong></a></td>
+<td>⭐ 5</td>
+</tr>
+</table>
+
+<!-- gh-dependents-info-used-by-end -->
+
 # Requirements
 
 - Python 3.11+
 - MCP implementation (like Playwright MCP)
 - LangChain and appropriate model libraries (OpenAI, Anthropic, etc.)
 
+# License
+
+MIT
 # Citation
 
 If you use MCP-Use in your research or project, please cite:
@@ -688,7 +869,3 @@ If you use MCP-Use in your research or project, please cite:
   url = {https://github.com/pietrozullo/mcp-use}
 }
 ```
-
-# License
-
-MIT