mcp-use 0.1.0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mcp-use might be problematic. See the registry's advisory page for more details.

@@ -0,0 +1,382 @@
1
+ Metadata-Version: 2.4
2
+ Name: mcp-use
3
+ Version: 1.0.0
4
+ Summary: MCP Library for LLMs
5
+ Author-email: Pietro Zullo <pietro.zullo@gmail.com>
6
+ License: MIT
7
+ License-File: LICENSE
8
+ Classifier: Development Status :: 3 - Alpha
9
+ Classifier: Intended Audience :: Developers
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Operating System :: OS Independent
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Programming Language :: Python :: 3.11
14
+ Classifier: Programming Language :: Python :: 3.12
15
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
16
+ Requires-Python: >=3.11
17
+ Requires-Dist: aiohttp>=3.9.0
18
+ Requires-Dist: jsonschema-pydantic>=0.1.0
19
+ Requires-Dist: langchain-community>=0.0.10
20
+ Requires-Dist: langchain>=0.1.0
21
+ Requires-Dist: mcp
22
+ Requires-Dist: pydantic>=2.0.0
23
+ Requires-Dist: python-dotenv>=1.0.0
24
+ Requires-Dist: typing-extensions>=4.8.0
25
+ Requires-Dist: websockets>=12.0
26
+ Provides-Extra: anthropic
27
+ Requires-Dist: anthropic>=0.15.0; extra == 'anthropic'
28
+ Provides-Extra: dev
29
+ Requires-Dist: black>=23.9.0; extra == 'dev'
30
+ Requires-Dist: isort>=5.12.0; extra == 'dev'
31
+ Requires-Dist: mypy>=1.5.0; extra == 'dev'
32
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == 'dev'
33
+ Requires-Dist: pytest-cov>=4.1.0; extra == 'dev'
34
+ Requires-Dist: pytest>=7.4.0; extra == 'dev'
35
+ Requires-Dist: ruff>=0.1.0; extra == 'dev'
36
+ Provides-Extra: openai
37
+ Requires-Dist: openai>=1.10.0; extra == 'openai'
38
+ Description-Content-Type: text/markdown
39
+
40
+ <picture>
41
+ <img alt="" src="./static/mcpusegrass.png" width="full">
42
+ </picture>
43
+
44
+ <h1 align="center">Open Source MCP Client Library</h1>
45
+
46
+ [![](https://img.shields.io/pypi/dd/mcp_use.svg)](https://pypi.org/project/mcp_use/)
47
+ [![PyPI Downloads](https://img.shields.io/pypi/dm/mcp_use.svg)](https://pypi.org/project/mcp_use/)
48
+ [![PyPI Version](https://img.shields.io/pypi/v/mcp_use.svg)](https://pypi.org/project/mcp_use/)
49
+ [![Python Versions](https://img.shields.io/pypi/pyversions/mcp_use.svg)](https://pypi.org/project/mcp_use/)
50
+ [![License](https://img.shields.io/github/license/pietrozullo/mcp-use)](https://github.com/pietrozullo/mcp-use/blob/main/LICENSE)
51
+ [![Code style: Ruff](https://img.shields.io/badge/code%20style-ruff-000000.svg)](https://github.com/astral-sh/ruff)
52
+ [![GitHub stars](https://img.shields.io/github/stars/pietrozullo/mcp-use?style=social)](https://github.com/pietrozullo/mcp-use/stargazers)
53
+
54
+ 🌐 MCP-Use is the open source way to connect any LLM to MCP tools and build custom agents that have tool access, without using closed source or application clients.
55
+
56
+ 💡 Let developers easily connect any LLM to tools like web browsing, file operations, and more.
57
+
58
+ # Quick start
59
+
60
+ With pip:
61
+
62
+ ```bash
63
+ pip install mcp-use
64
+ ```
65
+
66
+ Or install from source:
67
+
68
+ ```bash
69
+ git clone https://github.com/pietrozullo/mcp-use.git
70
+ cd mcp-use
71
+ pip install -e .
72
+ ```
73
+
74
+ Spin up your agent:
75
+
76
+ ```python
77
+ import asyncio
78
+ import os
79
+ from dotenv import load_dotenv
80
+ from langchain_openai import ChatOpenAI
81
+ from mcp_use import MCPAgent, MCPClient
82
+
83
+ async def main():
84
+ # Load environment variables
85
+ load_dotenv()
86
+
87
+ # Create MCPClient from config file
88
+ client = MCPClient.from_config_file("browser_mcp.json")
89
+
90
+ # Create LLM
91
+ llm = ChatOpenAI(model="gpt-4o")
92
+
93
+ # Create agent with the client
94
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
95
+
96
+ # Run the query
97
+ result = await agent.run(
98
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
99
+ )
100
+ print(f"\nResult: {result}")
101
+
102
+ if __name__ == "__main__":
103
+ asyncio.run(main())
104
+ ```
105
+
106
+ Example configuration file (`browser_mcp.json`):
107
+
108
+ ```json
109
+ {
110
+ "mcpServers": {
111
+ "playwright": {
112
+ "command": "npx",
113
+ "args": ["@playwright/mcp@latest"],
114
+ "env": {
115
+ "DISPLAY": ":1"
116
+ }
117
+ }
118
+ }
119
+ }
120
+ ```
121
+
122
+ Add your API keys for the provider you want to use to your `.env` file.
123
+
124
+ ```bash
125
+ OPENAI_API_KEY=
126
+ ANTHROPIC_API_KEY=
127
+ ```
128
+
129
+ For other settings, models, and more, check out the documentation.
130
+
131
+ # Example Use Cases
132
+
133
+ ## Web Browsing with Playwright
134
+
135
+ ```python
136
+ import asyncio
137
+ import os
138
+ from dotenv import load_dotenv
139
+ from langchain_openai import ChatOpenAI
140
+ from mcp_use import MCPAgent, MCPClient
141
+
142
+ async def main():
143
+ # Load environment variables
144
+ load_dotenv()
145
+
146
+ # Create MCPClient from config file
147
+ client = MCPClient.from_config_file(
148
+ os.path.join(os.path.dirname(__file__), "browser_mcp.json")
149
+ )
150
+
151
+ # Create LLM
152
+ llm = ChatOpenAI(model="gpt-4o")
153
+ # Alternative models:
154
+ # llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
155
+ # llm = ChatGroq(model="llama3-8b-8192")
156
+
157
+ # Create agent with the client
158
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
159
+
160
+ # Run the query
161
+ result = await agent.run(
162
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
163
+ max_steps=30,
164
+ )
165
+ print(f"\nResult: {result}")
166
+
167
+ if __name__ == "__main__":
168
+ asyncio.run(main())
169
+ ```
170
+
171
+ ## Airbnb Search
172
+
173
+ ```python
174
+ import asyncio
175
+ import os
176
+ from dotenv import load_dotenv
177
+ from langchain_anthropic import ChatAnthropic
178
+ from mcp_use import MCPAgent, MCPClient
179
+
180
+ async def run_airbnb_example():
181
+ # Load environment variables
182
+ load_dotenv()
183
+
184
+ # Create MCPClient with Airbnb configuration
185
+ client = MCPClient.from_config_file(
186
+ os.path.join(os.path.dirname(__file__), "airbnb_mcp.json")
187
+ )
188
+
189
+ # Create LLM - you can choose between different models
190
+ llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
191
+
192
+ # Create agent with the client
193
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
194
+
195
+ try:
196
+ # Run a query to search for accommodations
197
+ result = await agent.run(
198
+ "Find me a nice place to stay in Barcelona for 2 adults "
199
+ "for a week in August. I prefer places with a pool and "
200
+ "good reviews. Show me the top 3 options.",
201
+ max_steps=30,
202
+ )
203
+ print(f"\nResult: {result}")
204
+ finally:
205
+ # Ensure we clean up resources properly
206
+ if client.sessions:
207
+ await client.close_all_sessions()
208
+
209
+ if __name__ == "__main__":
210
+ asyncio.run(run_airbnb_example())
211
+ ```
212
+
213
+ Example configuration file (`airbnb_mcp.json`):
214
+
215
+ ```json
216
+ {
217
+ "mcpServers": {
218
+ "airbnb": {
219
+ "command": "npx",
220
+ "args": ["-y", "@openbnb/mcp-server-airbnb"]
221
+ }
222
+ }
223
+ }
224
+ ```
225
+
226
+ ## Blender 3D Creation
227
+
228
+ ```python
229
+ import asyncio
230
+ from dotenv import load_dotenv
231
+ from langchain_anthropic import ChatAnthropic
232
+ from mcp_use import MCPAgent, MCPClient
233
+
234
+ async def run_blender_example():
235
+ # Load environment variables
236
+ load_dotenv()
237
+
238
+ # Create MCPClient with Blender MCP configuration
239
+ config = {"mcpServers": {"blender": {"command": "uvx", "args": ["blender-mcp"]}}}
240
+ client = MCPClient.from_dict(config)
241
+
242
+ # Create LLM
243
+ llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
244
+
245
+ # Create agent with the client
246
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
247
+
248
+ try:
249
+ # Run the query
250
+ result = await agent.run(
251
+ "Create an inflatable cube with soft material and a plane as ground.",
252
+ max_steps=30,
253
+ )
254
+ print(f"\nResult: {result}")
255
+ finally:
256
+ # Ensure we clean up resources properly
257
+ if client.sessions:
258
+ await client.close_all_sessions()
259
+
260
+ if __name__ == "__main__":
261
+ asyncio.run(run_blender_example())
262
+ ```
263
+
264
+ # Configuration File Support
265
+
266
+ MCP-Use supports initialization from configuration files, making it easy to manage and switch between different MCP server setups:
267
+
268
+ ```python
269
+ import asyncio
270
+ from mcp_use import create_session_from_config
271
+
272
+ async def main():
273
+ # Create an MCP session from a config file
274
+ session = create_session_from_config("mcp-config.json")
275
+
276
+ # Initialize the session
277
+ await session.initialize()
278
+
279
+ # Use the session...
280
+
281
+ # Disconnect when done
282
+ await session.disconnect()
283
+
284
+ if __name__ == "__main__":
285
+ asyncio.run(main())
286
+ ```
287
+
288
+ # Multi-Server Support
289
+
290
+ MCP-Use supports working with multiple MCP servers simultaneously, allowing you to combine tools from different servers in a single agent. This is useful for complex tasks that require multiple capabilities, such as web browsing combined with file operations or 3D modeling.
291
+
292
+ ## Configuration
293
+
294
+ You can configure multiple servers in your configuration file:
295
+
296
+ ```json
297
+ {
298
+ "mcpServers": {
299
+ "airbnb": {
300
+ "command": "npx",
301
+ "args": ["-y", "@openbnb/mcp-server-airbnb", "--ignore-robots-txt"]
302
+ },
303
+ "playwright": {
304
+ "command": "npx",
305
+ "args": ["@playwright/mcp@latest"],
306
+ "env": {
307
+ "DISPLAY": ":1"
308
+ }
309
+ }
310
+ }
311
+ }
312
+ ```
313
+
314
+ ## Usage
315
+
316
+ The `MCPClient` class provides several methods for managing multiple servers:
317
+
318
+ ```python
319
+ import asyncio
320
+ from mcp_use import MCPClient, MCPAgent
321
+ from langchain_anthropic import ChatAnthropic
322
+
323
+ async def main():
324
+ # Create client with multiple servers
325
+ client = MCPClient.from_config_file("multi_server_config.json")
326
+
327
+ # Create agent with the client
328
+ agent = MCPAgent(
329
+ llm=ChatAnthropic(model="claude-3-5-sonnet-20240620"),
330
+ client=client
331
+ )
332
+
333
+ try:
334
+ # Run a query that uses tools from multiple servers
335
+ result = await agent.run(
336
+ "Search for a nice place to stay in Barcelona on Airbnb, "
337
+ "then use Google to find nearby restaurants and attractions."
338
+ )
339
+ print(result)
340
+ finally:
341
+ # Clean up all sessions
342
+ await client.close_all_sessions()
343
+
344
+ if __name__ == "__main__":
345
+ asyncio.run(main())
346
+ ```
347
+
348
+ ## Roadmap
349
+
350
+ <ul>
351
+ <li>[x] Multiple Servers at once </li>
352
+ <li>[ ] Test remote connectors (http, ws)</li>
353
+ <li>[ ] ... </li>
354
+ </ul>
355
+
356
+ ## Contributing
357
+
358
+ We love contributions! Feel free to open issues for bugs or feature requests.
359
+
360
+ ## Requirements
361
+
362
+ - Python 3.11+
363
+ - MCP implementation (like Playwright MCP)
364
+ - LangChain and appropriate model libraries (OpenAI, Anthropic, etc.)
365
+
366
+ ## Citation
367
+
368
+ If you use MCP-Use in your research or project, please cite:
369
+
370
+ ```bibtex
371
+ @software{mcp_use2025,
372
+ author = {Zullo, Pietro},
373
+ title = {MCP-Use: MCP Library for Python},
374
+ year = {2025},
375
+ publisher = {GitHub},
376
+ url = {https://github.com/pietrozullo/mcp-use}
377
+ }
378
+ ```
379
+
380
+ ## License
381
+
382
+ MIT
@@ -0,0 +1,24 @@
1
+ mcp_use/__init__.py,sha256=PSoxLAu1GPjfIDPcZiJyI3k66MMS3lcfx5kERUgFb1o,723
2
+ mcp_use/client.py,sha256=0rvlJBwvPD19sjDRtXfnp15-F1VHJlXWxLQNt9cHwPA,8275
3
+ mcp_use/config.py,sha256=D9LuCuT1mFUSBiO2DUGa5Pnd-yjNcvM9u_v11N5UmK8,1624
4
+ mcp_use/logging.py,sha256=2-hSB7ZWcHEx_OFHNg8GIbSGCZx3MW4mZGGWxi2Ew3E,2690
5
+ mcp_use/session.py,sha256=Z4EZTUnQUX0QyGMzkJIrMRTX4SDk6qQUoBld408LIJE,3449
6
+ mcp_use/agents/__init__.py,sha256=ukchMTqCOID6ikvLmJ-6sldWTVFIzztGQo4BX6QeQr8,312
7
+ mcp_use/agents/base.py,sha256=bfuldi_89AbSbNc8KeTiCArRT9V62CNxHOWYkLHWjyA,1605
8
+ mcp_use/agents/langchain_agent.py,sha256=q6zIb9J9fc15HRGDjPAhmPdM_8UOqQToy8ESeyry1kc,10035
9
+ mcp_use/agents/mcpagent.py,sha256=lTRutdT1QIMiTbMSKfSbqlqNq_Y6uDPfkjAzJAKb6H0,12727
10
+ mcp_use/agents/prompts/default.py,sha256=tnwt9vOiVBhdpu-lIHhwEJo3rvE6EobPfUgS9JURBzg,941
11
+ mcp_use/connectors/__init__.py,sha256=jnd-7pPPJMb0UNJ6aD9lInj5Tlamc8lA_mFyG8RWJpo,385
12
+ mcp_use/connectors/base.py,sha256=caUaTfsODUOik8JF9mPtcZDyZhoIz2X12I_BhAfZK10,1616
13
+ mcp_use/connectors/http.py,sha256=KqVf0HXouFoeQ_bBUr6KQifiUjTo7K6EOCRkqVpFx4Q,7763
14
+ mcp_use/connectors/stdio.py,sha256=aEeZ-OZS6yknFPEy-YpwvwFILGIaAsA48uB1I4j8wog,6752
15
+ mcp_use/connectors/websocket.py,sha256=4xqxl9UncrfU6NitvKfB80Hk2g7o0Gc0G5sm6sY3RAk,9534
16
+ mcp_use/task_managers/__init__.py,sha256=6VVe5ceSxXmQvBpjH-6aFud5dRJMNA6pu0qpAnfxpIA,460
17
+ mcp_use/task_managers/base.py,sha256=ksNdxTwq8N-zqymxVoKGnWXq9iqkLYC61uB91o6Mh-4,4888
18
+ mcp_use/task_managers/http.py,sha256=XhrF73RGRnVctBVW2FlFrFTJR2pIGXhtNvfJFiW0Olw,1881
19
+ mcp_use/task_managers/stdio.py,sha256=DEISpXv4mo3d5a-WT8lkWbrXJwUh7QW0nMT_IM3fHGg,2269
20
+ mcp_use/task_managers/websocket.py,sha256=SVgTLFogiynb48eyi6ZioWIKLLWiVBCNE59rXi6GrCM,1943
21
+ mcp_use-1.0.0.dist-info/METADATA,sha256=SutQOwdz5oeqwsqGZJEgALeYG3evLZ8pNabca1lJppw,10113
22
+ mcp_use-1.0.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
23
+ mcp_use-1.0.0.dist-info/licenses/LICENSE,sha256=7Pw7dbwJSBw8zH-WE03JnR5uXvitRtaGTP9QWPcexcs,1068
24
+ mcp_use-1.0.0.dist-info/RECORD,,
@@ -1,5 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (78.1.0)
2
+ Generator: hatchling 1.27.0
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
-
mcp_use/tools/__init__.py DELETED
@@ -1,11 +0,0 @@
1
- """
2
- Tool conversion utilities.
3
-
4
- This module provides utilities for converting between MCP tool schemas
5
- and LLM-specific tool formats.
6
- """
7
-
8
- from .converter import ToolConverter
9
- from .formats import AnthropicToolFormat, OpenAIToolFormat, ToolFormat
10
-
11
- __all__ = ["ToolConverter", "ToolFormat", "OpenAIToolFormat", "AnthropicToolFormat"]
@@ -1,108 +0,0 @@
1
- """
2
- Tool converter for different LLM providers.
3
-
4
- This module provides utilities for converting between MCP tool schemas
5
- and LLM-specific formats.
6
- """
7
-
8
- from enum import Enum, auto
9
- from typing import Any
10
-
11
- from .formats import AnthropicToolFormat, OpenAIToolFormat, ToolFormat
12
-
13
-
14
- class ModelProvider(Enum):
15
- """Enum for supported model providers."""
16
-
17
- OPENAI = auto()
18
- ANTHROPIC = auto()
19
-
20
- @classmethod
21
- def from_string(cls, value: str) -> "ModelProvider":
22
- """Convert a string to a ModelProvider enum.
23
-
24
- Args:
25
- value: The string to convert.
26
-
27
- Returns:
28
- The corresponding ModelProvider enum value.
29
-
30
- Raises:
31
- ValueError: If the string is not a valid model provider.
32
- """
33
- value = value.lower()
34
- if value in ("openai", "open_ai", "open-ai"):
35
- return cls.OPENAI
36
- elif value in ("anthropic", "claude"):
37
- return cls.ANTHROPIC
38
- else:
39
- raise ValueError(f"Unsupported model provider: {value}")
40
-
41
-
42
- class ToolConverter:
43
- """Converter for MCP tools to different LLM formats.
44
-
45
- This class provides utilities for converting between MCP tool schemas
46
- and LLM-specific formats.
47
- """
48
-
49
- _format_classes: dict[ModelProvider, type[ToolFormat]] = {
50
- ModelProvider.OPENAI: OpenAIToolFormat,
51
- ModelProvider.ANTHROPIC: AnthropicToolFormat,
52
- }
53
-
54
- def __init__(self, provider: str | ModelProvider) -> None:
55
- """Initialize a new tool converter.
56
-
57
- Args:
58
- provider: The model provider to convert tools for.
59
- Can be a string or a ModelProvider enum.
60
-
61
- Raises:
62
- ValueError: If the provider is not supported.
63
- """
64
- if isinstance(provider, str):
65
- self.provider = ModelProvider.from_string(provider)
66
- else:
67
- self.provider = provider
68
-
69
- # Create an instance of the appropriate format class
70
- format_class = self._format_classes.get(self.provider)
71
- if not format_class:
72
- raise ValueError(f"Unsupported model provider: {provider}")
73
-
74
- self._format = format_class()
75
-
76
- def convert_tools(self, tools: list[dict[str, Any]]) -> list[dict[str, Any]]:
77
- """Convert a list of MCP tools to the LLM-specific format.
78
-
79
- Args:
80
- tools: The list of MCP tools to convert.
81
-
82
- Returns:
83
- The converted tools in the LLM-specific format.
84
- """
85
- return [self._format.convert_tool(tool) for tool in tools]
86
-
87
- def convert_tool_call(self, name: str, arguments: dict[str, Any]) -> dict[str, Any]:
88
- """Convert a tool call to the MCP format.
89
-
90
- Args:
91
- name: The name of the tool being called.
92
- arguments: The arguments for the tool call.
93
-
94
- Returns:
95
- The converted tool call in the MCP format.
96
- """
97
- return self._format.convert_tool_call(name, arguments)
98
-
99
- def parse_tool_calls(self, response: dict[str, Any]) -> list[dict[str, Any]]:
100
- """Parse tool calls from an LLM response.
101
-
102
- Args:
103
- response: The response from the LLM.
104
-
105
- Returns:
106
- A list of parsed tool calls, each containing 'name' and 'arguments' keys.
107
- """
108
- return self._format.parse_tool_call(response)