mcp-use 1.0.3__py3-none-any.whl → 1.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcp-use
-Version: 1.0.3
+Version: 1.1.4
 Summary: MCP Library for LLMs
 Author-email: Pietro Zullo <pietro.zullo@gmail.com>
 License: MIT
@@ -56,6 +56,19 @@ Description-Content-Type: text/markdown
 
 💡 Let developers easily connect any LLM to tools like web browsing, file operations, and more.
 
+# Features
+
+## ✨ Key Features
+
+| Feature | Description |
+|---------|-------------|
+| 🔄 **Ease of use** | Create your first MCP capable agent you need only 6 lines of code |
+| 🤖 **LLM Flexibility** | Works with any langchain supported LLM that supports tool calling (OpenAI, Anthropic, Groq, LLama etc.) |
+| 🌐 **HTTP Support** | Direct connection to MCP servers running on specific HTTP ports |
+| 🧩 **Multi-Server Support** | Use multiple MCP servers simultaneously in a single agent |
+| 🛡️ **Tool Restrictions** | Restrict potentially dangerous tools like file system or network access |
+
+
 # Quick start
 
 With pip:
@@ -72,7 +85,30 @@ cd mcp-use
 pip install -e .
 ```
 
-
+### Installing LangChain Providers
+
+mcp_use works with various LLM providers through LangChain. You'll need to install the appropriate LangChain provider package for your chosen LLM. For example:
+
+```bash
+# For OpenAI
+pip install langchain-openai
+
+# For Anthropic
+pip install langchain-anthropic
+
+# For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/)
+```
+
+and add your API keys for the provider you want to use to your `.env` file.
+
+```bash
+OPENAI_API_KEY=
+ANTHROPIC_API_KEY=
+```
+
+> **Important**: Only models with tool calling capabilities can be used with mcp_use. Make sure your chosen model supports function calling or tool use.
+
+### Spin up your agent:
 
 ```python
 import asyncio
@@ -85,8 +121,21 @@ async def main():
     # Load environment variables
     load_dotenv()
 
-    # Create
-
+    # Create configuration dictionary
+    config = {
+      "mcpServers": {
+        "playwright": {
+          "command": "npx",
+          "args": ["@playwright/mcp@latest"],
+          "env": {
+            "DISPLAY": ":1"
+          }
+        }
+      }
+    }
+
+    # Create MCPClient from configuration dictionary
+    client = MCPClient.from_dict(config)
 
     # Create LLM
     llm = ChatOpenAI(model="gpt-4o")
@@ -96,7 +145,7 @@ async def main():
 
     # Run the query
     result = await agent.run(
-        "Find the best restaurant in San Francisco
+        "Find the best restaurant in San Francisco",
     )
     print(f"\nResult: {result}")
 
@@ -104,6 +153,14 @@ if __name__ == "__main__":
     asyncio.run(main())
 ```
 
+You can also add the servers configuration from a config file like this:
+
+```python
+client = MCPClient.from_config_file(
+    os.path.join("browser_mcp.json")
+)
+```
+
 Example configuration file (`browser_mcp.json`):
 
 ```json
@@ -120,15 +177,10 @@ Example configuration file (`browser_mcp.json`):
 }
 ```
 
-Add your API keys for the provider you want to use to your `.env` file.
-
-```bash
-OPENAI_API_KEY=
-ANTHROPIC_API_KEY=
-```
-
 For other settings, models, and more, check out the documentation.
 
+# Features
+
 # Example Use Cases
 
 ## Web Browsing with Playwright
@@ -286,6 +338,55 @@ if __name__ == "__main__":
     asyncio.run(main())
 ```
 
+## HTTP Connection Example
+
+MCP-Use now supports HTTP connections, allowing you to connect to MCP servers running on specific HTTP ports. This feature is particularly useful for integrating with web-based MCP servers.
+
+Here's an example of how to use the HTTP connection feature:
+
+```python
+import asyncio
+import os
+from dotenv import load_dotenv
+from langchain_openai import ChatOpenAI
+from mcp_use import MCPAgent, MCPClient
+
+async def main():
+    """Run the example using a configuration file."""
+    # Load environment variables
+    load_dotenv()
+
+    config = {
+        "mcpServers": {
+            "http": {
+                "url": "http://localhost:8931/sse"
+            }
+        }
+    }
+
+    # Create MCPClient from config file
+    client = MCPClient.from_dict(config)
+
+    # Create LLM
+    llm = ChatOpenAI(model="gpt-4o")
+
+    # Create agent with the client
+    agent = MCPAgent(llm=llm, client=client, max_steps=30)
+
+    # Run the query
+    result = await agent.run(
+        "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
+        max_steps=30,
+    )
+    print(f"\nResult: {result}")
+
+if __name__ == "__main__":
+    # Run the appropriate example
+    asyncio.run(main())
+```
+
+This example demonstrates how to connect to an MCP server running on a specific HTTP port. Make sure to start your MCP server before running this example.
+
 # Multi-Server Support
 
 MCP-Use supports working with multiple MCP servers simultaneously, allowing you to combine tools from different servers in a single agent. This is useful for complex tasks that require multiple capabilities, such as web browsing combined with file operations or 3D modeling.
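A note on the multi-server support mentioned in the last context line above: it follows directly from the configuration format introduced in this release, in that you simply list several entries under `mcpServers` in one configuration. Below is a minimal sketch composed only from the APIs that appear in this diff (`MCPClient.from_dict`, `MCPAgent`, `agent.run`), assuming the HTTP/SSE server from the earlier example is already running on port 8931:

```python
# Sketch only: combines the two server configs shown in the README diff above.
# Assumes an SSE server is already listening at http://localhost:8931/sse.
import asyncio

from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from mcp_use import MCPAgent, MCPClient

config = {
    "mcpServers": {
        # stdio server, launched on demand via npx
        "playwright": {
            "command": "npx",
            "args": ["@playwright/mcp@latest"],
            "env": {"DISPLAY": ":1"},
        },
        # HTTP/SSE server that must already be running
        "http": {"url": "http://localhost:8931/sse"},
    }
}

async def main() -> None:
    load_dotenv()  # loads OPENAI_API_KEY from .env
    client = MCPClient.from_dict(config)
    agent = MCPAgent(llm=ChatOpenAI(model="gpt-4o"), client=client, max_steps=30)
    result = await agent.run("Find the best restaurant in San Francisco")
    print(f"\nResult: {result}")

if __name__ == "__main__":
    asyncio.run(main())
```

This mirrors the two single-server examples in the diff; the agent can then draw tools from either server while answering a query.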
@@ -18,7 +18,7 @@ mcp_use/task_managers/base.py,sha256=ksNdxTwq8N-zqymxVoKGnWXq9iqkLYC61uB91o6Mh-4
 mcp_use/task_managers/sse.py,sha256=WysmjwqRI3meXMZY_F4y9tSBMvSiUZfTJQfitM5l6jQ,2529
 mcp_use/task_managers/stdio.py,sha256=DEISpXv4mo3d5a-WT8lkWbrXJwUh7QW0nMT_IM3fHGg,2269
 mcp_use/task_managers/websocket.py,sha256=ZbCqdGgzCRtsXzRGFws-f2OzH8cPAkN4sJNDwEpRmCc,1915
-mcp_use-1.
-mcp_use-1.
-mcp_use-1.
-mcp_use-1.
+mcp_use-1.1.4.dist-info/METADATA,sha256=vE5PNvtxt7MIOI5EL1DRA73xzNoGjEe2Xa7WfBdV9rU,14015
+mcp_use-1.1.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+mcp_use-1.1.4.dist-info/licenses/LICENSE,sha256=7Pw7dbwJSBw8zH-WE03JnR5uXvitRtaGTP9QWPcexcs,1068
+mcp_use-1.1.4.dist-info/RECORD,,
File without changes
File without changes