mcp-use 1.0.3__tar.gz → 1.1.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mcp-use might be problematic. Click here for more details.

Files changed (66) hide show
  1. mcp_use-1.1.4/.github/ISSUE_TEMPLATE/bug_report.md +38 -0
  2. {mcp_use-1.0.3 → mcp_use-1.1.4}/PKG-INFO +113 -12
  3. {mcp_use-1.0.3 → mcp_use-1.1.4}/README.md +112 -11
  4. {mcp_use-1.0.3 → mcp_use-1.1.4}/docs/introduction.mdx +3 -0
  5. mcp_use-1.1.4/docs/logo/dark.svg +7 -0
  6. mcp_use-1.1.4/docs/logo/light.svg +7 -0
  7. mcp_use-1.1.4/docs/quickstart.mdx +234 -0
  8. mcp_use-1.1.4/examples/http_example.py +53 -0
  9. {mcp_use-1.0.3 → mcp_use-1.1.4}/pyproject.toml +1 -1
  10. mcp_use-1.1.4/static/image.jpg +0 -0
  11. mcp_use-1.0.3/docs/logo/dark.svg +0 -12
  12. mcp_use-1.0.3/docs/logo/light.svg +0 -12
  13. mcp_use-1.0.3/docs/quickstart.mdx +0 -138
  14. mcp_use-1.0.3/static/image.jpg +0 -0
  15. {mcp_use-1.0.3 → mcp_use-1.1.4}/.github/workflows/publish.yml +0 -0
  16. {mcp_use-1.0.3 → mcp_use-1.1.4}/.github/workflows/tests.yml +0 -0
  17. {mcp_use-1.0.3 → mcp_use-1.1.4}/.gitignore +0 -0
  18. {mcp_use-1.0.3 → mcp_use-1.1.4}/.pre-commit-config.yaml +0 -0
  19. {mcp_use-1.0.3 → mcp_use-1.1.4}/LICENSE +0 -0
  20. {mcp_use-1.0.3 → mcp_use-1.1.4}/docs/README.md +0 -0
  21. {mcp_use-1.0.3 → mcp_use-1.1.4}/docs/api-reference/introduction.mdx +0 -0
  22. {mcp_use-1.0.3 → mcp_use-1.1.4}/docs/development.mdx +0 -0
  23. {mcp_use-1.0.3 → mcp_use-1.1.4}/docs/docs.json +0 -0
  24. {mcp_use-1.0.3 → mcp_use-1.1.4}/docs/essentials/configuration.mdx +0 -0
  25. {mcp_use-1.0.3 → mcp_use-1.1.4}/docs/essentials/connection-types.mdx +0 -0
  26. {mcp_use-1.0.3 → mcp_use-1.1.4}/docs/essentials/llm-integration.mdx +0 -0
  27. {mcp_use-1.0.3 → mcp_use-1.1.4}/docs/favicon.svg +0 -0
  28. {mcp_use-1.0.3 → mcp_use-1.1.4}/docs/images/hero-dark.png +0 -0
  29. {mcp_use-1.0.3 → mcp_use-1.1.4}/docs/images/hero-light.png +0 -0
  30. {mcp_use-1.0.3 → mcp_use-1.1.4}/docs/snippets/snippet-intro.mdx +0 -0
  31. {mcp_use-1.0.3 → mcp_use-1.1.4}/examples/airbnb_mcp.json +0 -0
  32. {mcp_use-1.0.3 → mcp_use-1.1.4}/examples/airbnb_use.py +0 -0
  33. {mcp_use-1.0.3 → mcp_use-1.1.4}/examples/blender_use.py +0 -0
  34. {mcp_use-1.0.3 → mcp_use-1.1.4}/examples/browser_mcp.json +0 -0
  35. {mcp_use-1.0.3 → mcp_use-1.1.4}/examples/browser_use.py +0 -0
  36. {mcp_use-1.0.3 → mcp_use-1.1.4}/examples/chat_example.py +0 -0
  37. {mcp_use-1.0.3 → mcp_use-1.1.4}/examples/filesystem_use.py +0 -0
  38. {mcp_use-1.0.3 → mcp_use-1.1.4}/examples/multi_server_example.py +0 -0
  39. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/__init__.py +0 -0
  40. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/agents/__init__.py +0 -0
  41. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/agents/base.py +0 -0
  42. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/agents/langchain_agent.py +0 -0
  43. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/agents/mcpagent.py +0 -0
  44. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/agents/prompts/default.py +0 -0
  45. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/client.py +0 -0
  46. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/config.py +0 -0
  47. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/connectors/__init__.py +0 -0
  48. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/connectors/base.py +0 -0
  49. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/connectors/http.py +0 -0
  50. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/connectors/stdio.py +0 -0
  51. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/connectors/websocket.py +0 -0
  52. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/logging.py +0 -0
  53. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/session.py +0 -0
  54. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/task_managers/__init__.py +0 -0
  55. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/task_managers/base.py +0 -0
  56. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/task_managers/sse.py +0 -0
  57. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/task_managers/stdio.py +0 -0
  58. {mcp_use-1.0.3 → mcp_use-1.1.4}/mcp_use/task_managers/websocket.py +0 -0
  59. {mcp_use-1.0.3 → mcp_use-1.1.4}/pytest.ini +0 -0
  60. {mcp_use-1.0.3 → mcp_use-1.1.4}/tests/conftest.py +0 -0
  61. {mcp_use-1.0.3 → mcp_use-1.1.4}/tests/unit/test_client.py +0 -0
  62. {mcp_use-1.0.3 → mcp_use-1.1.4}/tests/unit/test_config.py +0 -0
  63. {mcp_use-1.0.3 → mcp_use-1.1.4}/tests/unit/test_http_connector.py +0 -0
  64. {mcp_use-1.0.3 → mcp_use-1.1.4}/tests/unit/test_logging.py +0 -0
  65. {mcp_use-1.0.3 → mcp_use-1.1.4}/tests/unit/test_session.py +0 -0
  66. {mcp_use-1.0.3 → mcp_use-1.1.4}/tests/unit/test_stdio_connector.py +0 -0
@@ -0,0 +1,38 @@
1
+ ---
2
+ name: Bug report
3
+ about: Create a report to help us improve
4
+ title: ''
5
+ labels: ''
6
+ assignees: ''
7
+
8
+ ---
9
+
10
+ **Describe the bug**
11
+ A clear and concise description of what the bug is.
12
+
13
+ **To Reproduce**
14
+ Steps to reproduce the behavior:
15
+ 1. Go to '...'
16
+ 2. Click on '....'
17
+ 3. Scroll down to '....'
18
+ 4. See error
19
+
20
+ **Expected behavior**
21
+ A clear and concise description of what you expected to happen.
22
+
23
+ **Screenshots**
24
+ If applicable, add screenshots to help explain your problem.
25
+
26
+ **Desktop (please complete the following information):**
27
+ - OS: [e.g. iOS]
28
+ - Browser [e.g. chrome, safari]
29
+ - Version [e.g. 22]
30
+
31
+ **Smartphone (please complete the following information):**
32
+ - Device: [e.g. iPhone6]
33
+ - OS: [e.g. iOS8.1]
34
+ - Browser [e.g. stock browser, safari]
35
+ - Version [e.g. 22]
36
+
37
+ **Additional context**
38
+ Add any other context about the problem here.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mcp-use
3
- Version: 1.0.3
3
+ Version: 1.1.4
4
4
  Summary: MCP Library for LLMs
5
5
  Author-email: Pietro Zullo <pietro.zullo@gmail.com>
6
6
  License: MIT
@@ -56,6 +56,19 @@ Description-Content-Type: text/markdown
56
56
 
57
57
  💡 Let developers easily connect any LLM to tools like web browsing, file operations, and more.
58
58
 
59
+ # Features
60
+
61
+ ## ✨ Key Features
62
+
63
+ | Feature | Description |
64
+ |---------|-------------|
65
+ | 🔄 **Ease of use** | Create your first MCP-capable agent in only 6 lines of code |
66
+ | 🤖 **LLM Flexibility** | Works with any LangChain-supported LLM that supports tool calling (OpenAI, Anthropic, Groq, Llama, etc.) |
67
+ | 🌐 **HTTP Support** | Direct connection to MCP servers running on specific HTTP ports |
68
+ | 🧩 **Multi-Server Support** | Use multiple MCP servers simultaneously in a single agent |
69
+ | 🛡️ **Tool Restrictions** | Restrict potentially dangerous tools like file system or network access |
70
+
71
+
59
72
  # Quick start
60
73
 
61
74
  With pip:
@@ -72,7 +85,30 @@ cd mcp-use
72
85
  pip install -e .
73
86
  ```
74
87
 
75
- Spin up your agent:
88
+ ### Installing LangChain Providers
89
+
90
+ mcp_use works with various LLM providers through LangChain. You'll need to install the appropriate LangChain provider package for your chosen LLM. For example:
91
+
92
+ ```bash
93
+ # For OpenAI
94
+ pip install langchain-openai
95
+
96
+ # For Anthropic
97
+ pip install langchain-anthropic
98
+
99
+ # For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/)
100
+ ```
101
+
102
+ and add your API keys for the provider you want to use to your `.env` file.
103
+
104
+ ```bash
105
+ OPENAI_API_KEY=
106
+ ANTHROPIC_API_KEY=
107
+ ```
108
+
109
+ > **Important**: Only models with tool calling capabilities can be used with mcp_use. Make sure your chosen model supports function calling or tool use.
110
+
111
+ ### Spin up your agent:
76
112
 
77
113
  ```python
78
114
  import asyncio
@@ -85,8 +121,21 @@ async def main():
85
121
  # Load environment variables
86
122
  load_dotenv()
87
123
 
88
- # Create MCPClient from config file
89
- client = MCPClient.from_config_file("browser_mcp.json")
124
+ # Create configuration dictionary
125
+ config = {
126
+ "mcpServers": {
127
+ "playwright": {
128
+ "command": "npx",
129
+ "args": ["@playwright/mcp@latest"],
130
+ "env": {
131
+ "DISPLAY": ":1"
132
+ }
133
+ }
134
+ }
135
+ }
136
+
137
+ # Create MCPClient from configuration dictionary
138
+ client = MCPClient.from_dict(config)
90
139
 
91
140
  # Create LLM
92
141
  llm = ChatOpenAI(model="gpt-4o")
@@ -96,7 +145,7 @@ async def main():
96
145
 
97
146
  # Run the query
98
147
  result = await agent.run(
99
- "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
148
+ "Find the best restaurant in San Francisco",
100
149
  )
101
150
  print(f"\nResult: {result}")
102
151
 
@@ -104,6 +153,14 @@ if __name__ == "__main__":
104
153
  asyncio.run(main())
105
154
  ```
106
155
 
156
+ You can also add the servers configuration from a config file like this:
157
+
158
+ ```python
159
+ client = MCPClient.from_config_file(
160
+ os.path.join("browser_mcp.json")
161
+ )
162
+ ```
163
+
107
164
  Example configuration file (`browser_mcp.json`):
108
165
 
109
166
  ```json
@@ -120,15 +177,10 @@ Example configuration file (`browser_mcp.json`):
120
177
  }
121
178
  ```
122
179
 
123
- Add your API keys for the provider you want to use to your `.env` file.
124
-
125
- ```bash
126
- OPENAI_API_KEY=
127
- ANTHROPIC_API_KEY=
128
- ```
129
-
130
180
  For other settings, models, and more, check out the documentation.
131
181
 
182
+ # Features
183
+
132
184
  # Example Use Cases
133
185
 
134
186
  ## Web Browsing with Playwright
@@ -286,6 +338,55 @@ if __name__ == "__main__":
286
338
  asyncio.run(main())
287
339
  ```
288
340
 
341
+ ## HTTP Connection Example
342
+
343
+ MCP-Use now supports HTTP connections, allowing you to connect to MCP servers running on specific HTTP ports. This feature is particularly useful for integrating with web-based MCP servers.
344
+
345
+ Here's an example of how to use the HTTP connection feature:
346
+
347
+ ```python
348
+ import asyncio
349
+ import os
350
+ from dotenv import load_dotenv
351
+ from langchain_openai import ChatOpenAI
352
+ from mcp_use import MCPAgent, MCPClient
353
+
354
+ async def main():
355
+ """Run the example using a configuration file."""
356
+ # Load environment variables
357
+ load_dotenv()
358
+
359
+ config = {
360
+ "mcpServers": {
361
+ "http": {
362
+ "url": "http://localhost:8931/sse"
363
+ }
364
+ }
365
+ }
366
+
367
+ # Create MCPClient from config file
368
+ client = MCPClient.from_dict(config)
369
+
370
+ # Create LLM
371
+ llm = ChatOpenAI(model="gpt-4o")
372
+
373
+ # Create agent with the client
374
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
375
+
376
+ # Run the query
377
+ result = await agent.run(
378
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
379
+ max_steps=30,
380
+ )
381
+ print(f"\nResult: {result}")
382
+
383
+ if __name__ == "__main__":
384
+ # Run the appropriate example
385
+ asyncio.run(main())
386
+ ```
387
+
388
+ This example demonstrates how to connect to an MCP server running on a specific HTTP port. Make sure to start your MCP server before running this example.
389
+
289
390
  # Multi-Server Support
290
391
 
291
392
  MCP-Use supports working with multiple MCP servers simultaneously, allowing you to combine tools from different servers in a single agent. This is useful for complex tasks that require multiple capabilities, such as web browsing combined with file operations or 3D modeling.
@@ -17,6 +17,19 @@
17
17
 
18
18
  💡 Let developers easily connect any LLM to tools like web browsing, file operations, and more.
19
19
 
20
+ # Features
21
+
22
+ ## ✨ Key Features
23
+
24
+ | Feature | Description |
25
+ |---------|-------------|
26
+ | 🔄 **Ease of use** | Create your first MCP-capable agent in only 6 lines of code |
27
+ | 🤖 **LLM Flexibility** | Works with any LangChain-supported LLM that supports tool calling (OpenAI, Anthropic, Groq, Llama, etc.) |
28
+ | 🌐 **HTTP Support** | Direct connection to MCP servers running on specific HTTP ports |
29
+ | 🧩 **Multi-Server Support** | Use multiple MCP servers simultaneously in a single agent |
30
+ | 🛡️ **Tool Restrictions** | Restrict potentially dangerous tools like file system or network access |
31
+
32
+
20
33
  # Quick start
21
34
 
22
35
  With pip:
@@ -33,7 +46,30 @@ cd mcp-use
33
46
  pip install -e .
34
47
  ```
35
48
 
36
- Spin up your agent:
49
+ ### Installing LangChain Providers
50
+
51
+ mcp_use works with various LLM providers through LangChain. You'll need to install the appropriate LangChain provider package for your chosen LLM. For example:
52
+
53
+ ```bash
54
+ # For OpenAI
55
+ pip install langchain-openai
56
+
57
+ # For Anthropic
58
+ pip install langchain-anthropic
59
+
60
+ # For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/)
61
+ ```
62
+
63
+ and add your API keys for the provider you want to use to your `.env` file.
64
+
65
+ ```bash
66
+ OPENAI_API_KEY=
67
+ ANTHROPIC_API_KEY=
68
+ ```
69
+
70
+ > **Important**: Only models with tool calling capabilities can be used with mcp_use. Make sure your chosen model supports function calling or tool use.
71
+
72
+ ### Spin up your agent:
37
73
 
38
74
  ```python
39
75
  import asyncio
@@ -46,8 +82,21 @@ async def main():
46
82
  # Load environment variables
47
83
  load_dotenv()
48
84
 
49
- # Create MCPClient from config file
50
- client = MCPClient.from_config_file("browser_mcp.json")
85
+ # Create configuration dictionary
86
+ config = {
87
+ "mcpServers": {
88
+ "playwright": {
89
+ "command": "npx",
90
+ "args": ["@playwright/mcp@latest"],
91
+ "env": {
92
+ "DISPLAY": ":1"
93
+ }
94
+ }
95
+ }
96
+ }
97
+
98
+ # Create MCPClient from configuration dictionary
99
+ client = MCPClient.from_dict(config)
51
100
 
52
101
  # Create LLM
53
102
  llm = ChatOpenAI(model="gpt-4o")
@@ -57,7 +106,7 @@ async def main():
57
106
 
58
107
  # Run the query
59
108
  result = await agent.run(
60
- "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
109
+ "Find the best restaurant in San Francisco",
61
110
  )
62
111
  print(f"\nResult: {result}")
63
112
 
@@ -65,6 +114,14 @@ if __name__ == "__main__":
65
114
  asyncio.run(main())
66
115
  ```
67
116
 
117
+ You can also add the servers configuration from a config file like this:
118
+
119
+ ```python
120
+ client = MCPClient.from_config_file(
121
+ os.path.join("browser_mcp.json")
122
+ )
123
+ ```
124
+
68
125
  Example configuration file (`browser_mcp.json`):
69
126
 
70
127
  ```json
@@ -81,15 +138,10 @@ Example configuration file (`browser_mcp.json`):
81
138
  }
82
139
  ```
83
140
 
84
- Add your API keys for the provider you want to use to your `.env` file.
85
-
86
- ```bash
87
- OPENAI_API_KEY=
88
- ANTHROPIC_API_KEY=
89
- ```
90
-
91
141
  For other settings, models, and more, check out the documentation.
92
142
 
143
+ # Features
144
+
93
145
  # Example Use Cases
94
146
 
95
147
  ## Web Browsing with Playwright
@@ -247,6 +299,55 @@ if __name__ == "__main__":
247
299
  asyncio.run(main())
248
300
  ```
249
301
 
302
+ ## HTTP Connection Example
303
+
304
+ MCP-Use now supports HTTP connections, allowing you to connect to MCP servers running on specific HTTP ports. This feature is particularly useful for integrating with web-based MCP servers.
305
+
306
+ Here's an example of how to use the HTTP connection feature:
307
+
308
+ ```python
309
+ import asyncio
310
+ import os
311
+ from dotenv import load_dotenv
312
+ from langchain_openai import ChatOpenAI
313
+ from mcp_use import MCPAgent, MCPClient
314
+
315
+ async def main():
316
+ """Run the example using a configuration file."""
317
+ # Load environment variables
318
+ load_dotenv()
319
+
320
+ config = {
321
+ "mcpServers": {
322
+ "http": {
323
+ "url": "http://localhost:8931/sse"
324
+ }
325
+ }
326
+ }
327
+
328
+ # Create MCPClient from config file
329
+ client = MCPClient.from_dict(config)
330
+
331
+ # Create LLM
332
+ llm = ChatOpenAI(model="gpt-4o")
333
+
334
+ # Create agent with the client
335
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
336
+
337
+ # Run the query
338
+ result = await agent.run(
339
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
340
+ max_steps=30,
341
+ )
342
+ print(f"\nResult: {result}")
343
+
344
+ if __name__ == "__main__":
345
+ # Run the appropriate example
346
+ asyncio.run(main())
347
+ ```
348
+
349
+ This example demonstrates how to connect to an MCP server running on a specific HTTP port. Make sure to start your MCP server before running this example.
350
+
250
351
  # Multi-Server Support
251
352
 
252
353
  MCP-Use supports working with multiple MCP servers simultaneously, allowing you to combine tools from different servers in a single agent. This is useful for complex tasks that require multiple capabilities, such as web browsing combined with file operations or 3D modeling.
@@ -25,6 +25,9 @@ mcp_use is an open source library that enables developers to connect any Languag
25
25
  <Card title="Universal LLM Support" icon="robot" href="/essentials/llm-integration">
26
26
  Compatible with any LangChain-supported LLM provider
27
27
  </Card>
28
+ <Card title="HTTP Connection" icon="network" href="/quickstart">
29
+ Connect to MCP servers running on specific HTTP ports for web-based integrations
30
+ </Card>
28
31
  </CardGroup>
29
32
 
30
33
  ## Getting Started
@@ -0,0 +1,7 @@
1
+ <svg width="303" height="303" viewBox="0 0 303 303" fill="none" xmlns="http://www.w3.org/2000/svg">
2
+ <path d="M106.066 106.066C86.5398 125.592 54.8816 125.592 35.3554 106.066V106.066C15.8291 86.5397 15.8291 54.8815 35.3554 35.3552V35.3552C54.8816 15.829 86.5398 15.829 106.066 35.3552V35.3552C125.592 54.8815 125.592 86.5397 106.066 106.066V106.066Z" fill="white"/>
3
+ <path d="M267.286 267.286C247.76 286.812 216.102 286.812 196.576 267.286V267.286C177.049 247.76 177.049 216.102 196.576 196.576V196.576C216.102 177.049 247.76 177.049 267.286 196.576V196.576C286.813 216.102 286.813 247.76 267.286 267.286V267.286Z" fill="white"/>
4
+ <path fill-rule="evenodd" clip-rule="evenodd" d="M181.957 230.04L211.425 259.508L260.922 210.011L232.851 181.94C204.215 181.726 175.645 170.695 153.796 148.846C131.947 126.997 120.915 98.4264 120.702 69.7903L92.631 41.7193L43.1335 91.2168L72.6014 120.685C100.313 121.56 127.765 132.573 148.917 153.725C170.069 174.877 181.082 202.328 181.957 230.04Z" fill="white"/>
5
+ <circle cx="70.3209" cy="232.321" r="50" fill="white"/>
6
+ <circle cx="232.321" cy="70.3209" r="50" fill="white"/>
7
+ </svg>
@@ -0,0 +1,7 @@
1
+ <svg width="303" height="303" viewBox="0 0 303 303" fill="none" xmlns="http://www.w3.org/2000/svg">
2
+ <path d="M106.066 106.066C86.5398 125.592 54.8816 125.592 35.3554 106.066V106.066C15.8291 86.5397 15.8291 54.8815 35.3554 35.3552V35.3552C54.8816 15.829 86.5398 15.829 106.066 35.3552V35.3552C125.592 54.8815 125.592 86.5397 106.066 106.066V106.066Z" fill="black"/>
3
+ <path d="M267.286 267.286C247.76 286.812 216.102 286.812 196.576 267.286V267.286C177.049 247.76 177.049 216.102 196.576 196.576V196.576C216.102 177.049 247.76 177.049 267.286 196.576V196.576C286.813 216.102 286.813 247.76 267.286 267.286V267.286Z" fill="black"/>
4
+ <path fill-rule="evenodd" clip-rule="evenodd" d="M181.957 230.04L211.425 259.508L260.922 210.011L232.851 181.94C204.215 181.726 175.645 170.695 153.796 148.846C131.947 126.997 120.915 98.4264 120.702 69.7903L92.631 41.7193L43.1335 91.2168L72.6014 120.685C100.313 121.56 127.765 132.573 148.917 153.725C170.069 174.877 181.082 202.328 181.957 230.04Z" fill="black"/>
5
+ <circle cx="70.3209" cy="232.321" r="50" fill="black"/>
6
+ <circle cx="232.321" cy="70.3209" r="50" fill="black"/>
7
+ </svg>
@@ -0,0 +1,234 @@
1
+ ---
2
+ title: Quickstart
3
+ description: "Get started with mcp_use in minutes"
4
+ ---
5
+
6
+ # Quickstart Guide
7
+
8
+ This guide will help you get started with mcp_use quickly. We'll cover installation, basic configuration, and running your first agent.
9
+
10
+ ## Installation
11
+
12
+ You can install mcp_use using pip:
13
+
14
+ ```bash
15
+ pip install mcp-use
16
+ ```
17
+
18
+ Or install from source:
19
+
20
+ ```bash
21
+ git clone https://github.com/pietrozullo/mcp-use.git
22
+ cd mcp-use
23
+ pip install -e .
24
+ ```
25
+
26
+ ## Installing LangChain Providers
27
+
28
+ mcp_use works with various LLM providers through LangChain. You'll need to install the appropriate LangChain provider package for your chosen LLM. For example:
29
+
30
+ ```bash
31
+ # For OpenAI
32
+ pip install langchain-openai
33
+
34
+ # For Anthropic
35
+ pip install langchain-anthropic
36
+
37
+ # For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/)
38
+ ```
39
+
40
+ > **Important**: Only models with tool calling capabilities can be used with mcp_use. Make sure your chosen model supports function calling or tool use.
41
+
42
+ ## Environment Setup
43
+
44
+ Set up your environment variables in a `.env` file:
45
+
46
+ ```bash
47
+ OPENAI_API_KEY=your_api_key_here
48
+ ANTHROPIC_API_KEY=your_api_key_here
49
+ ```
50
+
51
+ ## Your First Agent
52
+
53
+ Here's a simple example to get you started:
54
+
55
+ ```python
56
+ import asyncio
57
+ import os
58
+ from dotenv import load_dotenv
59
+ from langchain_openai import ChatOpenAI
60
+ from mcp_use import MCPAgent, MCPClient
61
+
62
+ async def main():
63
+ # Load environment variables
64
+ load_dotenv()
65
+
66
+ # Create configuration dictionary
67
+ config = {
68
+ "mcpServers": {
69
+ "playwright": {
70
+ "command": "npx",
71
+ "args": ["@playwright/mcp@latest"],
72
+ "env": {
73
+ "DISPLAY": ":1"
74
+ }
75
+ }
76
+ }
77
+ }
78
+
79
+ # Create MCPClient from configuration dictionary
80
+ client = MCPClient.from_dict(config)
81
+
82
+ # Create LLM
83
+ llm = ChatOpenAI(model="gpt-4o")
84
+
85
+ # Create agent with the client
86
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
87
+
88
+ # Run the query
89
+ result = await agent.run(
90
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
91
+ )
92
+ print(f"\nResult: {result}")
93
+
94
+ if __name__ == "__main__":
95
+ asyncio.run(main())
96
+ ```
97
+
98
+ ## Configuration Options
99
+
100
+ You can also add the servers configuration from a config file:
101
+
102
+ ```python
103
+ client = MCPClient.from_config_file(
104
+ os.path.join("browser_mcp.json")
105
+ )
106
+ ```
107
+
108
+ Example configuration file (`browser_mcp.json`):
109
+
110
+ ```json
111
+ {
112
+ "mcpServers": {
113
+ "playwright": {
114
+ "command": "npx",
115
+ "args": ["@playwright/mcp@latest"],
116
+ "env": {
117
+ "DISPLAY": ":1"
118
+ }
119
+ }
120
+ }
121
+ }
122
+ ```
123
+
124
+ ## Restricting Tool Access
125
+
126
+ You can control which tools are available to the agent:
127
+
128
+ ```python
129
+ import asyncio
130
+ import os
131
+ from dotenv import load_dotenv
132
+ from langchain_openai import ChatOpenAI
133
+ from mcp_use import MCPAgent, MCPClient
134
+
135
+ async def main():
136
+ # Load environment variables
137
+ load_dotenv()
138
+
139
+ # Create configuration dictionary
140
+ config = {
141
+ "mcpServers": {
142
+ "playwright": {
143
+ "command": "npx",
144
+ "args": ["@playwright/mcp@latest"],
145
+ "env": {
146
+ "DISPLAY": ":1"
147
+ }
148
+ }
149
+ }
150
+ }
151
+
152
+ # Create MCPClient from configuration dictionary
153
+ client = MCPClient.from_dict(config)
154
+
155
+ # Create LLM
156
+ llm = ChatOpenAI(model="gpt-4o")
157
+
158
+ # Create agent with restricted tools
159
+ agent = MCPAgent(
160
+ llm=llm,
161
+ client=client,
162
+ max_steps=30,
163
+ disallowed_tools=["file_system", "network"] # Restrict potentially dangerous tools
164
+ )
165
+
166
+ # Run the query
167
+ result = await agent.run(
168
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
169
+ )
170
+ print(f"\nResult: {result}")
171
+
172
+ if __name__ == "__main__":
173
+ asyncio.run(main())
174
+ ```
175
+
176
+ ## Available MCP Servers
177
+
178
+ mcp_use supports any MCP server, allowing you to connect to a wide range of server implementations. For a comprehensive list of available servers, check out the [awesome-mcp-servers](https://github.com/punkpeye/awesome-mcp-servers) repository.
179
+
180
+
181
+ Each server requires its own configuration. Check the [Configuration Guide](/essentials/configuration) for details.
182
+
183
+ ## HTTP Connection
184
+
185
+ mcp_use now supports HTTP connections, allowing you to connect to MCP servers running on specific HTTP ports. This feature is particularly useful for integrating with web-based MCP servers.
186
+
187
+ Here's a simple example to get you started with HTTP connections:
188
+
189
+ ```python
190
+ import asyncio
191
+ import os
192
+ from dotenv import load_dotenv
193
+ from langchain_openai import ChatOpenAI
194
+ from mcp_use import MCPAgent, MCPClient
195
+
196
+ async def main():
197
+ # Load environment variables
198
+ load_dotenv()
199
+
200
+ # Create configuration dictionary
201
+ config = {
202
+ "mcpServers": {
203
+ "http": {
204
+ "url": "http://localhost:8931/sse"
205
+ }
206
+ }
207
+ }
208
+
209
+ # Create MCPClient from configuration dictionary
210
+ client = MCPClient.from_dict(config)
211
+
212
+ # Create LLM
213
+ llm = ChatOpenAI(model="gpt-4o")
214
+
215
+ # Create agent with the client
216
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
217
+
218
+ # Run the query
219
+ result = await agent.run(
220
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
221
+ )
222
+ print(f"\nResult: {result}")
223
+
224
+ if __name__ == "__main__":
225
+ asyncio.run(main())
226
+ ```
227
+
228
+ This example demonstrates how to connect to an MCP server running on a specific HTTP port. Make sure to start your MCP server before running this example.
229
+
230
+ ## Next Steps
231
+
232
+ - Learn about [Configuration Options](/essentials/configuration)
233
+ - Explore [Example Use Cases](/examples)
234
+ - Check out [Advanced Features](/essentials/advanced)
@@ -0,0 +1,53 @@
1
+ """
2
+ HTTP Example for mcp_use.
3
+
4
+ This example demonstrates how to use the mcp_use library with MCPClient
5
+ to connect to an MCP server running on a specific HTTP port.
6
+
7
+ Before running this example, you need to start the Playwright MCP server
8
+ in another terminal with:
9
+
10
+ npx @playwright/mcp@latest --port 8931
11
+
12
+ This will start the server on port 8931, resulting in the config you find below.
13
+ Of course you can run this with any server you want at any URL.
14
+
15
+ Special thanks to https://github.com/microsoft/playwright-mcp for the server.
16
+
17
+ """
18
+
19
+ import asyncio
20
+
21
+ from dotenv import load_dotenv
22
+ from langchain_openai import ChatOpenAI
23
+
24
+ from mcp_use import MCPAgent, MCPClient
25
+
26
+
27
+ async def main():
28
+ """Run the example using a configuration file."""
29
+ # Load environment variables
30
+ load_dotenv()
31
+
32
+ config = {"mcpServers": {"http": {"url": "http://localhost:8931/sse"}}}
33
+
34
+ # Create MCPClient from config file
35
+ client = MCPClient.from_dict(config)
36
+
37
+ # Create LLM
38
+ llm = ChatOpenAI(model="gpt-4o")
39
+
40
+ # Create agent with the client
41
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
42
+
43
+ # Run the query
44
+ result = await agent.run(
45
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
46
+ max_steps=30,
47
+ )
48
+ print(f"\nResult: {result}")
49
+
50
+
51
+ if __name__ == "__main__":
52
+ # Run the appropriate example
53
+ asyncio.run(main())
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "mcp-use"
3
- version = "1.0.3"
3
+ version = "1.1.4"
4
4
  description = "MCP Library for LLMs"
5
5
  authors = [
6
6
  {name = "Pietro Zullo", email = "pietro.zullo@gmail.com"}
Binary file
@@ -1,12 +0,0 @@
1
- <svg viewBox="0 0 200 60" xmlns="http://www.w3.org/2000/svg">
2
- <style>
3
- .logo-text {
4
- font-family: 'Nunito', sans-serif;
5
- font-size: 40px;
6
- font-weight: 800;
7
- fill: #ffffff;
8
- }
9
- </style>
10
- <rect width="100%" height="100%" fill="none"/>
11
- <text x="0" y="42" class="logo-text">mcp_use</text>
12
- </svg>
@@ -1,12 +0,0 @@
1
- <svg viewBox="0 0 200 60" xmlns="http://www.w3.org/2000/svg">
2
- <style>
3
- .logo-text {
4
- font-family: 'Nunito', sans-serif;
5
- font-size: 40px;
6
- font-weight: 800;
7
- fill: #000000;
8
- }
9
- </style>
10
- <rect width="100%" height="100%" fill="none"/>
11
- <text x="0" y="42" class="logo-text">mcp_use</text>
12
- </svg>
@@ -1,138 +0,0 @@
1
- ---
2
- title: Quickstart
3
- description: "Get started with mcp_use in minutes"
4
- ---
5
-
6
- # Quickstart Guide
7
-
8
- This guide will help you get started with mcp_use quickly. We'll cover installation, basic configuration, and running your first agent.
9
-
10
- ## Installation
11
-
12
- You can install mcp_use using pip:
13
-
14
- ```bash
15
- pip install mcp-use
16
- ```
17
-
18
- Or install from source:
19
-
20
- ```bash
21
- git clone https://github.com/pietrozullo/mcp-use.git
22
- cd mcp-use
23
- pip install -e .
24
- ```
25
-
26
- ## Basic Setup
27
-
28
- 1. Create a configuration file (e.g., `browser_mcp.json`):
29
-
30
- ```json
31
- {
32
- "mcpServers": {
33
- "playwright": {
34
- "command": "npx",
35
- "args": ["@playwright/mcp@latest"],
36
- "env": {
37
- "DISPLAY": ":1"
38
- }
39
- }
40
- }
41
- }
42
- ```
43
-
44
- 2. Set up your environment variables in a `.env` file:
45
-
46
- ```bash
47
- OPENAI_API_KEY=your_api_key_here
48
- ANTHROPIC_API_KEY=your_api_key_here
49
- ```
50
-
51
- ## Your First Agent
52
-
53
- Here's a simple example to get you started:
54
-
55
- ```python
56
- import asyncio
57
- import os
58
- from dotenv import load_dotenv
59
- from langchain_openai import ChatOpenAI
60
- from mcp_use import MCPAgent, MCPClient
61
-
62
- async def main():
63
- # Load environment variables
64
- load_dotenv()
65
-
66
- # Create MCPClient from config file
67
- client = MCPClient.from_config_file("browser_mcp.json")
68
-
69
- # Create LLM
70
- llm = ChatOpenAI(model="gpt-4o")
71
-
72
- # Create agent with the client
73
- agent = MCPAgent(llm=llm, client=client, max_steps=30)
74
-
75
- # Run the query
76
- result = await agent.run(
77
- "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
78
- )
79
- print(f"\nResult: {result}")
80
-
81
- if __name__ == "__main__":
82
- asyncio.run(main())
83
- ```
84
-
85
- ## Restricting Tool Access
86
-
87
- You can control which tools are available to the agent:
88
-
89
- ```python
90
- import asyncio
91
- import os
92
- from dotenv import load_dotenv
93
- from langchain_openai import ChatOpenAI
94
- from mcp_use import MCPAgent, MCPClient
95
-
96
- async def main():
97
- # Load environment variables
98
- load_dotenv()
99
-
100
- # Create MCPClient from config file
101
- client = MCPClient.from_config_file("browser_mcp.json")
102
-
103
- # Create LLM
104
- llm = ChatOpenAI(model="gpt-4o")
105
-
106
- # Create agent with restricted tools
107
- agent = MCPAgent(
108
- llm=llm,
109
- client=client,
110
- max_steps=30,
111
- disallowed_tools=["file_system", "network"] # Restrict potentially dangerous tools
112
- )
113
-
114
- # Run the query
115
- result = await agent.run(
116
- "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
117
- )
118
- print(f"\nResult: {result}")
119
-
120
- if __name__ == "__main__":
121
- asyncio.run(main())
122
- ```
123
-
124
- ## Available MCP Servers
125
-
126
- mcp_use supports various MCP servers:
127
-
128
- - **Playwright**: For web browsing and automation
129
- - **Airbnb**: For property search and booking
130
- - **Blender**: For 3D modeling and animation
131
-
132
- Each server requires its own configuration. Check the [Configuration Guide](/essentials/configuration) for details.
133
-
134
- ## Next Steps
135
-
136
- - Learn about [Configuration Options](/essentials/configuration)
137
- - Explore [Example Use Cases](/examples)
138
- - Check out [Advanced Features](/essentials/advanced)
Binary file
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes