mcp-use 1.0.2__tar.gz → 1.1.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mcp-use might be problematic. Click here for more details.

Files changed (66) hide show
  1. mcp_use-1.1.4/.github/ISSUE_TEMPLATE/bug_report.md +38 -0
  2. {mcp_use-1.0.2 → mcp_use-1.1.4}/PKG-INFO +152 -18
  3. {mcp_use-1.0.2 → mcp_use-1.1.4}/README.md +151 -17
  4. {mcp_use-1.0.2 → mcp_use-1.1.4}/docs/api-reference/introduction.mdx +86 -15
  5. {mcp_use-1.0.2 → mcp_use-1.1.4}/docs/essentials/configuration.mdx +47 -20
  6. {mcp_use-1.0.2 → mcp_use-1.1.4}/docs/introduction.mdx +3 -0
  7. mcp_use-1.1.4/docs/logo/dark.svg +7 -0
  8. mcp_use-1.1.4/docs/logo/light.svg +7 -0
  9. mcp_use-1.1.4/docs/quickstart.mdx +234 -0
  10. mcp_use-1.1.4/examples/http_example.py +53 -0
  11. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/agents/langchain_agent.py +13 -11
  12. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/agents/mcpagent.py +35 -0
  13. {mcp_use-1.0.2 → mcp_use-1.1.4}/pyproject.toml +1 -1
  14. mcp_use-1.1.4/static/image.jpg +0 -0
  15. mcp_use-1.0.2/docs/logo/dark.svg +0 -12
  16. mcp_use-1.0.2/docs/logo/light.svg +0 -12
  17. mcp_use-1.0.2/docs/quickstart.mdx +0 -99
  18. mcp_use-1.0.2/static/image.jpg +0 -0
  19. {mcp_use-1.0.2 → mcp_use-1.1.4}/.github/workflows/publish.yml +0 -0
  20. {mcp_use-1.0.2 → mcp_use-1.1.4}/.github/workflows/tests.yml +0 -0
  21. {mcp_use-1.0.2 → mcp_use-1.1.4}/.gitignore +0 -0
  22. {mcp_use-1.0.2 → mcp_use-1.1.4}/.pre-commit-config.yaml +0 -0
  23. {mcp_use-1.0.2 → mcp_use-1.1.4}/LICENSE +0 -0
  24. {mcp_use-1.0.2 → mcp_use-1.1.4}/docs/README.md +0 -0
  25. {mcp_use-1.0.2 → mcp_use-1.1.4}/docs/development.mdx +0 -0
  26. {mcp_use-1.0.2 → mcp_use-1.1.4}/docs/docs.json +0 -0
  27. {mcp_use-1.0.2 → mcp_use-1.1.4}/docs/essentials/connection-types.mdx +0 -0
  28. {mcp_use-1.0.2 → mcp_use-1.1.4}/docs/essentials/llm-integration.mdx +0 -0
  29. {mcp_use-1.0.2 → mcp_use-1.1.4}/docs/favicon.svg +0 -0
  30. {mcp_use-1.0.2 → mcp_use-1.1.4}/docs/images/hero-dark.png +0 -0
  31. {mcp_use-1.0.2 → mcp_use-1.1.4}/docs/images/hero-light.png +0 -0
  32. {mcp_use-1.0.2 → mcp_use-1.1.4}/docs/snippets/snippet-intro.mdx +0 -0
  33. {mcp_use-1.0.2 → mcp_use-1.1.4}/examples/airbnb_mcp.json +0 -0
  34. {mcp_use-1.0.2 → mcp_use-1.1.4}/examples/airbnb_use.py +0 -0
  35. {mcp_use-1.0.2 → mcp_use-1.1.4}/examples/blender_use.py +0 -0
  36. {mcp_use-1.0.2 → mcp_use-1.1.4}/examples/browser_mcp.json +0 -0
  37. {mcp_use-1.0.2 → mcp_use-1.1.4}/examples/browser_use.py +0 -0
  38. {mcp_use-1.0.2 → mcp_use-1.1.4}/examples/chat_example.py +0 -0
  39. {mcp_use-1.0.2 → mcp_use-1.1.4}/examples/filesystem_use.py +0 -0
  40. {mcp_use-1.0.2 → mcp_use-1.1.4}/examples/multi_server_example.py +0 -0
  41. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/__init__.py +0 -0
  42. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/agents/__init__.py +0 -0
  43. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/agents/base.py +0 -0
  44. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/agents/prompts/default.py +0 -0
  45. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/client.py +0 -0
  46. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/config.py +0 -0
  47. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/connectors/__init__.py +0 -0
  48. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/connectors/base.py +0 -0
  49. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/connectors/http.py +0 -0
  50. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/connectors/stdio.py +0 -0
  51. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/connectors/websocket.py +0 -0
  52. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/logging.py +0 -0
  53. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/session.py +0 -0
  54. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/task_managers/__init__.py +0 -0
  55. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/task_managers/base.py +0 -0
  56. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/task_managers/sse.py +0 -0
  57. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/task_managers/stdio.py +0 -0
  58. {mcp_use-1.0.2 → mcp_use-1.1.4}/mcp_use/task_managers/websocket.py +0 -0
  59. {mcp_use-1.0.2 → mcp_use-1.1.4}/pytest.ini +0 -0
  60. {mcp_use-1.0.2 → mcp_use-1.1.4}/tests/conftest.py +0 -0
  61. {mcp_use-1.0.2 → mcp_use-1.1.4}/tests/unit/test_client.py +0 -0
  62. {mcp_use-1.0.2 → mcp_use-1.1.4}/tests/unit/test_config.py +0 -0
  63. {mcp_use-1.0.2 → mcp_use-1.1.4}/tests/unit/test_http_connector.py +0 -0
  64. {mcp_use-1.0.2 → mcp_use-1.1.4}/tests/unit/test_logging.py +0 -0
  65. {mcp_use-1.0.2 → mcp_use-1.1.4}/tests/unit/test_session.py +0 -0
  66. {mcp_use-1.0.2 → mcp_use-1.1.4}/tests/unit/test_stdio_connector.py +0 -0
@@ -0,0 +1,38 @@
1
+ ---
2
+ name: Bug report
3
+ about: Create a report to help us improve
4
+ title: ''
5
+ labels: ''
6
+ assignees: ''
7
+
8
+ ---
9
+
10
+ **Describe the bug**
11
+ A clear and concise description of what the bug is.
12
+
13
+ **To Reproduce**
14
+ Steps to reproduce the behavior:
15
+ 1. Go to '...'
16
+ 2. Click on '....'
17
+ 3. Scroll down to '....'
18
+ 4. See error
19
+
20
+ **Expected behavior**
21
+ A clear and concise description of what you expected to happen.
22
+
23
+ **Screenshots**
24
+ If applicable, add screenshots to help explain your problem.
25
+
26
+ **Desktop (please complete the following information):**
27
+ - OS: [e.g. iOS]
28
+ - Browser [e.g. chrome, safari]
29
+ - Version [e.g. 22]
30
+
31
+ **Smartphone (please complete the following information):**
32
+ - Device: [e.g. iPhone6]
33
+ - OS: [e.g. iOS8.1]
34
+ - Browser [e.g. stock browser, safari]
35
+ - Version [e.g. 22]
36
+
37
+ **Additional context**
38
+ Add any other context about the problem here.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mcp-use
3
- Version: 1.0.2
3
+ Version: 1.1.4
4
4
  Summary: MCP Library for LLMs
5
5
  Author-email: Pietro Zullo <pietro.zullo@gmail.com>
6
6
  License: MIT
@@ -56,6 +56,19 @@ Description-Content-Type: text/markdown
56
56
 
57
57
  💡 Let developers easily connect any LLM to tools like web browsing, file operations, and more.
58
58
 
59
+ # Features
60
+
61
+ ## ✨ Key Features
62
+
63
+ | Feature | Description |
64
+ |---------|-------------|
65
+ | 🔄 **Ease of use** | Create your first MCP-capable agent with only 6 lines of code |
66
+ | 🤖 **LLM Flexibility** | Works with any LangChain-supported LLM that has tool-calling capabilities (OpenAI, Anthropic, Groq, Llama, etc.) |
67
+ | 🌐 **HTTP Support** | Direct connection to MCP servers running on specific HTTP ports |
68
+ | 🧩 **Multi-Server Support** | Use multiple MCP servers simultaneously in a single agent |
69
+ | 🛡️ **Tool Restrictions** | Restrict potentially dangerous tools like file system or network access |
70
+
71
+
59
72
  # Quick start
60
73
 
61
74
  With pip:
@@ -72,7 +85,30 @@ cd mcp-use
72
85
  pip install -e .
73
86
  ```
74
87
 
75
- Spin up your agent:
88
+ ### Installing LangChain Providers
89
+
90
+ mcp_use works with various LLM providers through LangChain. You'll need to install the appropriate LangChain provider package for your chosen LLM. For example:
91
+
92
+ ```bash
93
+ # For OpenAI
94
+ pip install langchain-openai
95
+
96
+ # For Anthropic
97
+ pip install langchain-anthropic
98
+
99
+ # For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/)
100
+ ```
101
+
102
+ and add your API keys for the provider you want to use to your `.env` file.
103
+
104
+ ```bash
105
+ OPENAI_API_KEY=
106
+ ANTHROPIC_API_KEY=
107
+ ```
108
+
109
+ > **Important**: Only models with tool calling capabilities can be used with mcp_use. Make sure your chosen model supports function calling or tool use.
110
+
111
+ ### Spin up your agent:
76
112
 
77
113
  ```python
78
114
  import asyncio
@@ -85,8 +121,21 @@ async def main():
85
121
  # Load environment variables
86
122
  load_dotenv()
87
123
 
88
- # Create MCPClient from config file
89
- client = MCPClient.from_config_file("browser_mcp.json")
124
+ # Create configuration dictionary
125
+ config = {
126
+ "mcpServers": {
127
+ "playwright": {
128
+ "command": "npx",
129
+ "args": ["@playwright/mcp@latest"],
130
+ "env": {
131
+ "DISPLAY": ":1"
132
+ }
133
+ }
134
+ }
135
+ }
136
+
137
+ # Create MCPClient from configuration dictionary
138
+ client = MCPClient.from_dict(config)
90
139
 
91
140
  # Create LLM
92
141
  llm = ChatOpenAI(model="gpt-4o")
@@ -96,7 +145,7 @@ async def main():
96
145
 
97
146
  # Run the query
98
147
  result = await agent.run(
99
- "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
148
+ "Find the best restaurant in San Francisco",
100
149
  )
101
150
  print(f"\nResult: {result}")
102
151
 
@@ -104,6 +153,14 @@ if __name__ == "__main__":
104
153
  asyncio.run(main())
105
154
  ```
106
155
 
156
+ You can also add the servers configuration from a config file like this:
157
+
158
+ ```python
159
+ client = MCPClient.from_config_file(
160
+ os.path.join("browser_mcp.json")
161
+ )
162
+ ```
163
+
107
164
  Example configuration file (`browser_mcp.json`):
108
165
 
109
166
  ```json
@@ -120,15 +177,10 @@ Example configuration file (`browser_mcp.json`):
120
177
  }
121
178
  ```
122
179
 
123
- Add your API keys for the provider you want to use to your `.env` file.
124
-
125
- ```bash
126
- OPENAI_API_KEY=
127
- ANTHROPIC_API_KEY=
128
- ```
129
-
130
180
  For other settings, models, and more, check out the documentation.
131
181
 
182
+ # Features
183
+
132
184
  # Example Use Cases
133
185
 
134
186
  ## Web Browsing with Playwright
@@ -286,6 +338,55 @@ if __name__ == "__main__":
286
338
  asyncio.run(main())
287
339
  ```
288
340
 
341
+ ## HTTP Connection Example
342
+
343
+ MCP-Use now supports HTTP connections, allowing you to connect to MCP servers running on specific HTTP ports. This feature is particularly useful for integrating with web-based MCP servers.
344
+
345
+ Here's an example of how to use the HTTP connection feature:
346
+
347
+ ```python
348
+ import asyncio
349
+ import os
350
+ from dotenv import load_dotenv
351
+ from langchain_openai import ChatOpenAI
352
+ from mcp_use import MCPAgent, MCPClient
353
+
354
+ async def main():
355
+ """Run the example using a configuration file."""
356
+ # Load environment variables
357
+ load_dotenv()
358
+
359
+ config = {
360
+ "mcpServers": {
361
+ "http": {
362
+ "url": "http://localhost:8931/sse"
363
+ }
364
+ }
365
+ }
366
+
367
+ # Create MCPClient from config file
368
+ client = MCPClient.from_dict(config)
369
+
370
+ # Create LLM
371
+ llm = ChatOpenAI(model="gpt-4o")
372
+
373
+ # Create agent with the client
374
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
375
+
376
+ # Run the query
377
+ result = await agent.run(
378
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
379
+ max_steps=30,
380
+ )
381
+ print(f"\nResult: {result}")
382
+
383
+ if __name__ == "__main__":
384
+ # Run the appropriate example
385
+ asyncio.run(main())
386
+ ```
387
+
388
+ This example demonstrates how to connect to an MCP server running on a specific HTTP port. Make sure to start your MCP server before running this example.
389
+
289
390
  # Multi-Server Support
290
391
 
291
392
  MCP-Use supports working with multiple MCP servers simultaneously, allowing you to combine tools from different servers in a single agent. This is useful for complex tasks that require multiple capabilities, such as web browsing combined with file operations or 3D modeling.
@@ -346,25 +447,58 @@ if __name__ == "__main__":
346
447
  asyncio.run(main())
347
448
  ```
348
449
 
349
- ## Roadmap
450
+ # Tool Access Control
451
+
452
+ MCP-Use allows you to restrict which tools are available to the agent, providing better security and control over agent capabilities:
453
+
454
+ ```python
455
+ import asyncio
456
+ from mcp_use import MCPAgent, MCPClient
457
+ from langchain_openai import ChatOpenAI
458
+
459
+ async def main():
460
+ # Create client
461
+ client = MCPClient.from_config_file("config.json")
462
+
463
+ # Create agent with restricted tools
464
+ agent = MCPAgent(
465
+ llm=ChatOpenAI(model="gpt-4"),
466
+ client=client,
467
+ disallowed_tools=["file_system", "network"] # Restrict potentially dangerous tools
468
+ )
469
+
470
+ # Run a query with restricted tool access
471
+ result = await agent.run(
472
+ "Find the best restaurant in San Francisco"
473
+ )
474
+ print(result)
475
+
476
+ # Clean up
477
+ await client.close_all_sessions()
478
+
479
+ if __name__ == "__main__":
480
+ asyncio.run(main())
481
+ ```
482
+
483
+ # Roadmap
350
484
 
351
485
  <ul>
352
486
  <li>[x] Multiple Servers at once </li>
353
- <li>[ ] Test remote connectors (http, ws)</li>
487
+ <li>[x] Test remote connectors (http, ws)</li>
354
488
  <li>[ ] ... </li>
355
489
  </ul>
356
490
 
357
- ## Contributing
491
+ # Contributing
358
492
 
359
493
  We love contributions! Feel free to open issues for bugs or feature requests.
360
494
 
361
- ## Requirements
495
+ # Requirements
362
496
 
363
497
  - Python 3.11+
364
498
  - MCP implementation (like Playwright MCP)
365
499
  - LangChain and appropriate model libraries (OpenAI, Anthropic, etc.)
366
500
 
367
- ## Citation
501
+ # Citation
368
502
 
369
503
  If you use MCP-Use in your research or project, please cite:
370
504
 
@@ -378,6 +512,6 @@ If you use MCP-Use in your research or project, please cite:
378
512
  }
379
513
  ```
380
514
 
381
- ## License
515
+ # License
382
516
 
383
517
  MIT
@@ -17,6 +17,19 @@
17
17
 
18
18
  💡 Let developers easily connect any LLM to tools like web browsing, file operations, and more.
19
19
 
20
+ # Features
21
+
22
+ ## ✨ Key Features
23
+
24
+ | Feature | Description |
25
+ |---------|-------------|
26
+ | 🔄 **Ease of use** | Create your first MCP-capable agent with only 6 lines of code |
27
+ | 🤖 **LLM Flexibility** | Works with any LangChain-supported LLM that has tool-calling capabilities (OpenAI, Anthropic, Groq, Llama, etc.) |
28
+ | 🌐 **HTTP Support** | Direct connection to MCP servers running on specific HTTP ports |
29
+ | 🧩 **Multi-Server Support** | Use multiple MCP servers simultaneously in a single agent |
30
+ | 🛡️ **Tool Restrictions** | Restrict potentially dangerous tools like file system or network access |
31
+
32
+
20
33
  # Quick start
21
34
 
22
35
  With pip:
@@ -33,7 +46,30 @@ cd mcp-use
33
46
  pip install -e .
34
47
  ```
35
48
 
36
- Spin up your agent:
49
+ ### Installing LangChain Providers
50
+
51
+ mcp_use works with various LLM providers through LangChain. You'll need to install the appropriate LangChain provider package for your chosen LLM. For example:
52
+
53
+ ```bash
54
+ # For OpenAI
55
+ pip install langchain-openai
56
+
57
+ # For Anthropic
58
+ pip install langchain-anthropic
59
+
60
+ # For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/)
61
+ ```
62
+
63
+ and add your API keys for the provider you want to use to your `.env` file.
64
+
65
+ ```bash
66
+ OPENAI_API_KEY=
67
+ ANTHROPIC_API_KEY=
68
+ ```
69
+
70
+ > **Important**: Only models with tool calling capabilities can be used with mcp_use. Make sure your chosen model supports function calling or tool use.
71
+
72
+ ### Spin up your agent:
37
73
 
38
74
  ```python
39
75
  import asyncio
@@ -46,8 +82,21 @@ async def main():
46
82
  # Load environment variables
47
83
  load_dotenv()
48
84
 
49
- # Create MCPClient from config file
50
- client = MCPClient.from_config_file("browser_mcp.json")
85
+ # Create configuration dictionary
86
+ config = {
87
+ "mcpServers": {
88
+ "playwright": {
89
+ "command": "npx",
90
+ "args": ["@playwright/mcp@latest"],
91
+ "env": {
92
+ "DISPLAY": ":1"
93
+ }
94
+ }
95
+ }
96
+ }
97
+
98
+ # Create MCPClient from configuration dictionary
99
+ client = MCPClient.from_dict(config)
51
100
 
52
101
  # Create LLM
53
102
  llm = ChatOpenAI(model="gpt-4o")
@@ -57,7 +106,7 @@ async def main():
57
106
 
58
107
  # Run the query
59
108
  result = await agent.run(
60
- "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
109
+ "Find the best restaurant in San Francisco",
61
110
  )
62
111
  print(f"\nResult: {result}")
63
112
 
@@ -65,6 +114,14 @@ if __name__ == "__main__":
65
114
  asyncio.run(main())
66
115
  ```
67
116
 
117
+ You can also add the servers configuration from a config file like this:
118
+
119
+ ```python
120
+ client = MCPClient.from_config_file(
121
+ os.path.join("browser_mcp.json")
122
+ )
123
+ ```
124
+
68
125
  Example configuration file (`browser_mcp.json`):
69
126
 
70
127
  ```json
@@ -81,15 +138,10 @@ Example configuration file (`browser_mcp.json`):
81
138
  }
82
139
  ```
83
140
 
84
- Add your API keys for the provider you want to use to your `.env` file.
85
-
86
- ```bash
87
- OPENAI_API_KEY=
88
- ANTHROPIC_API_KEY=
89
- ```
90
-
91
141
  For other settings, models, and more, check out the documentation.
92
142
 
143
+ # Features
144
+
93
145
  # Example Use Cases
94
146
 
95
147
  ## Web Browsing with Playwright
@@ -247,6 +299,55 @@ if __name__ == "__main__":
247
299
  asyncio.run(main())
248
300
  ```
249
301
 
302
+ ## HTTP Connection Example
303
+
304
+ MCP-Use now supports HTTP connections, allowing you to connect to MCP servers running on specific HTTP ports. This feature is particularly useful for integrating with web-based MCP servers.
305
+
306
+ Here's an example of how to use the HTTP connection feature:
307
+
308
+ ```python
309
+ import asyncio
310
+ import os
311
+ from dotenv import load_dotenv
312
+ from langchain_openai import ChatOpenAI
313
+ from mcp_use import MCPAgent, MCPClient
314
+
315
+ async def main():
316
+ """Run the example using a configuration file."""
317
+ # Load environment variables
318
+ load_dotenv()
319
+
320
+ config = {
321
+ "mcpServers": {
322
+ "http": {
323
+ "url": "http://localhost:8931/sse"
324
+ }
325
+ }
326
+ }
327
+
328
+ # Create MCPClient from config file
329
+ client = MCPClient.from_dict(config)
330
+
331
+ # Create LLM
332
+ llm = ChatOpenAI(model="gpt-4o")
333
+
334
+ # Create agent with the client
335
+ agent = MCPAgent(llm=llm, client=client, max_steps=30)
336
+
337
+ # Run the query
338
+ result = await agent.run(
339
+ "Find the best restaurant in San Francisco USING GOOGLE SEARCH",
340
+ max_steps=30,
341
+ )
342
+ print(f"\nResult: {result}")
343
+
344
+ if __name__ == "__main__":
345
+ # Run the appropriate example
346
+ asyncio.run(main())
347
+ ```
348
+
349
+ This example demonstrates how to connect to an MCP server running on a specific HTTP port. Make sure to start your MCP server before running this example.
350
+
250
351
  # Multi-Server Support
251
352
 
252
353
  MCP-Use supports working with multiple MCP servers simultaneously, allowing you to combine tools from different servers in a single agent. This is useful for complex tasks that require multiple capabilities, such as web browsing combined with file operations or 3D modeling.
@@ -307,25 +408,58 @@ if __name__ == "__main__":
307
408
  asyncio.run(main())
308
409
  ```
309
410
 
310
- ## Roadmap
411
+ # Tool Access Control
412
+
413
+ MCP-Use allows you to restrict which tools are available to the agent, providing better security and control over agent capabilities:
414
+
415
+ ```python
416
+ import asyncio
417
+ from mcp_use import MCPAgent, MCPClient
418
+ from langchain_openai import ChatOpenAI
419
+
420
+ async def main():
421
+ # Create client
422
+ client = MCPClient.from_config_file("config.json")
423
+
424
+ # Create agent with restricted tools
425
+ agent = MCPAgent(
426
+ llm=ChatOpenAI(model="gpt-4"),
427
+ client=client,
428
+ disallowed_tools=["file_system", "network"] # Restrict potentially dangerous tools
429
+ )
430
+
431
+ # Run a query with restricted tool access
432
+ result = await agent.run(
433
+ "Find the best restaurant in San Francisco"
434
+ )
435
+ print(result)
436
+
437
+ # Clean up
438
+ await client.close_all_sessions()
439
+
440
+ if __name__ == "__main__":
441
+ asyncio.run(main())
442
+ ```
443
+
444
+ # Roadmap
311
445
 
312
446
  <ul>
313
447
  <li>[x] Multiple Servers at once </li>
314
- <li>[ ] Test remote connectors (http, ws)</li>
448
+ <li>[x] Test remote connectors (http, ws)</li>
315
449
  <li>[ ] ... </li>
316
450
  </ul>
317
451
 
318
- ## Contributing
452
+ # Contributing
319
453
 
320
454
  We love contributions! Feel free to open issues for bugs or feature requests.
321
455
 
322
- ## Requirements
456
+ # Requirements
323
457
 
324
458
  - Python 3.11+
325
459
  - MCP implementation (like Playwright MCP)
326
460
  - LangChain and appropriate model libraries (OpenAI, Anthropic, etc.)
327
461
 
328
- ## Citation
462
+ # Citation
329
463
 
330
464
  If you use MCP-Use in your research or project, please cite:
331
465
 
@@ -339,6 +473,6 @@ If you use MCP-Use in your research or project, please cite:
339
473
  }
340
474
  ```
341
475
 
342
- ## License
476
+ # License
343
477
 
344
478
  MIT
@@ -127,24 +127,26 @@ agent = MCPAgent(
127
127
  memory_enabled=True,
128
128
  system_prompt=None,
129
129
  system_prompt_template=None,
130
- additional_instructions=None
130
+ additional_instructions=None,
131
+ disallowed_tools=None
131
132
  )
132
133
  ```
133
134
 
134
- | Parameter | Type | Required | Default | Description |
135
- | ------------------------- | ------------------- | -------- | ------- | ------------------------------------------ |
136
- | `llm` | BaseLanguageModel | Yes | - | Any LangChain-compatible language model |
137
- | `client` | MCPClient | No | None | The MCPClient instance |
138
- | `connectors` | list[BaseConnector] | No | None | List of connectors if not using client |
139
- | `server_name` | str | No | None | Name of the server to use |
140
- | `max_steps` | int | No | 5 | Maximum number of steps the agent can take |
141
- | `auto_initialize` | bool | No | False | Whether to initialize automatically |
142
- | `memory_enabled` | bool | No | True | Whether to enable memory |
143
- | `system_prompt` | str | No | None | Custom system prompt |
144
- | `system_prompt_template` | str | No | None | Custom system prompt template |
145
- | `additional_instructions` | str | No | None | Additional instructions for the agent |
146
- | `session_options` | dict | No | {} | Additional options for session creation |
147
- | `output_parser` | OutputParser | No | None | Custom output parser for LLM responses |
135
+ | Parameter | Type | Required | Default | Description |
136
+ | ------------------------- | ------------------- | -------- | ------- | ------------------------------------------------------------ |
137
+ | `llm` | BaseLanguageModel | Yes | - | Any LangChain-compatible language model |
138
+ | `client` | MCPClient | No | None | The MCPClient instance |
139
+ | `connectors` | list[BaseConnector] | No | None | List of connectors if not using client |
140
+ | `server_name` | str | No | None | Name of the server to use |
141
+ | `max_steps` | int | No | 5 | Maximum number of steps the agent can take |
142
+ | `auto_initialize` | bool | No | False | Whether to initialize automatically |
143
+ | `memory_enabled` | bool | No | True | Whether to enable memory |
144
+ | `system_prompt` | str | No | None | Custom system prompt |
145
+ | `system_prompt_template` | str | No | None | Custom system prompt template |
146
+ | `additional_instructions` | str | No | None | Additional instructions for the agent |
147
+ | `session_options` | dict | No | {} | Additional options for session creation |
148
+ | `output_parser` | OutputParser | No | None | Custom output parser for LLM responses |
149
+ | `disallowed_tools` | list[str] | No | None | List of tool names that should not be available to the agent |
148
150
 
149
151
  **When to use different parameters**:
150
152
 
@@ -176,6 +178,11 @@ agent = MCPAgent(
176
178
  - **session_options**:
177
179
  - Customize timeout for long-running server operations
178
180
  - Set retry parameters for unstable connections
181
+ - **disallowed_tools**:
182
+ - Use to restrict which tools the agent can access
183
+ - Helpful for security or to limit agent capabilities
184
+ - Useful when certain tools might be dangerous or unnecessary for a specific task
185
+ - Can be updated after initialization using `set_disallowed_tools()`
179
186
 
180
187
  ### Core Methods
181
188
 
@@ -234,6 +241,39 @@ history = agent.get_history()
234
241
  - When implementing custom logging
235
242
  - To provide context for follow-up queries
236
243
 
244
+ #### set_disallowed_tools
245
+
246
+ Sets the list of tools that should not be available to the agent.
247
+
248
+ ```python
249
+ agent.set_disallowed_tools(["tool1", "tool2"])
250
+ ```
251
+
252
+ | Parameter | Type | Required | Description |
253
+ | ------------------ | --------- | -------- | ----------------------------------------------- |
254
+ | `disallowed_tools` | list[str] | Yes | List of tool names that should not be available |
255
+
256
+ **When to use**:
257
+
258
+ - To restrict access to specific tools for security reasons
259
+ - To limit agent capabilities for specific tasks
260
+ - To prevent the agent from using potentially dangerous tools
261
+ - Note: Changes take effect on next initialization
262
+
263
+ #### get_disallowed_tools
264
+
265
+ Gets the list of tools that are not available to the agent.
266
+
267
+ ```python
268
+ disallowed = agent.get_disallowed_tools()
269
+ ```
270
+
271
+ **When to use**:
272
+
273
+ - To check which tools are currently restricted
274
+ - For debugging or auditing purposes
275
+ - To verify tool restrictions before running the agent
276
+
237
277
  ## Configuration Details
238
278
 
239
279
  ### MCP Server Configuration Schema
@@ -383,3 +423,34 @@ This approach is useful when:
383
423
  - The MCP server returns structured data that needs special handling
384
424
  - You need to extract specific information from responses
385
425
  - You're integrating with custom or specialized MCP servers
426
+
427
+ ### Restricting Tool Access
428
+
429
+ Control which tools are available to the agent:
430
+
431
+ ```python
432
+ from mcp_use import MCPAgent, MCPClient
433
+ from langchain_openai import ChatOpenAI
434
+
435
+ # Create agent with restricted tools
436
+ agent = MCPAgent(
437
+ llm=ChatOpenAI(model="gpt-4o"),
438
+ client=client,
439
+ disallowed_tools=["file_system", "network", "shell"] # Restrict potentially dangerous tools
440
+ )
441
+
442
+ # Update restrictions after initialization
443
+ agent.set_disallowed_tools(["file_system", "network", "shell", "database"])
444
+ await agent.initialize() # Reinitialize to apply changes
445
+
446
+ # Check current restrictions
447
+ restricted_tools = agent.get_disallowed_tools()
448
+ print(f"Restricted tools: {restricted_tools}")
449
+ ```
450
+
451
+ This approach is useful when:
452
+
453
+ - You need to restrict access to sensitive operations
454
+ - You want to limit the agent's capabilities for specific tasks
455
+ - You're concerned about security implications of certain tools
456
+ - You want to focus the agent on specific functionality