mcp-use 1.2.8__tar.gz → 1.2.10__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mcp-use has been flagged as potentially problematic.

Files changed (83)
  1. {mcp_use-1.2.8 → mcp_use-1.2.10}/PKG-INFO +42 -3
  2. {mcp_use-1.2.8 → mcp_use-1.2.10}/README.md +41 -2
  3. mcp_use-1.2.10/docs/api-reference/mcpagent.mdx +21 -0
  4. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/development.mdx +1 -22
  5. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/favicon.svg +1 -1
  6. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/introduction.mdx +17 -0
  7. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/quickstart.mdx +11 -0
  8. mcp_use-1.2.10/examples/mcp_everything.py +41 -0
  9. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/agents/mcpagent.py +81 -0
  10. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/connectors/base.py +20 -0
  11. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/managers/tools/search_tools.py +12 -2
  12. {mcp_use-1.2.8 → mcp_use-1.2.10}/pyproject.toml +1 -1
  13. {mcp_use-1.2.8 → mcp_use-1.2.10}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
  14. {mcp_use-1.2.8 → mcp_use-1.2.10}/.github/pull_request_template.md +0 -0
  15. {mcp_use-1.2.8 → mcp_use-1.2.10}/.github/workflows/publish.yml +0 -0
  16. {mcp_use-1.2.8 → mcp_use-1.2.10}/.github/workflows/tests.yml +0 -0
  17. {mcp_use-1.2.8 → mcp_use-1.2.10}/.gitignore +0 -0
  18. {mcp_use-1.2.8 → mcp_use-1.2.10}/.pre-commit-config.yaml +0 -0
  19. {mcp_use-1.2.8 → mcp_use-1.2.10}/CONTRIBUTING.md +0 -0
  20. {mcp_use-1.2.8 → mcp_use-1.2.10}/LICENSE +0 -0
  21. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/README.md +0 -0
  22. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/api-reference/introduction.mdx +0 -0
  23. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/building-custom-agents.mdx +0 -0
  24. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/docs.json +0 -0
  25. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/essentials/configuration.mdx +0 -0
  26. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/essentials/connection-types.mdx +0 -0
  27. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/essentials/debugging.mdx +0 -0
  28. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/essentials/llm-integration.mdx +0 -0
  29. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/essentials/server-manager.mdx +0 -0
  30. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/images/hero-dark.png +0 -0
  31. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/images/hero-light.png +0 -0
  32. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/logo/dark.svg +0 -0
  33. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/logo/light.svg +0 -0
  34. {mcp_use-1.2.8 → mcp_use-1.2.10}/docs/snippets/snippet-intro.mdx +0 -0
  35. {mcp_use-1.2.8 → mcp_use-1.2.10}/examples/airbnb_mcp.json +0 -0
  36. {mcp_use-1.2.8 → mcp_use-1.2.10}/examples/airbnb_use.py +0 -0
  37. {mcp_use-1.2.8 → mcp_use-1.2.10}/examples/blender_use.py +0 -0
  38. {mcp_use-1.2.8 → mcp_use-1.2.10}/examples/browser_mcp.json +0 -0
  39. {mcp_use-1.2.8 → mcp_use-1.2.10}/examples/browser_use.py +0 -0
  40. {mcp_use-1.2.8 → mcp_use-1.2.10}/examples/chat_example.py +0 -0
  41. {mcp_use-1.2.8 → mcp_use-1.2.10}/examples/filesystem_use.py +0 -0
  42. {mcp_use-1.2.8 → mcp_use-1.2.10}/examples/http_example.py +0 -0
  43. {mcp_use-1.2.8 → mcp_use-1.2.10}/examples/multi_server_example.py +0 -0
  44. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/__init__.py +0 -0
  45. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/adapters/__init__.py +0 -0
  46. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/adapters/base.py +0 -0
  47. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/adapters/langchain_adapter.py +0 -0
  48. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/agents/__init__.py +0 -0
  49. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/agents/base.py +0 -0
  50. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/agents/prompts/system_prompt_builder.py +0 -0
  51. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/agents/prompts/templates.py +0 -0
  52. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/client.py +0 -0
  53. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/config.py +0 -0
  54. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/connectors/__init__.py +0 -0
  55. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/connectors/http.py +0 -0
  56. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/connectors/stdio.py +0 -0
  57. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/connectors/websocket.py +0 -0
  58. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/logging.py +0 -0
  59. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/managers/__init__.py +0 -0
  60. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/managers/server_manager.py +0 -0
  61. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/managers/tools/__init__.py +0 -0
  62. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/managers/tools/base_tool.py +0 -0
  63. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/managers/tools/connect_server.py +0 -0
  64. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/managers/tools/disconnect_server.py +0 -0
  65. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/managers/tools/get_active_server.py +0 -0
  66. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/managers/tools/list_servers_tool.py +0 -0
  67. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/managers/tools/use_tool.py +0 -0
  68. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/session.py +0 -0
  69. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/task_managers/__init__.py +0 -0
  70. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/task_managers/base.py +0 -0
  71. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/task_managers/sse.py +0 -0
  72. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/task_managers/stdio.py +0 -0
  73. {mcp_use-1.2.8 → mcp_use-1.2.10}/mcp_use/task_managers/websocket.py +0 -0
  74. {mcp_use-1.2.8 → mcp_use-1.2.10}/pytest.ini +0 -0
  75. {mcp_use-1.2.8 → mcp_use-1.2.10}/ruff.toml +0 -0
  76. {mcp_use-1.2.8 → mcp_use-1.2.10}/static/image.jpg +0 -0
  77. {mcp_use-1.2.8 → mcp_use-1.2.10}/tests/conftest.py +0 -0
  78. {mcp_use-1.2.8 → mcp_use-1.2.10}/tests/unit/test_client.py +0 -0
  79. {mcp_use-1.2.8 → mcp_use-1.2.10}/tests/unit/test_config.py +0 -0
  80. {mcp_use-1.2.8 → mcp_use-1.2.10}/tests/unit/test_http_connector.py +0 -0
  81. {mcp_use-1.2.8 → mcp_use-1.2.10}/tests/unit/test_logging.py +0 -0
  82. {mcp_use-1.2.8 → mcp_use-1.2.10}/tests/unit/test_session.py +0 -0
  83. {mcp_use-1.2.8 → mcp_use-1.2.10}/tests/unit/test_stdio_connector.py +0 -0
--- mcp_use-1.2.8/PKG-INFO
+++ mcp_use-1.2.10/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcp-use
-Version: 1.2.8
+Version: 1.2.10
 Summary: MCP Library for LLMs
 Author-email: Pietro Zullo <pietro.zullo@gmail.com>
 License: MIT
@@ -50,6 +50,7 @@ Description-Content-Type: text/markdown
 [![PyPI Version](https://img.shields.io/pypi/v/mcp_use.svg)](https://pypi.org/project/mcp_use/)
 [![Python Versions](https://img.shields.io/pypi/pyversions/mcp_use.svg)](https://pypi.org/project/mcp_use/)
 [![Documentation](https://img.shields.io/badge/docs-mcp--use.io-blue)](https://docs.mcp-use.io)
+[![Website](https://img.shields.io/badge/website-mcp--use.io-blue)](https://mcp-use.io)
 [![License](https://img.shields.io/github/license/pietrozullo/mcp-use)](https://github.com/pietrozullo/mcp-use/blob/main/LICENSE)
 [![Code style: Ruff](https://img.shields.io/badge/code%20style-ruff-000000.svg)](https://github.com/astral-sh/ruff)
 [![GitHub stars](https://img.shields.io/github/stars/pietrozullo/mcp-use?style=social)](https://github.com/pietrozullo/mcp-use/stargazers)
@@ -67,7 +68,8 @@ Description-Content-Type: text/markdown
 |---------|-------------|
 | 🔄 [**Ease of use**](#quick-start) | Create your first MCP capable agent you need only 6 lines of code |
 | 🤖 [**LLM Flexibility**](#installing-langchain-providers) | Works with any langchain supported LLM that supports tool calling (OpenAI, Anthropic, Groq, LLama etc.) |
-| 🌐 [**HTTP Support**](#http-connection-example) | Direct connection to MCP servers running on specific HTTP ports |
+| 🌐 [**Code Builder**](https://mcp-use.io/builder) | Explore MCP capabilities and generate starter code with the interactive [code builder](https://mcp-use.io/builder). |
+| 🔗 [**HTTP Support**](#http-connection-example) | Direct connection to MCP servers running on specific HTTP ports |
 | ⚙️ [**Dynamic Server Selection**](#dynamic-server-selection-server-manager) | Agents can dynamically choose the most appropriate MCP server for a given task from the available pool |
 | 🧩 [**Multi-Server Support**](#multi-server-support) | Use multiple MCP servers simultaneously in a single agent |
 | 🛡️ [**Tool Restrictions**](#tool-access-control) | Restrict potentially dangerous tools like file system or network access |
@@ -184,6 +186,43 @@ Example configuration file (`browser_mcp.json`):
 
 For other settings, models, and more, check out the documentation.
 
+## Streaming Agent Output
+
+MCP-Use supports asynchronous streaming of agent output using the `astream` method on `MCPAgent`. This allows you to receive incremental results, tool actions, and intermediate steps as they are generated by the agent, enabling real-time feedback and progress reporting.
+
+### How to use
+
+Call `agent.astream(query)` and iterate over the results asynchronously:
+
+```python
+async for chunk in agent.astream("Find the best restaurant in San Francisco"):
+    print(chunk["messages"], end="", flush=True)
+```
+
+Each chunk is a dictionary containing keys such as `actions`, `steps`, `messages`, and (on the last chunk) `output`. This enables you to build responsive UIs or log agent progress in real time.
+
+#### Example: Streaming in Practice
+
+```python
+import asyncio
+import os
+from dotenv import load_dotenv
+from langchain_openai import ChatOpenAI
+from mcp_use import MCPAgent, MCPClient
+
+async def main():
+    load_dotenv()
+    client = MCPClient.from_config_file("browser_mcp.json")
+    llm = ChatOpenAI(model="gpt-4o")
+    agent = MCPAgent(llm=llm, client=client, max_steps=30)
+    async for chunk in agent.astream("Look for job at nvidia for machine learning engineer."):
+        print(chunk["messages"], end="", flush=True)
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+This streaming interface is ideal for applications that require real-time updates, such as chatbots, dashboards, or interactive notebooks.
 
 # Example Use Cases
 
@@ -344,7 +383,7 @@ if __name__ == "__main__":
 
 ## HTTP Connection Example
 
-MCP-Use now supports HTTP connections, allowing you to connect to MCP servers running on specific HTTP ports. This feature is particularly useful for integrating with web-based MCP servers.
+MCP-Use supports HTTP connections, allowing you to connect to MCP servers running on specific HTTP ports. This feature is particularly useful for integrating with web-based MCP servers.
 
 Here's an example of how to use the HTTP connection feature:
README.md: all four hunks in this file duplicate the PKG-INFO changes above verbatim, since PKG-INFO embeds the README. They are the new Website badge (@@ -9,6 +9,7 @@), the Code Builder row in the features table (@@ -26,7 +27,8 @@), the new "Streaming Agent Output" section (@@ -143,6 +145,43 @@), and the removal of "now" from the HTTP Connection Example intro (@@ -303,7 +342,7 @@).
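The new streaming docs print only `chunk["messages"]`; callers usually also want the final answer. Below is a minimal consumer sketch, not part of the release, that assumes nothing beyond the chunk keys documented above: `messages` on intermediate chunks and `output` on the last one.

```python
import asyncio

from dotenv import load_dotenv
from langchain_openai import ChatOpenAI

from mcp_use import MCPAgent, MCPClient


async def stream_and_collect(agent: MCPAgent, query: str) -> str:
    """Print incremental messages and return the agent's final output."""
    final_output = ""
    async for chunk in agent.astream(query):
        if "messages" in chunk:  # intermediate progress
            print(chunk["messages"], end="", flush=True)
        if "output" in chunk:  # present only on the last chunk
            final_output = chunk["output"]
    return final_output


async def main() -> None:
    load_dotenv()
    client = MCPClient.from_config_file("browser_mcp.json")
    agent = MCPAgent(llm=ChatOpenAI(model="gpt-4o"), client=client, max_steps=30)
    answer = await stream_and_collect(agent, "Find the best restaurant in San Francisco")
    print(f"\n\nFinal answer: {answer}")


if __name__ == "__main__":
    asyncio.run(main())
```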
--- /dev/null
+++ mcp_use-1.2.10/docs/api-reference/mcpagent.mdx
@@ -0,0 +1,21 @@
+## astream
+
+```python
+def astream(
+    query: str,
+    max_steps: int | None = None,
+    manage_connector: bool = True,
+    external_history: list[BaseMessage] | None = None,
+) -> AsyncIterator[dict]:
+```
+
+Asynchronous streaming interface for agent output. Yields incremental results, tool actions, and intermediate steps as they are generated by the agent.
+
+**Example:**
+
+```python
+async for chunk in agent.astream("hello"):
+    print(chunk["messages"], end="|", flush=True)
+```
+
+Each chunk is a dictionary containing keys such as `actions`, `steps`, `messages`, and (on the last chunk) `output`.
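The signature above also accepts `external_history`. The sketch below shows how it might be used; the only assumption beyond this doc page is that the supplied list replaces the agent's built-in memory for that call, which matches the `history_to_use` logic in the `mcpagent.py` hunk further down.

```python
from langchain_core.messages import AIMessage, HumanMessage

# A hypothetical prior conversation, supplied in place of the agent's own memory.
history = [
    HumanMessage(content="My name is Ada and I prefer vegetarian food."),
    AIMessage(content="Noted: Ada, vegetarian."),
]


async def ask_with_history(agent) -> None:
    async for chunk in agent.astream("Recommend a dinner spot for me", external_history=history):
        if "output" in chunk:
            print(chunk["output"])
```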
--- mcp_use-1.2.8/docs/development.mdx
+++ mcp_use-1.2.10/docs/development.mdx
@@ -9,7 +9,7 @@ This guide will help you set up your development environment and contribute to m
 
 ## Prerequisites
 
-- Python 3.8 or higher
+- Python 3.11 or higher
 - Git
 - Node.js and npm (for MCP server dependencies)
 
@@ -112,24 +112,3 @@ mcp-use/
 ├── static/ # Static assets
 └── pyproject.toml # Project configuration
 ```
-
-## Adding New MCP Servers
-
-To add support for a new MCP server:
-
-1. Create a new configuration template in the examples directory
-2. Add necessary server-specific code in the `mcp_use` package
-3. Update documentation with new server information
-4. Add tests for the new server functionality
-
-## Release Process
-
-1. Update version in `pyproject.toml`
-2. Update CHANGELOG.md
-3. Create a new release tag
-4. Build and publish to PyPI:
-
-```bash
-python -m build
-python -m twine upload dist/*
-```
--- mcp_use-1.2.8/docs/favicon.svg
+++ mcp_use-1.2.10/docs/favicon.svg
@@ -1,5 +1,5 @@
 <svg width="303" height="303" viewBox="0 0 303 303" fill="white" xmlns="http://www.w3.org/2000/svg">
-<rect height="100%" width="100%" rx="55px" fill="white"/>
+<rect height="100%" width="100%" rx="55px" fill="none"/>
 <path d="M106.066 106.066C86.5398 125.592 54.8816 125.592 35.3554 106.066V106.066C15.8291 86.5397 15.8291 54.8815 35.3554 35.3552V35.3552C54.8816 15.829 86.5398 15.829 106.066 35.3552V35.3552C125.592 54.8815 125.592 86.5397 106.066 106.066V106.066Z" fill="black"/>
 <path d="M267.286 267.286C247.76 286.812 216.102 286.812 196.576 267.286V267.286C177.049 247.76 177.049 216.102 196.576 196.576V196.576C216.102 177.049 247.76 177.049 267.286 196.576V196.576C286.813 216.102 286.813 247.76 267.286 267.286V267.286Z" fill="black"/>
 <path fill-rule="evenodd" clip-rule="evenodd" d="M181.957 230.04L211.425 259.508L260.922 210.011L232.851 181.94C204.215 181.726 175.645 170.695 153.796 148.846C131.947 126.997 120.915 98.4264 120.702 69.7903L92.631 41.7193L43.1335 91.2168L72.6014 120.685C100.313 121.56 127.765 132.573 148.917 153.725C170.069 174.877 181.082 202.328 181.957 230.04Z" fill="black"/>
--- mcp_use-1.2.8/docs/introduction.mdx
+++ mcp_use-1.2.10/docs/introduction.mdx
@@ -43,3 +43,20 @@ mcp_use is an open source library that enables developers to connect any Languag
     Learn how to configure mcp_use with your MCP server
   </Card>
 </CardGroup>
+
+## Streaming Agent Output
+
+MCP-Use supports asynchronous streaming of agent output using the `astream` method. This allows you to receive incremental results, tool actions, and intermediate steps as they are generated by the agent.
+
+### How to use
+
+Call `agent.astream(query)` and iterate over the results asynchronously:
+
+```python
+async for chunk in agent.astream("your query here"):
+    print(chunk["messages"], end="", flush=True)
+```
+
+Each chunk is a dictionary containing keys such as `actions`, `steps`, `messages`, and (on the last chunk) `output`. This enables real-time feedback and progress reporting in your applications.
+
+See the README for more details and usage patterns.
--- mcp_use-1.2.8/docs/quickstart.mdx
+++ mcp_use-1.2.10/docs/quickstart.mdx
@@ -333,6 +333,17 @@ if __name__ == "__main__":
 
 This example demonstrates how to connect to an MCP server running on a specific HTTP port. Make sure to start your MCP server before running this example.
 
+## Streaming Agent Output
+
+You can stream the agent's output as it is generated using the `astream` method:
+
+```python
+async for chunk in agent.astream("Find the best restaurant in San Francisco"):
+    print(chunk["messages"], end="", flush=True)
+```
+
+Each chunk contains incremental results, tool actions, and intermediate steps.
+
 ## Next Steps
 
 - Learn about [Configuration Options](/essentials/configuration)
--- /dev/null
+++ mcp_use-1.2.10/examples/mcp_everything.py
@@ -0,0 +1,41 @@
+"""
+This example shows how to test the different functionalities of MCPs using the MCP server from
+anthropic.
+"""
+
+import asyncio
+
+from dotenv import load_dotenv
+from langchain_openai import ChatOpenAI
+
+from mcp_use import MCPAgent, MCPClient
+
+everything_server = {
+    "mcpServers": {
+        "everything": {"command": "npx", "args": ["-y", "@modelcontextprotocol/server-everything"]}
+    }
+}
+
+
+async def main():
+    """Run the example using a configuration file."""
+    load_dotenv()
+    client = MCPClient(config=everything_server)
+    llm = ChatOpenAI(model="gpt-4o", temperature=0)
+    agent = MCPAgent(llm=llm, client=client, max_steps=30)
+
+    result = await agent.run(
+        """
+        Hello, you are a tester can you please answer the follwing questions:
+        - Which resources do you have access to?
+        - Which prompts do you have access to?
+        - Which tools do you have access to?
+        """,
+        max_steps=30,
+    )
+    print(f"\nResult: {result}")
+
+
+if __name__ == "__main__":
+    # Run the appropriate example
+    asyncio.run(main())
--- mcp_use-1.2.8/mcp_use/agents/mcpagent.py
+++ mcp_use-1.2.10/mcp_use/agents/mcpagent.py
@@ -6,6 +6,7 @@ to provide a simple interface for using MCP tools with different LLMs.
 """
 
 import logging
+from collections.abc import AsyncIterator
 
 from langchain.agents import AgentExecutor, create_tool_calling_agent
 from langchain.globals import set_debug
@@ -304,6 +305,86 @@ class MCPAgent:
         """
         return self.disallowed_tools
 
+    async def _generate_response_chunks_async(
+        self,
+        query: str,
+        max_steps: int | None = None,
+        manage_connector: bool = True,
+        external_history: list[BaseMessage] | None = None,
+    ) -> AsyncIterator[str]:
+        """Internal async generator yielding response chunks.
+
+        The implementation purposefully keeps the logic compact:
+        1. Ensure the agent is initialised (optionally handling connector
+           lifecycle).
+        2. Forward the *same* inputs we use for ``run`` to LangChain's
+           ``AgentExecutor.astream``.
+        3. Diff the growing ``output`` field coming from LangChain and yield
+           only the new part so the caller receives *incremental* chunks.
+        4. Persist conversation history when memory is enabled.
+        """
+
+        # 1. Initialise on-demand ------------------------------------------------
+        initialised_here = False
+        if (manage_connector and not self._initialized) or (
+            not self._initialized and self.auto_initialize
+        ):
+            await self.initialize()
+            initialised_here = True
+
+        if not self._agent_executor:
+            raise RuntimeError("MCP agent failed to initialise – call initialise() first?")
+
+        # 2. Build inputs --------------------------------------------------------
+        effective_max_steps = max_steps or self.max_steps
+        self._agent_executor.max_iterations = effective_max_steps
+
+        if self.memory_enabled:
+            self.add_to_history(HumanMessage(content=query))
+
+        history_to_use = (
+            external_history if external_history is not None else self._conversation_history
+        )
+        langchain_history: list[BaseMessage] = [
+            m for m in history_to_use if isinstance(m, HumanMessage | AIMessage)
+        ]
+        inputs = {"input": query, "chat_history": langchain_history}
+
+        # 3. Stream & diff -------------------------------------------------------
+        accumulated = ""
+        async for event in self._agent_executor.astream(inputs):
+            yield event
+
+        # 4. Persist assistant message ------------------------------------------
+        if self.memory_enabled and accumulated:
+            self.add_to_history(AIMessage(content=accumulated))
+
+        # 5. House-keeping -------------------------------------------------------
+        if initialised_here and manage_connector:
+            await self.close()
+
+    async def astream(
+        self,
+        query: str,
+        max_steps: int | None = None,
+        manage_connector: bool = True,
+        external_history: list[BaseMessage] | None = None,
+    ) -> AsyncIterator[str]:
+        """Asynchronous streaming interface.
+
+        Example::
+
+            async for chunk in agent.astream("hello"):
+                print(chunk, end="|", flush=True)
+        """
+        async for chunk in self._generate_response_chunks_async(
+            query=query,
+            max_steps=max_steps,
+            manage_connector=manage_connector,
+            external_history=external_history,
+        ):
+            yield chunk
+
     async def run(
         self,
         query: str,
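Note that the generator forwards `AgentExecutor.astream` events unchanged (step 3 yields the raw `event`), so a consumer can dispatch on LangChain's standard chunk keys. A sketch under that assumption, using LangChain's `AgentAction`/`AgentStep` attributes rather than anything mcp-use-specific:

```python
async def trace_run(agent, query: str) -> None:
    """Log tool calls, tool results, and the final answer as they stream."""
    async for chunk in agent.astream(query):
        if "actions" in chunk:  # the model decided to call one or more tools
            for action in chunk["actions"]:
                print(f"-> tool call: {action.tool}({action.tool_input})")
        elif "steps" in chunk:  # a tool finished; its result is the observation
            for step in chunk["steps"]:
                print(f"<- observation: {step.observation!r}")
        elif "output" in chunk:  # final chunk
            print(f"== final answer: {chunk['output']}")
```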
--- mcp_use-1.2.8/mcp_use/connectors/base.py
+++ mcp_use-1.2.10/mcp_use/connectors/base.py
@@ -131,6 +131,26 @@ class BaseConnector(ABC):
         resource = await self.client.read_resource(uri)
         return resource.content, resource.mimeType
 
+    async def list_prompts(self) -> list[dict[str, Any]]:
+        """List all available prompts from the MCP implementation."""
+        if not self.client:
+            raise RuntimeError("MCP client is not connected")
+
+        logger.debug("Listing prompts")
+        prompts = await self.client.list_prompts()
+        return prompts
+
+    async def get_prompt(
+        self, name: str, arguments: dict[str, Any] | None = None
+    ) -> tuple[bytes, str]:
+        """Get a prompt by name."""
+        if not self.client:
+            raise RuntimeError("MCP client is not connected")
+
+        logger.debug(f"Getting prompt: {name}")
+        prompt = await self.client.get_prompt(name, arguments)
+        return prompt
+
     async def request(self, method: str, params: dict[str, Any] | None = None) -> Any:
         """Send a raw request to the MCP implementation."""
         if not self.client:
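A short sketch of how the new prompt methods could be exercised. The `connector` here is any connected `BaseConnector` subclass; treating each listed prompt as a dict with a `name` key is an assumption consistent with the `list[dict[str, Any]]` return annotation above, not something the diff guarantees.

```python
from typing import Any


async def show_prompts(connector) -> None:
    """List the server's prompts, then fetch the first one by name."""
    prompts: list[dict[str, Any]] = await connector.list_prompts()
    for prompt_info in prompts:
        print(prompt_info)

    if prompts:
        name = prompts[0]["name"]  # assumes a "name" key in each prompt entry
        prompt = await connector.get_prompt(name, arguments=None)
        print(prompt)
```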
--- mcp_use-1.2.8/mcp_use/managers/tools/search_tools.py
+++ mcp_use-1.2.10/mcp_use/managers/tools/search_tools.py
@@ -3,7 +3,6 @@ import time
 from typing import ClassVar
 
 import numpy as np
-from fastembed import TextEmbedding
 from langchain_core.tools import BaseTool
 from pydantic import BaseModel, Field
 
@@ -114,11 +113,22 @@ class ToolSearchEngine:
         if self.model is not None:
             return True
 
+        try:
+            from fastembed import TextEmbedding  # optional dependency install with [search]
+        except ImportError:
+            logger.error(
+                "The 'fastembed' library is not installed. "
+                "To use the search functionality, please install it by running: "
+                "pip install mcp-use[search]"
+            )
+            return False
+
         try:
             self.model = TextEmbedding(model_name="BAAI/bge-small-en-v1.5")
             self.embedding_function = lambda texts: list(self.model.embed(texts))
             return True
-        except Exception:
+        except Exception as e:
+            logger.error(f"Failed to load the embedding model: {e}")
             return False
 
     async def start_indexing(self) -> None:
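This hunk turns `fastembed` into an optional extra that is imported lazily on first use. The same guard pattern, reduced to a standalone sketch (the model name comes from the diff; the function name is illustrative):

```python
def load_embedder():
    """Return an embedding callable, or None when the optional extra is missing."""
    try:
        # Optional dependency: available via `pip install "mcp-use[search]"`.
        from fastembed import TextEmbedding
    except ImportError:
        return None
    try:
        model = TextEmbedding(model_name="BAAI/bge-small-en-v1.5")
        return lambda texts: list(model.embed(texts))
    except Exception:
        return None
```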
--- mcp_use-1.2.8/pyproject.toml
+++ mcp_use-1.2.10/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "mcp-use"
-version = "1.2.8"
+version = "1.2.10"
 description = "MCP Library for LLMs"
 authors = [
     {name = "Pietro Zullo", email = "pietro.zullo@gmail.com"}
All remaining files are unchanged between the two versions.