mcp-use 1.2.8__py3-none-any.whl → 1.2.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


`mcp_use/agents/mcpagent.py`:

```diff
@@ -6,6 +6,7 @@ to provide a simple interface for using MCP tools with different LLMs.
 """
 
 import logging
+from collections.abc import AsyncIterator
 
 from langchain.agents import AgentExecutor, create_tool_calling_agent
 from langchain.globals import set_debug
```

```diff
@@ -304,6 +305,86 @@ class MCPAgent:
         """
         return self.disallowed_tools
 
+    async def _generate_response_chunks_async(
+        self,
+        query: str,
+        max_steps: int | None = None,
+        manage_connector: bool = True,
+        external_history: list[BaseMessage] | None = None,
+    ) -> AsyncIterator[str]:
+        """Internal async generator yielding response chunks.
+
+        The implementation purposefully keeps the logic compact:
+        1. Ensure the agent is initialised (optionally handling connector
+           lifecycle).
+        2. Forward the *same* inputs we use for ``run`` to LangChain's
+           ``AgentExecutor.astream``.
+        3. Diff the growing ``output`` field coming from LangChain and yield
+           only the new part so the caller receives *incremental* chunks.
+        4. Persist conversation history when memory is enabled.
+        """
+
+        # 1. Initialise on-demand ------------------------------------------------
+        initialised_here = False
+        if (manage_connector and not self._initialized) or (
+            not self._initialized and self.auto_initialize
+        ):
+            await self.initialize()
+            initialised_here = True
+
+        if not self._agent_executor:
+            raise RuntimeError("MCP agent failed to initialise – call initialise() first?")
+
+        # 2. Build inputs --------------------------------------------------------
+        effective_max_steps = max_steps or self.max_steps
+        self._agent_executor.max_iterations = effective_max_steps
+
+        if self.memory_enabled:
+            self.add_to_history(HumanMessage(content=query))
+
+        history_to_use = (
+            external_history if external_history is not None else self._conversation_history
+        )
+        langchain_history: list[BaseMessage] = [
+            m for m in history_to_use if isinstance(m, HumanMessage | AIMessage)
+        ]
+        inputs = {"input": query, "chat_history": langchain_history}
+
+        # 3. Stream & diff -------------------------------------------------------
+        accumulated = ""
+        async for event in self._agent_executor.astream(inputs):
+            yield event
+
+        # 4. Persist assistant message ------------------------------------------
+        if self.memory_enabled and accumulated:
+            self.add_to_history(AIMessage(content=accumulated))
+
+        # 5. House-keeping -------------------------------------------------------
+        if initialised_here and manage_connector:
+            await self.close()
+
+    async def astream(
+        self,
+        query: str,
+        max_steps: int | None = None,
+        manage_connector: bool = True,
+        external_history: list[BaseMessage] | None = None,
+    ) -> AsyncIterator[str]:
+        """Asynchronous streaming interface.
+
+        Example::
+
+            async for chunk in agent.astream("hello"):
+                print(chunk, end="|", flush=True)
+        """
+        async for chunk in self._generate_response_chunks_async(
+            query=query,
+            max_steps=max_steps,
+            manage_connector=manage_connector,
+            external_history=external_history,
+        ):
+            yield chunk
+
     async def run(
         self,
         query: str,
```
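
Step 3 of the docstring promises to diff the growing `output` field, but the released body forwards the raw `astream` events unchanged, so `accumulated` is never populated and the history write in step 4 never fires. For readers who want the incremental-text behaviour the docstring describes, a minimal sketch of that diffing pattern follows; the helper name is hypothetical, and the event shape (a dict whose final chunks carry an `output` string) follows LangChain's `AgentExecutor.astream` convention:

```python
from collections.abc import AsyncIterator
from typing import Any


async def diff_output_chunks(executor: Any, inputs: dict[str, Any]) -> AsyncIterator[str]:
    """Yield only the unseen suffix of the growing ``output`` field.

    Hypothetical sketch, not the shipped implementation; ``executor`` is
    assumed to behave like a LangChain ``AgentExecutor``.
    """
    accumulated = ""
    async for event in executor.astream(inputs):
        output = event.get("output", "")
        if not output.startswith(accumulated):
            accumulated = ""  # output restarted; treat it as fresh text
        new_part = output[len(accumulated):]
        accumulated = output
        if new_part:
            yield new_part
```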

`mcp_use/connectors/base.py`:

```diff
@@ -131,6 +131,26 @@ class BaseConnector(ABC):
         resource = await self.client.read_resource(uri)
         return resource.content, resource.mimeType
 
+    async def list_prompts(self) -> list[dict[str, Any]]:
+        """List all available prompts from the MCP implementation."""
+        if not self.client:
+            raise RuntimeError("MCP client is not connected")
+
+        logger.debug("Listing prompts")
+        prompts = await self.client.list_prompts()
+        return prompts
+
+    async def get_prompt(
+        self, name: str, arguments: dict[str, Any] | None = None
+    ) -> tuple[bytes, str]:
+        """Get a prompt by name."""
+        if not self.client:
+            raise RuntimeError("MCP client is not connected")
+
+        logger.debug(f"Getting prompt: {name}")
+        prompt = await self.client.get_prompt(name, arguments)
+        return prompt
+
     async def request(self, method: str, params: dict[str, Any] | None = None) -> Any:
         """Send a raw request to the MCP implementation."""
         if not self.client:
```
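
These new connector methods expose the MCP prompt primitives (`prompts/list` and `prompts/get`). A usage sketch, assuming an already-connected connector instance; the prompt name and arguments are made-up example values:

```python
async def show_prompts(connector) -> None:
    """Sketch: enumerate the server's prompts, then fetch one by name."""
    # Each entry describes one prompt template exposed by the server.
    for prompt_info in await connector.list_prompts():
        print(prompt_info)

    # "summarize" and its arguments are illustrative; a real server
    # advertises its own prompt names via list_prompts().
    prompt = await connector.get_prompt("summarize", {"style": "bullets"})
    print(prompt)
```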

`mcp_use/managers/tools/search_tools.py`:

```diff
@@ -3,7 +3,6 @@ import time
 from typing import ClassVar
 
 import numpy as np
-from fastembed import TextEmbedding
 from langchain_core.tools import BaseTool
 from pydantic import BaseModel, Field
```

```diff
@@ -114,11 +113,22 @@ class ToolSearchEngine:
         if self.model is not None:
             return True
 
+        try:
+            from fastembed import TextEmbedding  # optional dependency, install with [search]
+        except ImportError:
+            logger.error(
+                "The 'fastembed' library is not installed. "
+                "To use the search functionality, please install it by running: "
+                "pip install mcp-use[search]"
+            )
+            return False
+
         try:
             self.model = TextEmbedding(model_name="BAAI/bge-small-en-v1.5")
             self.embedding_function = lambda texts: list(self.model.embed(texts))
             return True
-        except Exception:
+        except Exception as e:
+            logger.error(f"Failed to load the embedding model: {e}")
             return False
 
     async def start_indexing(self) -> None:
```
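
Taken together, these two hunks turn `fastembed` from a hard dependency into a lazily imported optional one that fails with an actionable install hint. The guard generalises; a sketch of a reusable helper (not part of mcp-use, the name is illustrative):

```python
import importlib
import logging
from types import ModuleType

logger = logging.getLogger(__name__)


def require_optional(module_name: str, extra: str) -> ModuleType | None:
    """Import an optional module, logging an install hint if it is missing.

    Illustrative helper; ``extra`` names the pip extra that provides the
    module, e.g. require_optional("fastembed", "search").
    """
    try:
        return importlib.import_module(module_name)
    except ImportError:
        logger.error(
            "The %r library is not installed. Install it with: pip install mcp-use[%s]",
            module_name,
            extra,
        )
        return None
```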

`mcp_use-1.2.10.dist-info/METADATA`:

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcp-use
-Version: 1.2.8
+Version: 1.2.10
 Summary: MCP Library for LLMs
 Author-email: Pietro Zullo <pietro.zullo@gmail.com>
 License: MIT
```

```diff
@@ -50,6 +50,7 @@ Description-Content-Type: text/markdown
 [![PyPI Version](https://img.shields.io/pypi/v/mcp_use.svg)](https://pypi.org/project/mcp_use/)
 [![Python Versions](https://img.shields.io/pypi/pyversions/mcp_use.svg)](https://pypi.org/project/mcp_use/)
 [![Documentation](https://img.shields.io/badge/docs-mcp--use.io-blue)](https://docs.mcp-use.io)
+[![Website](https://img.shields.io/badge/website-mcp--use.io-blue)](https://mcp-use.io)
 [![License](https://img.shields.io/github/license/pietrozullo/mcp-use)](https://github.com/pietrozullo/mcp-use/blob/main/LICENSE)
 [![Code style: Ruff](https://img.shields.io/badge/code%20style-ruff-000000.svg)](https://github.com/astral-sh/ruff)
 [![GitHub stars](https://img.shields.io/github/stars/pietrozullo/mcp-use?style=social)](https://github.com/pietrozullo/mcp-use/stargazers)
```

```diff
@@ -67,7 +68,8 @@ Description-Content-Type: text/markdown
 |---------|-------------|
 | 🔄 [**Ease of use**](#quick-start) | Create your first MCP-capable agent with only 6 lines of code |
 | 🤖 [**LLM Flexibility**](#installing-langchain-providers) | Works with any LangChain-supported LLM that supports tool calling (OpenAI, Anthropic, Groq, Llama, etc.) |
-| 🌐 [**HTTP Support**](#http-connection-example) | Direct connection to MCP servers running on specific HTTP ports |
+| 🌐 [**Code Builder**](https://mcp-use.io/builder) | Explore MCP capabilities and generate starter code with the interactive [code builder](https://mcp-use.io/builder). |
+| 🔗 [**HTTP Support**](#http-connection-example) | Direct connection to MCP servers running on specific HTTP ports |
 | ⚙️ [**Dynamic Server Selection**](#dynamic-server-selection-server-manager) | Agents can dynamically choose the most appropriate MCP server for a given task from the available pool |
 | 🧩 [**Multi-Server Support**](#multi-server-support) | Use multiple MCP servers simultaneously in a single agent |
 | 🛡️ [**Tool Restrictions**](#tool-access-control) | Restrict potentially dangerous tools like file system or network access |
```

````diff
@@ -184,6 +186,43 @@ Example configuration file (`browser_mcp.json`):
 
 For other settings, models, and more, check out the documentation.
 
+## Streaming Agent Output
+
+MCP-Use supports asynchronous streaming of agent output using the `astream` method on `MCPAgent`. This allows you to receive incremental results, tool actions, and intermediate steps as they are generated by the agent, enabling real-time feedback and progress reporting.
+
+### How to use
+
+Call `agent.astream(query)` and iterate over the results asynchronously:
+
+```python
+async for chunk in agent.astream("Find the best restaurant in San Francisco"):
+    print(chunk["messages"], end="", flush=True)
+```
+
+Each chunk is a dictionary containing keys such as `actions`, `steps`, `messages`, and (on the last chunk) `output`. This enables you to build responsive UIs or log agent progress in real time.
+
+#### Example: Streaming in Practice
+
+```python
+import asyncio
+import os
+from dotenv import load_dotenv
+from langchain_openai import ChatOpenAI
+from mcp_use import MCPAgent, MCPClient
+
+async def main():
+    load_dotenv()
+    client = MCPClient.from_config_file("browser_mcp.json")
+    llm = ChatOpenAI(model="gpt-4o")
+    agent = MCPAgent(llm=llm, client=client, max_steps=30)
+    async for chunk in agent.astream("Look for a machine learning engineer job at NVIDIA."):
+        print(chunk["messages"], end="", flush=True)
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+This streaming interface is ideal for applications that require real-time updates, such as chatbots, dashboards, or interactive notebooks.
 
 # Example Use Cases
 
````
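The chunk keys named in the new README section follow LangChain's `AgentExecutor.astream` event shape: `actions` carries pending tool calls, `steps` carries completed ones, and only the final chunk carries `output`. A sketch of routing on those keys (hypothetical helper; `agent` is assumed to be an initialised `MCPAgent`):

```python
async def stream_with_progress(agent, query: str) -> str:
    """Sketch: log tool activity as it streams in, return the final answer."""
    final_output = ""
    async for chunk in agent.astream(query):
        for action in chunk.get("actions", []):
            print(f"-> calling tool: {action.tool}")  # AgentAction
        for step in chunk.get("steps", []):
            print(f"<- tool finished: {step.action.tool}")  # AgentStep
        if "output" in chunk:
            final_output = chunk["output"]  # only present on the last chunk
    return final_output
```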

```diff
@@ -344,7 +383,7 @@ if __name__ == "__main__":
 
 ## HTTP Connection Example
 
-MCP-Use now supports HTTP connections, allowing you to connect to MCP servers running on specific HTTP ports. This feature is particularly useful for integrating with web-based MCP servers.
+MCP-Use supports HTTP connections, allowing you to connect to MCP servers running on specific HTTP ports. This feature is particularly useful for integrating with web-based MCP servers.
 
 Here's an example of how to use the HTTP connection feature:
```
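The example the text refers to is unchanged in this release, so it does not appear in the diff. For context, a minimal sketch of the HTTP connection pattern, assuming an MCP server is already running locally; the server name, port, and path are placeholder values:

```python
import asyncio

from langchain_openai import ChatOpenAI
from mcp_use import MCPAgent, MCPClient

async def main():
    # Placeholder endpoint: any MCP server reachable over HTTP works here.
    config = {"mcpServers": {"http": {"url": "http://localhost:8931/sse"}}}
    client = MCPClient.from_dict(config)
    agent = MCPAgent(llm=ChatOpenAI(model="gpt-4o"), client=client, max_steps=30)
    print(await agent.run("Which tools are available?"))

if __name__ == "__main__":
    asyncio.run(main())
```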

`mcp_use-1.2.10.dist-info/RECORD`:

```diff
@@ -8,11 +8,11 @@ mcp_use/adapters/base.py,sha256=ixLHXp8FWdyZPx7Kh6s-4jEVs3qT4DWrApSLXfqzNws,6141
 mcp_use/adapters/langchain_adapter.py,sha256=s8IHPPtqqXMmWQfeBqwESs3SZA6_ECSiGRwdTOIWki0,6417
 mcp_use/agents/__init__.py,sha256=N3eVYP2PxqNO2KcQv5fY8UMUX2W3eLTNkkzuFIJ1DUA,261
 mcp_use/agents/base.py,sha256=bfuldi_89AbSbNc8KeTiCArRT9V62CNxHOWYkLHWjyA,1605
-mcp_use/agents/mcpagent.py,sha256=khzBFeW-mJxA4YeCA6v0jD7czu9MwOjtIXD0_WzNkQE,23795
+mcp_use/agents/mcpagent.py,sha256=1U2HHOk-P-W4O81wdmEXaVWcGU2VQ9xyGaWykK1VCVQ,26954
 mcp_use/agents/prompts/system_prompt_builder.py,sha256=GH5Pvl49IBpKpZA9YTI83xMsdYSkRN_hw4LFHkKtxbg,4122
 mcp_use/agents/prompts/templates.py,sha256=AZKrGWuI516C-PmyOPvxDBibNdqJtN24sOHTGR06bi4,1933
 mcp_use/connectors/__init__.py,sha256=jnd-7pPPJMb0UNJ6aD9lInj5Tlamc8lA_mFyG8RWJpo,385
-mcp_use/connectors/base.py,sha256=5TcXB-I5zrwPtedB6dShceNucsK3wHBeGC2yDVq8X48,4885
+mcp_use/connectors/base.py,sha256=5V8XHh3K-_zMBG22c6cxHynLyZps7M8FTIMt29OJa8o,5599
 mcp_use/connectors/http.py,sha256=2ZG5JxcK1WZ4jkTfTir6bEQLMxXBTPHyi0s42RHGeFs,2837
 mcp_use/connectors/stdio.py,sha256=MTzsqmVVihACUKngE-g5BignK3jAFds2CFv3aSzbJfs,2608
 mcp_use/connectors/websocket.py,sha256=LeU53YI3zjbwKq5GzFRziqA_z9Dn5qACiNyxWDrn2ns,9540
@@ -24,14 +24,14 @@ mcp_use/managers/tools/connect_server.py,sha256=MGYQCl11q-w6gSIYuT44dDk7ILV3Oh7k
 mcp_use/managers/tools/disconnect_server.py,sha256=4487QlLbXAh9JyfGioc6DMWd0n_dkaa8RLMvsoNZv3E,1602
 mcp_use/managers/tools/get_active_server.py,sha256=LRcHbKZopMl1PiO4D4JS4s0fwtrvtMtvb4kpnoAE8fQ,1015
 mcp_use/managers/tools/list_servers_tool.py,sha256=OPDSMNe-VuAhlUyhDnR4CiuZFpoMhnhWpAablwO5S0k,1897
-mcp_use/managers/tools/search_tools.py,sha256=0BfFesCBJsdCAG2tPCM-c49tmBJLwLQoR_U-sj-rp2s,11628
+mcp_use/managers/tools/search_tools.py,sha256=sT2fe66IyOeASTGjdTsjyzSpqkIGKLVXBF8wXUtWXd4,12055
 mcp_use/managers/tools/use_tool.py,sha256=r7k7uMYzrk353qw7M5h1utu_IR2G85uMZkrNcg2RyZA,6824
 mcp_use/task_managers/__init__.py,sha256=4dgW5N61iiPLpwjU2rrn_uqrL8mmDJFDaF9Lukzk65A,486
 mcp_use/task_managers/base.py,sha256=ksNdxTwq8N-zqymxVoKGnWXq9iqkLYC61uB91o6Mh-4,4888
 mcp_use/task_managers/sse.py,sha256=WysmjwqRI3meXMZY_F4y9tSBMvSiUZfTJQfitM5l6jQ,2529
 mcp_use/task_managers/stdio.py,sha256=DEISpXv4mo3d5a-WT8lkWbrXJwUh7QW0nMT_IM3fHGg,2269
 mcp_use/task_managers/websocket.py,sha256=ZbCqdGgzCRtsXzRGFws-f2OzH8cPAkN4sJNDwEpRmCc,1915
-mcp_use-1.2.8.dist-info/METADATA,sha256=ld-cA_dlzxM-D5VkJMLqgr3WeyGzHVRx_pQ_sKQxRL8,18493
-mcp_use-1.2.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-mcp_use-1.2.8.dist-info/licenses/LICENSE,sha256=7Pw7dbwJSBw8zH-WE03JnR5uXvitRtaGTP9QWPcexcs,1068
-mcp_use-1.2.8.dist-info/RECORD,,
+mcp_use-1.2.10.dist-info/METADATA,sha256=WnpB7b1j-Py3BtKajWprmb6PLf89JxtWdCsTuquAuis,20199
+mcp_use-1.2.10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+mcp_use-1.2.10.dist-info/licenses/LICENSE,sha256=7Pw7dbwJSBw8zH-WE03JnR5uXvitRtaGTP9QWPcexcs,1068
+mcp_use-1.2.10.dist-info/RECORD,,
```